package exp;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.PrintStream;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Set;
import java.util.TreeMap;

import opennlp.tools.sentdetect.SentenceDetectorME;

import exp.web.cnn.CNNExtraction;
import exp.web.usatoday.USATodayExtraction;

import tools.FileReaderEnhance;
import tools.FileWriterEnhance;
import tools.ObjectTools;
import tools.nlp.SentenceDetector;
import tools.twitter.TweetTidy;
import tools.twitter.URLTools;

/**
 * Aligns tweets to CNN and USA Today news articles.
 * @author Lanjun
 *
 */
public class EXP8 {

	/** Windows-style line terminator used in all generated text files. */
	private static final String CRLF = "\r\n";

	/**
	 * Aligns tweets to eligible CNN / USA Today articles and writes ranked output.
	 *
	 * <p>For every serialized {@code HashMap<String, URLContent>} file in
	 * {@code inputPath}: tweets are read from the same-named file under
	 * {@code tPath}, matched to articles via their shortened URLs, the articles
	 * are ranked by tweet count, and three text files per article (highlights,
	 * sentence-split contents, tweets) plus one serialized url-to-tweets map are
	 * written.
	 *
	 * @param inputPath      directory of serialized {@code HashMap<String, URLContent>} files
	 * @param tPath          directory of plain-text tweet files (one per input file, UTF-8)
	 * @param objOutputPath  directory receiving the serialized url-to-tweets maps
	 * @param textOutputPath directory receiving the ranked highlights/contents/tweets files
	 */
	public void run(String inputPath, String tPath, String objOutputPath, String textOutputPath) {
		SentenceDetectorME sdm = new SentenceDetector().getSentenceDetector();

		// listFiles() returns null for a missing/unreadable directory — the old
		// code would NPE in the enhanced for.
		File[] files = new File(inputPath).listFiles();
		if (files == null) {
			System.err.println("Input path is not a readable directory: " + inputPath);
			return;
		}

		for (File file : files) {
			@SuppressWarnings("unchecked")
			HashMap<String, URLContent> hls = (HashMap<String, URLContent>) ObjectTools.readFromFile(file);

			// shortened URL -> article (any article that has highlights)
			HashMap<String, URLContent> shortUrl2Content = new HashMap<>();
			// canonical article URL -> tidied content, eligible articles only
			HashMap<String, URLContent> url2Content = new HashMap<>();

			for (URLContent temp : hls.values()) {
				if (temp.highlights == null) {
					continue;
				}
				for (String url : temp.shortURL) {
					shortUrl2Content.put(url, temp);
				}
				URLContent tidied = tidyIfEligible(temp);
				if (tidied != null) {
					url2Content.put(temp.url, tidied);
				}
			}

			// canonical article URL -> tweets that linked to it
			HashMap<String, ArrayList<String>> url2Tweets =
					alignTweets(tPath + file.getName(), shortUrl2Content, url2Content.keySet());

			writeRanked(url2Content, url2Tweets, sdm, textOutputPath + file.getName());

			ObjectTools.writeToFile(url2Tweets, objOutputPath + file.getName());
		}
	}

	/**
	 * Returns the tidied {@link URLContent} when {@code temp.url} is an eligible
	 * CNN or USA Today article page, or {@code null} otherwise.
	 *
	 * <p>Malformed URLs are logged and rejected here; the old code printed the
	 * exception but then dereferenced the null {@code URL} and crashed.
	 *
	 * @param temp article candidate; {@code temp.highlights} is non-null by the caller's filter
	 */
	private static URLContent tidyIfEligible(URLContent temp) {
		String host;
		try {
			host = new URL(temp.url).getHost();
		} catch (MalformedURLException e) {
			e.printStackTrace();
			return null;
		}
		// CNN: skip video, specials and interactive pages.
		if (host.equals("edition.cnn.com")
				&& !temp.url.startsWith("http://edition.cnn.com/video")
				&& !temp.url.startsWith("http://edition.cnn.com/SPECIALS")
				&& !temp.url.startsWith("http://edition.cnn.com/interactive")) {
			return CNNExtraction.tidy(temp);
		}
		// USA Today: skip video pages.
		if (host.equals("www.usatoday.com")
				&& !temp.url.startsWith("http://www.usatoday.com/videos")) {
			return USATodayExtraction.tidy(temp);
		}
		return null;
	}

	/**
	 * Reads tweets from {@code tweetFile} and groups them by the canonical URL of
	 * the eligible article each one links to (via shortened URLs).
	 *
	 * @param tweetFile        UTF-8 file with one raw tweet per line
	 * @param shortUrl2Content shortened URL -> article mapping
	 * @param eligibleUrls     canonical URLs that passed the eligibility filter
	 * @return canonical article URL -> tweets mentioning it (urls with no tweets are absent)
	 */
	private static HashMap<String, ArrayList<String>> alignTweets(String tweetFile,
			HashMap<String, URLContent> shortUrl2Content, Set<String> eligibleUrls) {
		HashMap<String, ArrayList<String>> url2Tweets = new HashMap<>();
		String[] lines = FileReaderEnhance.readToString(tweetFile, "UTF-8").split("\n");
		for (String tweet : lines) {
			tweet = TweetTidy.doTidyHTML(tweet);
			for (String url : URLTools.getURLs(tweet)) {
				URLContent uc = shortUrl2Content.get(url);
				// Membership in eligibleUrls subsumes the old per-tweet host/prefix
				// re-check (url2Content was populated by exactly that filter), and
				// avoids re-parsing — and possibly NPE-ing on — the article URL.
				if (uc != null && eligibleUrls.contains(uc.url)) {
					url2Tweets.computeIfAbsent(uc.url, k -> new ArrayList<>()).add(tweet);
				}
			}
		}
		return url2Tweets;
	}

	/**
	 * Ranks articles by tweet count (descending) and writes, per article, three
	 * text files — highlights, sentence-split contents, and tweets — under
	 * {@code outDir}. Also stores each article's tweet list on its URLContent.
	 *
	 * <p>File names follow the original {@code RR_rr_count} scheme where RR is the
	 * count-group rank and rr the rank within the group, both zero-padded to two digits.
	 */
	private static void writeRanked(HashMap<String, URLContent> url2Content,
			HashMap<String, ArrayList<String>> url2Tweets, SentenceDetectorME sdm, String outDir) {
		// tweet count -> urls with that count; TreeMap gives us sorted iteration
		TreeMap<Integer, ArrayList<String>> u2tSort = new TreeMap<>();
		for (String url : url2Content.keySet()) {
			// Articles that matched no tweets count as 0 — the old code NPE'd on
			// url2Tweets.get(url).size() here even though the output loop below
			// explicitly tolerates a null tweet list.
			ArrayList<String> tweets = url2Tweets.get(url);
			int count = (tweets == null) ? 0 : tweets.size();
			u2tSort.computeIfAbsent(count, k -> new ArrayList<>()).add(url);
		}

		int rank = 1;
		for (Integer count : u2tSort.descendingKeySet()) {
			int rank1 = 1;
			for (String url : u2tSort.get(count)) {
				URLContent uc = url2Content.get(url);

				// Highlights file: URL, title and the article's highlight lines.
				StringBuilder sb = new StringBuilder();
				sb.append("[URL] ").append(CRLF).append(url).append(CRLF).append(CRLF);
				sb.append("[TITLE] ").append(CRLF).append(uc.title).append(CRLF).append(CRLF);
				sb.append("[HIGHLIGHTS] ").append(CRLF);
				for (String highlight : uc.highlights) {
					sb.append(highlight).append(CRLF);
				}

				// Contents file: one sentence per line, blank line between paragraphs.
				StringBuilder c = new StringBuilder();
				for (String content : uc.contents) {
					for (String sen : sdm.sentDetect(content)) {
						c.append(sen).append(CRLF);
					}
					c.append(CRLF);
				}

				// Tweets file: each tweet followed by a blank line.
				ArrayList<String> tweets = url2Tweets.get(url);
				uc.tweets = tweets;
				StringBuilder t = new StringBuilder();
				if (tweets != null) {
					for (String tweet : tweets) {
						t.append(tweet).append(CRLF).append(CRLF);
					}
				}

				String filename = pad(rank) + "_" + pad(rank1) + "_" + count;
				new FileWriterEnhance(outDir + "/highlights/" + filename, "UTF-8").WriteToFile(sb.toString());
				new FileWriterEnhance(outDir + "/contents/" + filename, "UTF-8").WriteToFile(c.toString());
				new FileWriterEnhance(outDir + "/tweets/" + filename, "UTF-8").WriteToFile(t.toString());
				rank1++;
			}
			rank++;
		}
	}

	/** Zero-pads a rank to two digits, matching the original filename scheme. */
	private static String pad(int n) {
		return n < 10 ? ("0" + n) : String.valueOf(n);
	}

	public static void main(String[] args) {
		try {
			// Redirect stdout to a log file; stack traces (stderr) still reach the console.
			System.setOut(new PrintStream(new File("./logs/EXP8.out.log")));
		} catch (FileNotFoundException e) {
			e.printStackTrace();
		}
		String inputPath = "data/_newData/obj/highlights/";
		/**
		 * hPath_1 is for the files of hPath with groupped tweets added
		 */
		String outputPath = "data/_newData/obj/highlights_CNN_USATODAY/";
		String tPath = "data/_newData/plainText/";
		String outputPath1 = "data/_newData/news_2_tweets/";

		EXP8 exp8 = new EXP8();
		exp8.run(inputPath, tPath, outputPath, outputPath1);
	}
}