package exp.gold;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.PrintStream;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Map;
import java.util.TreeMap;
import java.util.Vector;

import opennlp.tools.tokenize.Tokenizer;
import tools.FileReaderEnhance;
import tools.ObjectTools;
import tools.nlp.TokenizerSimple;
import tools.similarity.Cosine;
import tools.twitter.TweetTidy;
import dragon.nlp.tool.lemmatiser.EngLemmatiser;
import edu.hit.irlab.nlp.stopwords.StopWords;

/**
 * Rank the tweets according to the highlights.
 * <br /> All the highlights are treated equally.
 * <br /> Ranking levels: strongly relevant (1), relevant (2), weakly relevant (3), irrelevant (4)
 * <br /> strongly relevant : cosine &gt;= 0.8
 * <br /> relevant : 0.8 &gt; cosine &gt;= 0.7
 * <br /> weakly relevant : 0.7 &gt; cosine &gt;= 0.5
 * <br /> irrelevant : cosine &lt; 0.5
 * @author lzhou
 *
 */
public class GenerateGoldT_Rank {
	// Stop-word list is loaded once; copied into a HashMap for O(1) membership tests.
	static Vector<String> sw = (new StopWords("data/stopwords.dat")).getStopwords();
	static HashMap<String, Boolean> stopwords = new HashMap<String, Boolean>();
	static {
		for (String stopword : sw) {
			stopwords.put(stopword, true);
		}
	}
	static TokenizerSimple ts = new TokenizerSimple();
	static Tokenizer tokenizer = ts.getTokenizer();
	static EngLemmatiser el = new EngLemmatiser("data/_DragonTool/nlpdata/lemmatiser", true, false);

	// Cosine-similarity thresholds for each relevance level (see class Javadoc).
	// Declared final: these are constants, not tunables mutated at runtime.
	static final double STRONGRELEVANT = 0.8;
	static final double RELEVANT = 0.7;
	static final double WEAKRELEVANT = 0.5;

	/**
	 * Tokenizes and lemmatizes one piece of (already tidied) text and counts
	 * word occurrences, skipping stop words.
	 *
	 * @param text the text to analyse
	 * @return lemma -&gt; occurrence-count map, stop words excluded
	 */
	private static Map<String, Integer> toWordCounts(String text) {
		String[] words = tokenizer.tokenize(text);
		Map<String, Integer> counts = new HashMap<>();
		for (String word : words) {
			String lemma = el.lemmatize(word);
			// Stop words are filtered on the lemmatized form, as in the original logic.
			if (stopwords.containsKey(lemma)) {
				continue;
			}
			counts.merge(lemma, 1, Integer::sum);
		}
		return counts;
	}

	/**
	 * Ranks every tweet file in {@code tPath} against the highlight ranking
	 * stored (as a serialized {@code TreeMap<Integer, ArrayList<String>>},
	 * vote count -&gt; highlights) in {@code hlrankPath}, and serializes the
	 * per-tweet labels (1..4, see class Javadoc) to {@code outPath}.
	 * <p>Each tweet keeps the best (lowest) label it reaches against any
	 * highlight; a label is never downgraded.
	 *
	 * @param tPath      directory of plain-text tweet files (one tweet per line), must end with '/'
	 * @param hlrankPath directory of serialized highlight rankings, keyed by the same file names
	 * @param outPath    directory where the serialized label lists are written
	 * @throws IllegalArgumentException if {@code tPath} is not a readable directory
	 */
	public void run(String tPath, String hlrankPath, String outPath) {
		File[] files = new File(tPath).listFiles();
		if (files == null) {
			// listFiles() returns null (not an empty array) for a missing/unreadable dir.
			throw new IllegalArgumentException("Not a readable directory: " + tPath);
		}
		for (File file : files) {
			String[] tweets = FileReaderEnhance.readToString(tPath + file.getName(), "UTF-8").split("\n");

			// Word-count vector for every tweet (after Twitter-specific tidying).
			ArrayList<Map<String, Integer>> allTweets = new ArrayList<>(tweets.length);
			for (String tweet : tweets) {
				allTweets.add(toWordCounts(TweetTidy.doTidyAll(tweet)));
			}

			// Word-count vectors for all highlights, flattened in descending
			// vote-count order (highest-voted highlights first).
			ArrayList<Map<String, Integer>> allHLs = new ArrayList<>();
			ArrayList<String> allHLText = new ArrayList<>();
			@SuppressWarnings("unchecked")
			TreeMap<Integer, ArrayList<String>> sorted = (TreeMap<Integer, ArrayList<String>>) ObjectTools.readFromFile(hlrankPath + file.getName());
			for (Integer count : sorted.descendingKeySet()) {
				for (String hl : sorted.get(count)) {
					allHLText.add(hl);
					allHLs.add(toWordCounts(hl));
				}
			}

			// One label slot per tweet; null marks "not yet classified".
			ArrayList<Integer> labels = new ArrayList<>(tweets.length);
			for (int i = 0; i < tweets.length; i++) {
				labels.add(null);
			}
			// Compare every tweet against every highlight and keep the best label.
			for (int j = 0; j < allHLText.size(); j++) {
				System.out.println("Calculating: " + allHLText.get(j));
				for (int i = 0; i < allTweets.size(); i++) {
					double sim = Cosine.getCosine(i, j, allTweets, allHLs);
					if (sim >= STRONGRELEVANT) {
						// Strongly relevant always wins, regardless of any earlier label.
						labels.set(i, 1);
						System.out.println("\t[Strongly], line: " + i	+ "\t" + tweets[i]);
					} else if (sim >= RELEVANT) {
						if (labels.get(i) == null) {
							labels.set(i, 2);
							System.out.println("\t[Relevant], line: " + i	+ "\t" + tweets[i]);
						} else if (labels.get(i) > 2) {
							// Upgrade a weakly-relevant tweet to relevant.
							labels.set(i, 2);
							System.out.println("\t[Relevant] from [WEAK], line: " + i	+ "\t" + tweets[i]);
						}
					} else if (sim >= WEAKRELEVANT) {
						// Only set when unlabelled; never downgrade a 1 or 2.
						if (labels.get(i) == null) {
							labels.set(i, 3);
							System.out.println("\t[WEAK], line: " + i	+ "\t" + tweets[i]);
						}
					}
				}
			}

			// Anything still unlabelled did not reach any threshold: irrelevant (4).
			int count = 0;
			for (int i = 0; i < labels.size(); i++) {
				if (labels.get(i) == null) {
					labels.set(i, 4);
					count++;
				}
			}
			System.out.println(count + " tweets are ranked to the bottom");
			ObjectTools.writeToFile(labels, outPath + file.getName());
		}
	}

	/**
	 * Entry point: redirects stdout to a log file (best-effort) and runs the
	 * ranking over the project's default data directories.
	 */
	public static void main(String[] args) {

		try {
			System.setOut(new PrintStream(new File("./logs/GenerateGoldT_Rank.out.log"), "UTF-8"));
		} catch (FileNotFoundException | UnsupportedEncodingException e) {
			// Log redirection is best-effort; keep running with the console stream.
			e.printStackTrace();
		}

		String hlrankPath = "./data/_newData/gold/";

		String tPath = "data/_newData/plainText_filtered/";

		String outPath = "data/_newData/goldTweetsRank/";

		GenerateGoldT_Rank ggt = new GenerateGoldT_Rank();
		ggt.run(tPath, hlrankPath, outPath);
	}
}
