package exp.baseline;

import java.io.File;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.TreeMap;
import java.util.Vector;

import opennlp.tools.tokenize.Tokenizer;
import tools.FileReaderEnhance;
import tools.FileWriterEnhance;
import tools.MapTools;
import tools.nlp.TokenizerSimple;
import tools.similarity.Cosine;
import tools.twitter.TweetTidy;
import edu.hit.irlab.nlp.stopwords.StopWords;

/**
 * Hybrid TF-IDF tweet-summarization baseline.
 *
 * <p>For each file under {@link #filePath}: computes term frequency (tf),
 * document frequency (df, one "document" = one tweet line) and idf over the
 * file's tweets, scores each tweet by the mean tf-idf of its non-stopword
 * tokens, then greedily selects up to {@link #topN} tweets in descending score
 * order, skipping any tweet whose cosine similarity to an already-selected
 * tweet exceeds {@link #simThreshold}. The selection is echoed to stdout and
 * written to {@link #outputPath} under the same file name.
 */
public class HybridTFIDF {
	/** Maximum number of tweets emitted per input file. */
	static final int topN = 200;
	/** Cosine-similarity threshold above which a candidate tweet is considered redundant. */
	static final double simThreshold = 0.5;
	static Vector<String> sw = (new StopWords("data/stopwords.dat")).getStopwords();
	static HashMap<String, Boolean> stopwords = new HashMap<String, Boolean>();

	// Per-file term statistics. These are cleared at the start of each file
	// iteration in main(); without the reset, counts accumulated across files
	// and contaminated every file's scores with terms from earlier files.
	static HashMap<String, Double> tf = new HashMap<>();
	static HashMap<String, Double> idf = new HashMap<>();
	static HashMap<String, Double> df = new HashMap<>();
	static HashMap<String, Double> tfidf = new HashMap<>();

	static {
		// Build a hash lookup for O(1) stopword membership tests.
		for (String stopword : sw) {
			stopwords.put(stopword, true);
		}
	}

	static String filePath = "data/_newData/plainText_filtered/";
	static String outputPath = "data/_newData/baseline/HybridTFIDF/";

	public static void main(String[] args) {
		File[] files = (new File(filePath)).listFiles();
		if (files == null) {
			// listFiles() returns null when the path does not exist or is not a directory.
			System.err.println("Input directory not found or not readable: " + filePath);
			return;
		}
		for (File file : files) {
			System.err.println("Processing: " + file.getName());
			String[] lines = FileReaderEnhance.readToString(file, "UTF-8").split("\n");

			// FIX: reset per-file statistics so earlier files do not skew this file's scores.
			tf.clear();
			df.clear();
			idf.clear();
			tfidf.clear();

			TokenizerSimple ts = new TokenizerSimple();
			Tokenizer tokenizer = ts.getTokenizer();

			// Normalize tweets (strip Twitter artifacts, lowercase) before counting.
			for (int i = 0; i < lines.length; i++) {
				lines[i] = TweetTidy.doTidyAll(lines[i]).toLowerCase();
			}

			// Count tf (every occurrence) and df (once per tweet) for non-stopword tokens.
			for (String line : lines) {
				String tokens[] = tokenizer.tokenize(line);
				// skip tweets with fewer than 3 tokens
				if (tokens.length < 3) {
					continue;
				}
				// tokens already counted toward df for this tweet
				HashMap<String, Boolean> temp = new HashMap<>();
				for (String token : tokens) {
					if (stopwords.containsKey(token)) {
						continue;
					}
					MapTools.add(tf, token);
					if (!temp.containsKey(token)) {
						temp.put(token, true);
						MapTools.add(df, token);
					}
				}
			}

			// idf = log(N / df); tfidf = tf * idf.
			Double numberOfLine = (double) lines.length;
			for (String key : df.keySet()) {
				idf.put(key, Math.log(numberOfLine / df.get(key)));
				tfidf.put(key, tf.get(key) * idf.get(key));
			}

			// score -> tab-joined line indices of tweets sharing that exact score
			TreeMap<Double, String> sorted = new TreeMap<>();

			// Score each tweet by the mean tf-idf of its non-stopword tokens.
			// The same length/stopword filters as above guarantee every token
			// looked up here exists in tfidf.
			for (int i = 0; i < lines.length; i++) {
				String[] tokens = tokenizer.tokenize(lines[i]);
				if (tokens.length < 3) {
					continue;
				}
				double score = 0.0;
				double length = 0.0;
				for (String token : tokens) {
					if (stopwords.containsKey(token)) {
						continue;
					}
					score += tfidf.get(token);
					length++;
				}
				// all-stopword tweet: nothing to score
				if (length == 0) {
					continue;
				}
				// normalize by the number of scored tokens
				score = score / length;

				// Double keys collide when two tweets tie; append the index instead
				// of overwriting.
				if (sorted.containsKey(score)) {
					sorted.put(score, sorted.get(score) + "\t" + i);
				} else {
					sorted.put(score, String.valueOf(i));
				}
			}

			// Greedy redundancy-filtered selection: highest scores first, skip any
			// candidate too similar to a tweet already selected.
			ArrayList<String> alreadyOutput = new ArrayList<>();
			selection: for (Double score : sorted.descendingKeySet()) {
				String[] tweets = sorted.get(score).split("\t");
				candidate: for (String tweet : tweets) {
					int lineNum = Integer.parseInt(tweet);
					for (String output : alreadyOutput) {
						if (Cosine.getCosine(tokenizer.tokenize(lines[lineNum]), tokenizer.tokenize(output), stopwords) > simThreshold) {
							// redundant with an already-selected tweet
							continue candidate;
						}
					}
					System.out.println(score + "\t\t" + lines[lineNum]);
					alreadyOutput.add(lines[lineNum]);
					if (alreadyOutput.size() == topN) {
						break selection;
					}
				}
			}

			StringBuilder sb = new StringBuilder();
			for (String output : alreadyOutput) {
				sb.append(output);
				sb.append('\n');
			}
			FileWriterEnhance fwe = new FileWriterEnhance(outputPath + file.getName(), "UTF-8");
			fwe.WriteToFile(sb.toString());
		}
	}
}
