package exp;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.TreeMap;
import java.util.Vector;

import opennlp.tools.tokenize.Tokenizer;
import tools.FileReaderEnhance;
import tools.MapTools;
import tools.nlp.TokenizerSimple;
import tools.similarity.Cosine;
import tools.twitter.TweetTidy;
import edu.hit.irlab.nlp.stopwords.StopWords;

/**
 * The Hybrid TF-IDF summarizer: ranks all tweets by the mean TF-IDF weight
 * of their non-stopword tokens and prints the top tweets, skipping any
 * candidate whose cosine similarity to an already-printed tweet exceeds
 * the similarity threshold.
 * 
 * @author Lanjun
 * 
 */
public class EXP1 {
	/** Number of tweets to emit in the final summary. */
	static final int topN = 10;
	/** Cosine-similarity threshold above which two tweets count as near-duplicates. */
	static final double simThreshold = 0.5;
	/** Raw stopword list loaded from disk at class-load time. */
	static Vector<String> sw = (new StopWords("data/stopwords.dat")).getStopwords();
	/** Stopword lookup table (HashMap used as a set for O(1) membership tests). */
	static HashMap<String, Boolean> stopwords = new HashMap<String, Boolean>();

	/** Corpus-wide term frequency of each non-stopword token. */
	static HashMap<String, Double> tf = new HashMap<>();
	/** Inverse document frequency: log(#tweets / df). */
	static HashMap<String, Double> idf = new HashMap<>();
	/** Document frequency: number of tweets containing each token at least once. */
	static HashMap<String, Double> df = new HashMap<>();
	/** Hybrid TF-IDF weight per token: tf * idf. */
	static HashMap<String, Double> tfidf = new HashMap<>();

	static {
		// Index the stopword list so lookups in the hot loops are constant-time.
		for (String stopword : sw) {
			stopwords.put(stopword, true);
		}
	}

	/**
	 * Reads one tweet per line from the input file, computes a Hybrid TF-IDF
	 * score per tweet (mean tfidf of its non-stopword tokens), then prints the
	 * {@code topN} highest-scoring tweets, suppressing any tweet whose cosine
	 * similarity to an already-printed tweet exceeds {@code simThreshold}.
	 * 
	 * @param args unused
	 */
	public static void main(String[] args) {
		String filename = "data/plaintext/9_11_memorial_2011";
		String[] lines = FileReaderEnhance.readToString(filename, "UTF-8").split("\n");

		TokenizerSimple ts = new TokenizerSimple();
		Tokenizer tokenizer = ts.getTokenizer();

		// Normalize every tweet and lowercase it before any counting.
		for (int i = 0; i < lines.length; i++) {
			lines[i] = TweetTidy.doTidyAll(lines[i]).toLowerCase();
		}

		// Pass 1: accumulate tf (corpus-wide counts) and df (per-tweet presence).
		for (String line : lines) {
			String[] tokens = tokenizer.tokenize(line);
			if (tokens.length == 0) {
				continue;
			}
			HashMap<String, Boolean> seenInTweet = new HashMap<>();
			for (String token : tokens) {
				if (stopwords.containsKey(token)) {
					continue;
				}
				MapTools.add(tf, token);
				// df counts each token at most once per tweet.
				if (!seenInTweet.containsKey(token)) {
					seenInTweet.put(token, true);
					MapTools.add(df, token);
				}
			}
		}

		// Pass 2: idf = log(N / df); tfidf = tf * idf.
		// NOTE(review): N counts all lines, including ones that tokenized to
		// nothing — confirm this matches the intended document count.
		double numberOfLine = lines.length;
		for (String key : df.keySet()) {
			idf.put(key, Math.log(numberOfLine / df.get(key)));
			tfidf.put(key, tf.get(key) * idf.get(key));
		}

		// Pass 3: score each tweet as the mean tfidf of its non-stopword tokens.
		// Keyed by score; tweets sharing the same score are tab-joined indices.
		TreeMap<Double, String> sorted = new TreeMap<>();
		for (int i = 0; i < lines.length; i++) {
			String[] tokens = tokenizer.tokenize(lines[i]);
			if (tokens.length == 0) {
				continue;
			}
			double score = 0.0;
			double length = 0.0;
			for (String token : tokens) {
				if (stopwords.containsKey(token)) {
					continue;
				}
				score += tfidf.get(token);
				length++;
			}
			if (length == 0) {
				continue; // tweet consisted entirely of stopwords
			}
			// Normalize by non-stopword token count so long tweets aren't favored.
			score = score / length;

			if (sorted.containsKey(score)) {
				sorted.put(score, sorted.get(score) + "\t" + i);
			} else {
				sorted.put(score, String.valueOf(i));
			}
		}

		// Pass 4: emit tweets from highest score down, skipping near-duplicates
		// of anything already printed, until topN tweets have been output.
		// BUGFIX(dead code): the original kept a boolean flag that could never
		// be read as true, because the similarity hit jumped straight to the
		// next candidate via a labeled continue; the redundant flag logic is
		// replaced with an equivalent break-and-check.
		ArrayList<String> alreadyOutput = new ArrayList<>();
		output: for (Double score : sorted.descendingKeySet()) {
			for (String tweet : sorted.get(score).split("\t")) {
				int lineNum = Integer.parseInt(tweet);
				boolean duplicate = false;
				for (String printed : alreadyOutput) {
					if (Cosine.getCosine(tokenizer.tokenize(lines[lineNum]), tokenizer.tokenize(printed), stopwords) > simThreshold) {
						duplicate = true;
						break;
					}
				}
				if (duplicate) {
					continue;
				}
				System.out.println(score + "\t\t" + lines[lineNum]);
				alreadyOutput.add(lines[lineNum]);
				if (alreadyOutput.size() == topN) {
					break output;
				}
			}
		}
	}
}
