package exp.ranking.observ;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.TreeMap;
import java.util.Vector;
import java.util.regex.Pattern;

import opennlp.tools.tokenize.Tokenizer;
import tools.FileReaderEnhance;
import tools.FileWriterEnhance;
import tools.MapTools;
import tools.ObjectTools;
import tools.nlp.TokenizerSimple;
import tools.twitter.TweetTidy;
import cc.mallet.pipe.CharSequence2TokenSequence;
import cc.mallet.pipe.CharSequenceLowercase;
import cc.mallet.pipe.Pipe;
import cc.mallet.pipe.SerialPipes;
import cc.mallet.pipe.TokenSequence2FeatureSequence;
import cc.mallet.pipe.TokenSequenceRemoveStopwords;
import cc.mallet.topics.ParallelTopicModel;
import cc.mallet.topics.TopicInferencer;
import cc.mallet.types.Instance;
import cc.mallet.types.InstanceList;
import edu.hit.irlab.nlp.stopwords.StopWords;
import exp.ranking.fe.KWFilterPipe;
import exp.ranking.fe.StemmingPipe;

/**
 * Runs LDA (via MALLET's {@link ParallelTopicModel}) over a tweet corpus for a
 * single event topic and scores gold "highlight" sentences against the
 * inferred topic distributions. Also computes Hybrid TF-IDF statistics over
 * the corpus, from which the most frequent background words are taken and
 * filtered out of the LDA vocabulary via {@link KWFilterPipe}.
 *
 * Usage: construct, call {@link #setTopic(String)} and
 * {@link #setNumTopics(int)}, then {@link #run()}. Results are appended to
 * ./logs/LDA_HL/&lt;topic&gt;_&lt;numTopics&gt;.out.log.
 */
public class LDA_HL {

	// LDA hyper-parameters. Note: alphaSum is the SUM over all topics, while
	// beta is the prior for a single dimension of the Dirichlet (see the
	// ParallelTopicModel constructor contract).
	double alphaSum = 0.1;
	double beta = 0.01;
	int iterations = 1000;
	// Number of LDA topics; -1 means "not yet configured" and run() rejects it.
	int numTopics = -1;
	// Event/topic name; selects the input corpus and the gold-highlight file.
	String topic = "";

	// Stop-word list loaded once, then mirrored into a hash map for O(1) lookup.
	Vector<String> sw = (new StopWords("data/stopwords.dat")).getStopwords();
	HashMap<String, Boolean> stopwords = new HashMap<String, Boolean>();

	TokenizerSimple ts = new TokenizerSimple();
	Tokenizer tokenizer = ts.getTokenizer();

	public int getNumTopics() {
		return numTopics;
	}

	public void setNumTopics(int numTopics) {
		this.numTopics = numTopics;
	}

	public String getTopic() {
		return topic;
	}

	public void setTopic(String topic) {
		this.topic = topic;
	}

	public LDA_HL() {
		// Index the stop-word vector for constant-time membership tests.
		for (String stopword : sw) {
			stopwords.put(stopword, true);
		}
	}

	/**
	 * Executes the full pipeline: Hybrid TF-IDF over the topic's tweets,
	 * background-word extraction, LDA training, then topic inference over the
	 * gold highlights. Writes a human-readable log per (topic, numTopics).
	 *
	 * @throws IllegalStateException if {@link #setNumTopics(int)} was never
	 *         called with a positive value.
	 */
	public void run() {
		if (numTopics <= 0) {
			throw new IllegalStateException(
					"numTopics must be set to a positive value before run(); was "
							+ numTopics);
		}
		String[] tweets = FileReaderEnhance.readToString(
				"data/_newData/plainText/" + topic, "UTF-8").split("\n");
		System.err.print("Starting Hybrid-TF-IDF calculation...");
		HashMap<String, Double> tf = new HashMap<>();
		HashMap<String, Double> idf = new HashMap<>();
		HashMap<String, Double> df = new HashMap<>();
		HashMap<String, Double> tfidf = new HashMap<>();
		StringBuilder output = new StringBuilder();
		// Each line is "<text>\t;;\t<metadata>"; keep only the tweet text.
		for (int i = 0; i < tweets.length; i++) {
			String[] elements = tweets[i].split("\t;;\t");
			tweets[i] = elements[0];
		}

		// Term frequency (tf) over the whole corpus and document frequency
		// (df) counting each token at most once per tweet. Stop words skipped.
		for (String tweet : tweets) {
			tweet = TweetTidy.doTidyAll(tweet).toLowerCase();
			String[] tokens = tokenizer.tokenize(tweet);
			if (tokens.length == 0) {
				continue;
			}
			HashMap<String, Boolean> seenInTweet = new HashMap<>();
			for (String token : tokens) {
				if (stopwords.containsKey(token)) {
					continue;
				}
				MapTools.add(tf, token);
				if (!seenInTweet.containsKey(token)) {
					seenInTweet.put(token, true);
					MapTools.add(df, token);
				}
			}
		}

		// idf = ln(N / df); tfidf kept for parity with the Hybrid TF-IDF spec
		// even though only df is consumed below.
		double numberOfLine = tweets.length;
		for (String key : df.keySet()) {
			idf.put(key, Math.log(numberOfLine / df.get(key)));
			tfidf.put(key, tf.get(key) * idf.get(key));
		}
		System.err.println("Done!");

		System.err.println("Starting LDA calculation...");
		// Step 1. Collect the most frequent background words in this topic.
		// The 5 highest document frequencies are taken; ties within the last
		// frequency bucket are all included, so the list may exceed 5 words.
		List<String> bWords = new ArrayList<>(10);
		TreeMap<Double, ArrayList<String>> sortedDF = new TreeMap<>();
		for (String key : df.keySet()) {
			sortedDF.computeIfAbsent(df.get(key), k -> new ArrayList<>()).add(key);
		}
		output.append("\tTop 5 background words: \n");
		int count = 0;
		for (Double freq : sortedDF.descendingKeySet()) {
			if (count >= 5) {
				break; // top-5 (plus ties) already collected; stop scanning
			}
			for (String word : sortedDF.get(freq)) {
				bWords.add(word);
				output.append(word + "(" + freq + ")\t");
				count++;
			}
		}
		output.append("\n");

		// Step 2. LDA training.
		// Pipes: lowercase, stem, tokenize, drop background words, drop stop
		// words, map tokens to feature indices.
		ArrayList<Pipe> pipeList = new ArrayList<Pipe>();
		pipeList.add(new CharSequenceLowercase());
		pipeList.add(new StemmingPipe());
		pipeList.add(new CharSequence2TokenSequence(Pattern
				.compile("\\p{L}[\\p{L}\\p{P}]+\\p{L}")));
		pipeList.add(new KWFilterPipe(bWords));
		pipeList.add(new TokenSequenceRemoveStopwords(new File(
				"data/stopwords.dat"), "UTF-8", true, false, false));
		pipeList.add(new TokenSequence2FeatureSequence());

		InstanceList tweetsInstances = new InstanceList(new SerialPipes(
				pipeList));

		// tweets[] already holds plain text (metadata stripped above).
		for (String tweet : tweets) {
			String ldaContent = TweetTidy.doTidyAll(tweet);
			tweetsInstances
					.addThruPipe(new Instance(ldaContent, "0", "cs", ""));
		}

		// First ctor argument is the alpha SUM over topics; second is the
		// per-dimension beta prior.
		ParallelTopicModel model = new ParallelTopicModel(numTopics, alphaSum,
				beta);
		model.addInstances(tweetsInstances);

		// 12 parallel samplers (CPU count of the SEPC397 host); each looks at
		// a slice of the corpus and statistics are combined every iteration.
		model.setNumThreads(12);

		// 1000-2000 iterations is the recommended range for real use.
		model.setNumIterations(iterations);
		try {
			model.estimate();
		} catch (IOException e) {
			e.printStackTrace();
		}

		// Step 3. Score each gold highlight against the trained topics.
		System.err
				.println("Calculate the probabilities of highlights to the LDA topics...");
		@SuppressWarnings("unchecked")
		TreeMap<Integer, ArrayList<String>> sorted_hls = (TreeMap<Integer, ArrayList<String>>) ObjectTools
				.readFromFile("./data/_newData/gold/" + topic);
		TopicInferencer inferencer = model.getInferencer();
		InstanceList tInstances = new InstanceList(new SerialPipes(pipeList));
		// Highlights ordered by descending key (presumably a vote/score rank
		// in the gold file - TODO confirm against the gold-file writer).
		ArrayList<String> sorted = new ArrayList<>();
		for (int key : sorted_hls.descendingKeySet()) {
			sorted.addAll(sorted_hls.get(key));
		}
		for (String hl : sorted) {
			tInstances.addThruPipe(new Instance(hl, "0", "cs", hl));
		}

		// Normalizer so a uniform distribution yields entropy 1.0.
		double maxEntropy = Math.log(numTopics);
		for (int i = 0; i < tInstances.size(); i++) {
			Instance inst = tInstances.get(i);
			output.append('\n');
			output.append(inst.getSource());
			output.append('\n');
			double[] testProbabilities = inferencer.getSampledDistribution(
					inst, 10, 1, 5);
			// Max probability = how strongly the highlight commits to its
			// single best topic.
			double maxprob = -1;
			for (double prob : testProbabilities) {
				if (prob > maxprob) {
					maxprob = prob;
				}
			}
			output.append(maxprob);
			output.append('\n');
			// Normalized entropy of the distribution. Guard prob > 0:
			// 0 * log(0) would evaluate to NaN in IEEE arithmetic, but by the
			// standard convention it contributes 0 to the entropy.
			double entropy = 0.0;
			for (double prob : testProbabilities) {
				if (prob > 0) {
					entropy += -prob * Math.log(prob);
				}
			}
			output.append(entropy / maxEntropy);
			output.append('\n');
		}
		System.err.println("Done!");
		FileWriterEnhance fwe = new FileWriterEnhance("./logs/LDA_HL/" + topic + "_" + numTopics + ".out.log", "UTF-8");
		fwe.WriteToFile(output.toString());
	}

	/**
	 * Runs the pipeline for one topic over a sweep of topic counts.
	 *
	 * @param topic event/topic name (selects input files)
	 * @param from  first numTopics value (inclusive)
	 * @param to    last numTopics value (inclusive)
	 * @param step  increment between runs
	 */
	private static void runSweep(String topic, int from, int to, int step) {
		for (int i = from; i <= to; i += step) {
			LDA_HL lh = new LDA_HL();
			lh.setNumTopics(i);
			lh.setTopic(topic);
			lh.run();
		}
	}

	public static void main(String[] args) {
		runSweep("Boston bombing", 40, 160, 20);
		runSweep("connecticut shooting", 40, 160, 20);
		runSweep("Oscar 2013", 40, 160, 20);
		runSweep("Obama inauguration", 40, 160, 20);
		runSweep("Royal Wedding", 40, 160, 20);
		// Smaller corpus: sweep a lower topic-count range.
		runSweep("Russia meteor", 20, 70, 10);
	}
}
