package exp.ranking.fe;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.PrintStream;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.Formatter;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.Vector;
import java.util.regex.Pattern;

import opennlp.tools.tokenize.Tokenizer;
import tools.FileReaderEnhance;
import tools.FileWriterEnhance;
import tools.MapTools;
import tools.ObjectTools;
import tools.nlp.TokenizerSimple;
import tools.twitter.TweetTidy;
import tools.twitter.URLTools;
import cc.mallet.pipe.CharSequence2TokenSequence;
import cc.mallet.pipe.CharSequenceLowercase;
import cc.mallet.pipe.Pipe;
import cc.mallet.pipe.SerialPipes;
import cc.mallet.pipe.TokenSequence2FeatureSequence;
import cc.mallet.pipe.TokenSequenceRemoveStopwords;
import cc.mallet.topics.ParallelTopicModel;
import cc.mallet.topics.TopicInferencer;
import cc.mallet.types.Alphabet;
import cc.mallet.types.IDSorter;
import cc.mallet.types.Instance;
import cc.mallet.types.InstanceList;
import edu.hit.irlab.nlp.stopwords.StopWords;
import exp.URLContent1;

/**
 * Feature extraction for the tweet-ranking classification problem.
 *
 * <p>For every tweet of a topic file, four feature groups are written in
 * LibSVM format ({@code label idx:val idx:val ...}):
 * <ol>
 * <li>normalized hybrid TF-IDF score of the tweet's content words,</li>
 * <li>whether the tweet contains a quotation mark,</li>
 * <li>whether the tweet contains a URL,</li>
 * <li>the tweet's LDA topic distribution (one feature per topic).</li>
 * </ol>
 *
 * @author lzhou
 */
public class FE {

	// LDA hyper-parameters: alphaSum is the Dirichlet prior summed over all
	// topics, beta the per-dimension prior on the topic-word distributions,
	// iterations the number of Gibbs-sampling sweeps.
	static double alphaSum = 1.0;
	static double beta = 0.01;
	static int iterations = 1000;

	// Input directory (filtered plain-text tweets, one per line, columns
	// separated by "\t;;\t") and output directory for the feature files.
	static String tPATH = "data/_newData/plainText_filtered/";
	static String fPATH = "data/_newData/L2C/";

	// Stopword list loaded once and mirrored into a HashMap for O(1) lookups.
	static Vector<String> sw = (new StopWords("data/stopwords.dat"))
			.getStopwords();
	static HashMap<String, Boolean> stopwords = new HashMap<String, Boolean>();
	static {
		for (String stopword : sw) {
			stopwords.put(stopword, true);
		}
	}

	static TokenizerSimple ts = new TokenizerSimple();
	static Tokenizer tokenizer = ts.getTokenizer();

	/**
	 * Extracts all features for one topic and writes them to
	 * {@code fPATH + TOPIC + "_" + numTopics}.
	 *
	 * @param numTopics number of LDA topics (also the number of feature-4
	 *                  columns in the output)
	 * @param TOPIC     topic file name under {@link #tPATH}
	 */
	public static void run(int numTopics, String TOPIC) {
		String[] tweets = FileReaderEnhance
				.readToString(tPATH + TOPIC, "UTF-8").split("\n");

		/*
		 * Feature 1 : hybrid TF-IDF. Stop words are removed, the score is
		 * averaged over the tweet's content words and normalized to [0,1].
		 */
		System.err.print("1. Starting Hybrid-TF-IDF calculation...");
		HashMap<String, Double> tf = new HashMap<>();
		HashMap<String, Double> idf = new HashMap<>();
		HashMap<String, Double> df = new HashMap<>();
		HashMap<String, Double> tfidf = new HashMap<>();
		// Keep only the first column (the tweet text) of every input line.
		for (int i = 0; i < tweets.length; i++) {
			tweets[i] = tweets[i].split("\t;;\t")[0];
		}

		// Collection-wide term frequency (tf) and document frequency (df).
		for (String tweet : tweets) {
			tweet = TweetTidy.doTidyAll(tweet).toLowerCase();
			String tokens[] = tokenizer.tokenize(tweet);
			if (tokens.length == 0) {
				continue;
			}
			// Tracks tokens already counted in THIS tweet so that df is
			// incremented at most once per tweet.
			HashMap<String, Boolean> seen = new HashMap<>();
			for (String token : tokens) {
				if (stopwords.containsKey(token)) {
					continue;
				}
				MapTools.add(tf, token);
				if (!seen.containsKey(token)) {
					seen.put(token, true);
					MapTools.add(df, token);
				}
			}
		}

		// idf and tf-idf per token.
		double numberOfLine = tweets.length;
		for (String key : df.keySet()) {
			idf.put(key, Math.log(numberOfLine / df.get(key)));
			tfidf.put(key, tf.get(key) * idf.get(key));
		}

		// Score every tweet: mean tf-idf of its non-stopword tokens.
		ArrayList<Double> f1 = new ArrayList<>();
		double max = -1.0;
		for (int i = 0; i < tweets.length; i++) {
			String line = TweetTidy.doTidyAll(tweets[i]).toLowerCase();
			String tokens[] = tokenizer.tokenize(line);
			if (tokens.length == 0) {
				f1.add(0.0);
				continue;
			}
			double score = 0.0;
			double length = 0.0;
			for (String token : tokens) {
				if (stopwords.containsKey(token)) {
					continue;
				}
				// Every non-stopword token was counted in the tf/df pass
				// above (same tidying and tokenization), so the lookup is
				// expected to hit; getOrDefault is pure defensiveness.
				score += tfidf.getOrDefault(token, 0.0);
				length++;
			}
			if (length == 0) {
				// Tweet consisted of stopwords only.
				f1.add(0.0);
				continue;
			}
			score = score / length;
			if (score > max) {
				max = score;
			}
			f1.add(score);
		}
		// Normalize to [0,1]. BUGFIX: guard against max still being the
		// sentinel -1.0 (every tweet empty after tidying), which previously
		// produced negative "normalized" scores.
		if (max > 0) {
			for (int i = 0; i < f1.size(); i++) {
				f1.set(i, f1.get(i) / max);
			}
		}
		System.err.println("Done!");

		/*
		 * Feature 2 : whether the tweet contains a quotation mark.
		 */
		System.err.print("2. Starting quotation finding...");
		ArrayList<Double> f2 = new ArrayList<>();
		for (String tweet : tweets) {
			tweet = TweetTidy.doTidyAll(tweet).toLowerCase();
			f2.add(tweet.contains("\"") ? 1.0 : 0.0);
		}
		System.err.println("Done!");

		/*
		 * Feature 3 : whether the tweet contains a URL.
		 */
		System.err.print("3. Starting url finding...");
		ArrayList<Double> f3 = new ArrayList<>();
		for (String tweet : tweets) {
			tweet = TweetTidy.doTidyHTML(tweet);
			ArrayList<String> urls = URLTools.getURLs(tweet);
			f3.add(urls.size() > 0 ? 1.0 : 0.0);
		}
		System.err.println("Done!");

		/*
		 * Feature 4 : LDA topic distribution. All Mallet parameters are left
		 * at their defaults except the topic count supplied by the caller.
		 */
		System.err.println("4. Starting the LDA calculation...");
		// Step 1. Find the most frequent background words of this topic;
		// they are later removed by KWFilterPipe so LDA does not cluster on
		// them. Words are bucketed by document frequency, highest first.
		List<String> bWords = new ArrayList<>(10);
		TreeMap<Double, ArrayList<String>> sortedDF = new TreeMap<>();
		for (String key : df.keySet()) {
			ArrayList<String> bucket = sortedDF.get(df.get(key));
			if (bucket == null) {
				bucket = new ArrayList<>();
				sortedDF.put(df.get(key), bucket);
			}
			bucket.add(key);
		}
		System.err.print("\tTop 5 background words: ");
		// BUGFIX: the old loop added an entire frequency bucket at once (so
		// it could collect more than 5 words) and never broke out of the
		// iteration over the remaining keys.
		int count = 0;
		top: for (Double freq : sortedDF.descendingKeySet()) {
			for (String word : sortedDF.get(freq)) {
				if (count >= 5) {
					break top;
				}
				bWords.add(word);
				System.err.print(word + "(" + freq + ")\t");
				count++;
			}
		}
		System.err.println();

		// Step 2. LDA calculation. Import documents from text to feature
		// sequences: lowercase, stem, tokenize, drop background words and
		// stopwords, map tokens to feature indices.
		ArrayList<Pipe> pipeList = new ArrayList<Pipe>();
		pipeList.add(new CharSequenceLowercase());
		pipeList.add(new StemmingPipe());
		pipeList.add(new CharSequence2TokenSequence(Pattern
				.compile("\\p{L}[\\p{L}\\p{P}]+\\p{L}")));
		pipeList.add(new KWFilterPipe(bWords));
		pipeList.add(new TokenSequenceRemoveStopwords(new File(
				"data/stopwords.dat"), "UTF-8", true, false, false));
		pipeList.add(new TokenSequence2FeatureSequence());

		InstanceList tweetsInstances = new InstanceList(new SerialPipes(
				pipeList));

		// tweets[] already holds the text column only (stripped above), so
		// no further column splitting is needed here.
		for (String tweet : tweets) {
			tweetsInstances.addThruPipe(new Instance(
					TweetTidy.doTidyAll(tweet), "0", "cs", ""));
		}

		// Note that alphaSum is passed as the sum over topics, while beta is
		// the parameter for a single dimension of the Dirichlet prior.
		ParallelTopicModel model = new ParallelTopicModel(numTopics, alphaSum,
				beta);
		model.addInstances(tweetsInstances);

		// Use 2 parallel samplers; each looks at half the corpus and they
		// combine statistics after every iteration.
		model.setNumThreads(2);
		model.setNumIterations(iterations);
		try {
			model.estimate();
		} catch (IOException e) {
			// Best-effort: keep going with whatever state the model has.
			e.printStackTrace();
		}

		// Map each gold highlight to its most probable LDA topic (logged for
		// inspection only; not part of the emitted feature vector).
		System.err
				.println("4. Calculate the probabilities of highlights to the LDA topics...");
		// The data alphabet maps word IDs to strings.
		Alphabet dataAlphabet = tweetsInstances.getDataAlphabet();
		// Sorted sets of word ID/count pairs, one per topic.
		ArrayList<TreeSet<IDSorter>> topicSortedWords = model.getSortedWords();
		// NOTE: reuses the same Pipe objects, so the alphabets built on the
		// tweet corpus are shared with the highlight instances.
		InstanceList hlInstances = new InstanceList(new SerialPipes(pipeList));
		TopicInferencer inferencer = model.getInferencer();
		@SuppressWarnings("unchecked")
		ArrayList<URLContent1> ucs = (ArrayList<URLContent1>) ObjectTools
				.readFromFile("data/_newData/obj/highlights_contents_ROUGE/"
						+ TOPIC);
		for (URLContent1 uc : ucs) {
			for (String hl : uc.highlights) {
				hlInstances.addThruPipe(new Instance(TweetTidy.doTidyAll(hl),
						"0", "cs", hl));
			}
		}

		// t2h[topic] collects the highlights assigned to that topic.
		ArrayList<ArrayList<InstanceProb>> t2h = new ArrayList<>(numTopics);
		for (int i = 0; i < numTopics; i++) {
			t2h.add(new ArrayList<InstanceProb>());
		}
		for (int i = 0; i < hlInstances.size(); i++) {
			Instance inst = hlInstances.get(i);
			// 10 sampling iterations, thinning 1, burn-in 5.
			double[] testProbabilities = inferencer.getSampledDistribution(
					inst, 10, 1, 5);
			System.out.println(inst.getSource());
			System.out.println(inst.getData());
			// Assign the highlight to its single most probable topic.
			double m = -1.0;
			int t = -1;
			for (int j = 0; j < testProbabilities.length; j++) {
				if (testProbabilities[j] > m) {
					m = testProbabilities[j];
					t = j;
				}
			}
			t2h.get(t).add(new InstanceProb(i, m));

			System.out.println(t + "\t" + m);
			// Log the 20 highest-weighted words of the chosen topic.
			// BUGFIX: Formatter is Closeable; close it when done.
			try (Formatter out = new Formatter(new StringBuilder(), Locale.US)) {
				out.format("%d\t%.3f\t", t, testProbabilities[t]);
				Iterator<IDSorter> iterator = topicSortedWords.get(t)
						.iterator();
				int rank = 0;
				while (iterator.hasNext() && rank < 20) {
					IDSorter idCountPair = iterator.next();
					out.format("%s (%.0f) ",
							dataAlphabet.lookupObject(idCountPair.getID()),
							idCountPair.getWeight());
					rank++;
				}
				System.out.println(out);
			}
			System.out.println();
		}

		// Dump every topic with its top words and assigned highlights.
		for (int i = 0; i < t2h.size(); i++) {
			ArrayList<InstanceProb> hls = t2h.get(i);
			System.out.println("Topic: " + i);
			try (Formatter out = new Formatter(new StringBuilder(), Locale.US)) {
				Iterator<IDSorter> iterator = topicSortedWords.get(i)
						.iterator();
				int rank = 0;
				while (iterator.hasNext() && rank < 20) {
					IDSorter idCountPair = iterator.next();
					out.format("%s (%.0f) ",
							dataAlphabet.lookupObject(idCountPair.getID()),
							idCountPair.getWeight());
					rank++;
				}
				System.out.println(out);
			}
			for (InstanceProb tp : hls) {
				Instance ins = hlInstances.get(tp.instNum);
				System.out.println("\t" + tp.prob + "\t" + ins.getSource());
			}
			System.out.println();
		}

		// Infer the topic distribution for every tweet (feature 4 proper).
		ArrayList<double[]> f4 = new ArrayList<>();
		InstanceList tInstances = new InstanceList(new SerialPipes(pipeList));
		for (String tweet : tweets) {
			tInstances.addThruPipe(new Instance(TweetTidy.doTidyAll(tweet),
					"0", "cs", tweet));
		}
		for (int i = 0; i < tInstances.size(); i++) {
			Instance inst = tInstances.get(i);
			double[] testProbabilities = inferencer.getSampledDistribution(
					inst, 10, 1, 5);
			f4.add(testProbabilities);
		}
		System.err.println("Done!");

		// Emit one LibSVM line per tweet: label, then features 1..3, then
		// the numTopics LDA probabilities as features 4..(3+numTopics).
		StringBuilder fOutput = new StringBuilder();
		// NOTE(review): assumes the gold-label file holds exactly one label
		// per tweet line, in the same order — confirm against the producer.
		@SuppressWarnings("unchecked")
		ArrayList<Integer> labels = (ArrayList<Integer>) ObjectTools
				.readFromFile("data/_newData/goldTweets/" + TOPIC);
		for (int i = 0; i < tweets.length; i++) {
			fOutput.append(labels.get(i));
			fOutput.append(' ');
			fOutput.append("1:");
			fOutput.append(String.format("%.3f", f1.get(i)));
			fOutput.append(' ');
			fOutput.append("2:");
			fOutput.append(f2.get(i));
			fOutput.append(' ');
			fOutput.append("3:");
			fOutput.append(f3.get(i));
			fOutput.append(' ');

			double[] probs = f4.get(i);
			for (int j = 4; j < (4 + numTopics); j++) {
				fOutput.append(j);
				fOutput.append(":");
				fOutput.append(String.format("%.3f", probs[j - 4]));
				fOutput.append(' ');
			}

			fOutput.append('\n');
		}
		FileWriterEnhance fwe = new FileWriterEnhance(fPATH + TOPIC + "_"
				+ numTopics, "UTF-8");
		fwe.WriteToFile(fOutput.toString());
	}

	/**
	 * Redirects stdout to a log file, then runs feature extraction for one
	 * hard-coded topic with topic counts 60, 80, ..., 200.
	 */
	public static void main(String[] args) {
		try {
			System.setOut(new PrintStream(new File("./logs/fe.out.log"),
					"UTF-8"));
			// System.setErr(new PrintStream(new File("./logs/fe.err.log"),
			// "UTF-8"));
		} catch (FileNotFoundException e) {
			e.printStackTrace();
		} catch (UnsupportedEncodingException e) {
			e.printStackTrace();
		}

		String TOPIC = "connecticut_shooting";

		for (int i = 60; i <= 200; i += 20) {
			FE.run(i, TOPIC);
		}
	}
}
