package exp.ranking.fe;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.PrintStream;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.TreeMap;
import java.util.Vector;
import java.util.regex.Pattern;

import opennlp.tools.tokenize.Tokenizer;
import tools.FileReaderEnhance;
import tools.FileWriterEnhance;
import tools.MapTools;
import tools.ObjectTools;
import tools.nlp.TokenizerSimple;
import tools.twitter.TweetTidy;
import tools.twitter.URLTools;
import cc.mallet.pipe.CharSequence2TokenSequence;
import cc.mallet.pipe.CharSequenceLowercase;
import cc.mallet.pipe.Pipe;
import cc.mallet.pipe.SerialPipes;
import cc.mallet.pipe.TokenSequence2FeatureSequence;
import cc.mallet.pipe.TokenSequenceRemoveStopwords;
import cc.mallet.topics.ParallelTopicModel;
import cc.mallet.topics.TopicInferencer;
import cc.mallet.types.Instance;
import cc.mallet.types.InstanceList;
import edu.hit.irlab.nlp.stopwords.StopWords;

/**
 * Feature extraction for learning to rank problem
 * 
 * @author lzhou
 * 
 */
public class FE_L2R {

	// LDA hyper-parameters. Note alphaSum is the SUM over all topics; beta is
	// the Dirichlet prior for a single dimension of the topic-word distribution.
	static double alphaSum = 0.1;
	static double beta = 0.01;
	static int iterations = 1000;

	// Input/output paths.
	static String tPATH = "data/_newData/plainText_filtered/"; // plain tweet text, one per line
	static String tNERPATH = "data/_newData/plainText_filtered_NER/"; // serialized NER tag lists
	static String fPATH = "data/_newData/L2R/"; // feature-file output
	static String rPATH = "data/_newData/goldTweetsRank/"; // serialized gold relevance labels

	// Stopword lookup table, built once at class-load time.
	static Vector<String> sw = (new StopWords("data/stopwords.dat")).getStopwords();
	static HashMap<String, Boolean> stopwords = new HashMap<String, Boolean>();
	static {
		for (String stopword : sw) {
			stopwords.put(stopword, true);
		}
	}

	static TokenizerSimple ts = new TokenizerSimple();
	static Tokenizer tokenizer = ts.getTokenizer();

	/**
	 * Extracts all ranking features for every tweet of one topic and writes
	 * them in SVM-rank format ({@code label qid:1 1:v 2:v ...}) to
	 * {@code fPATH + TOPIC + "_" + numTopics}.
	 * 
	 * Features: (1) hybrid TF-IDF score, (2) contains a quotation,
	 * (2_1) starts with a quotation, (3) contains a URL, (4_1/4_2) LDA max
	 * topic probability and normalized topic-distribution entropy,
	 * (5) fraction of top-5 background words present, (6) token length,
	 * (7_1..7_7) presence of each of 7 NE classes.
	 * 
	 * @param numTopics number of LDA topics to train
	 * @param TOPIC     topic file name under {@code tPATH} (also keys the NER
	 *                  and gold-label files)
	 */
	public static void run(int numTopics, String TOPIC) {
		ArrayList<ArrayList<Double>> allFeatures = new ArrayList<>();
		String[] tweets = FileReaderEnhance.readToString(tPATH + TOPIC, "UTF-8").split("\n");

		/*
		 * Feature 1 : hybrid TF-IDF score, stopwords removed,
		 * length-normalized per tweet, then scaled to [0,1] by the max.
		 */
		System.err.print("1. Starting Hybrid-TF-IDF calculation...");
		HashMap<String, Double> tf = new HashMap<>();
		HashMap<String, Double> idf = new HashMap<>();
		HashMap<String, Double> df = new HashMap<>();
		HashMap<String, Double> tfidf = new HashMap<>();
		// Each line is "text\t;;\tmetadata..."; keep only the text part.
		for (int i = 0; i < tweets.length; i++) {
			String[] elements = tweets[i].split("\t;;\t");
			tweets[i] = elements[0];
		}

		// Collect term frequency (tf) over the whole topic and document
		// frequency (df) — at most one count per tweet via the `temp` set.
		for (String tweet : tweets) {
			tweet = TweetTidy.doTidyAll(tweet).toLowerCase();
			String tokens[] = tokenizer.tokenize(tweet);
			if (tokens.length == 0) {
				continue;
			}
			HashMap<String, Boolean> temp = new HashMap<>();
			for (String token : tokens) {
				if (stopwords.containsKey(token)) {
					continue;
				} else {
					MapTools.add(tf, token);
					if (temp.containsKey(token)) {
						continue;
					} else {
						temp.put(token, true);
						MapTools.add(df, token);
					}
				}
			}
		}

		// idf = log(N / df); "hybrid" tf-idf uses topic-global tf.
		Double numberOfLine = (double) tweets.length;
		for (String key : df.keySet()) {
			idf.put(key, Math.log(numberOfLine / df.get(key)));
			tfidf.put(key, tf.get(key) * idf.get(key));
		}

		// Score each tweet as the mean tf-idf of its non-stopword tokens.
		ArrayList<Double> f1 = new ArrayList<>();
		Double max = -1.0;
		for (int i = 0; i < tweets.length; i++) {
			String line = tweets[i];
			line = TweetTidy.doTidyAll(line).toLowerCase();
			String tokens[] = tokenizer.tokenize(line);
			if (tokens.length == 0) {
				f1.add(0.0);
				continue;
			}
			double score = 0.0;
			double length = 0.0;
			for (String token : tokens) {
				if (stopwords.containsKey(token)) {
					continue;
				} else {
					score += tfidf.get(token);
					length++;
				}
			}
			if (length == 0) {
				f1.add(0.0);
				continue;
			}
			// length normalization, then track the max for [0,1] scaling
			score = score / length;
			if (score > max) {
				max = score;
			}
			f1.add(score);
		}
		// BUGFIX: if every tweet scored 0 the old code divided by the initial
		// max of -1.0, producing -0.0; only rescale when a positive max exists.
		if (max > 0) {
			for (int i = 0; i < f1.size(); i++) {
				f1.set(i, f1.get(i) / max);
			}
		}
		allFeatures.add(f1);
		System.err.println("Done!");

		/*
		 * Feature 2 : judge whether the tweet contains a quotation
		 * Feature 2_1: if the tweet starts with a quotation
		 */
		System.err.print("2. Starting quotation finding...");
		ArrayList<Double> f2 = new ArrayList<>();
		ArrayList<Double> f2_1 = new ArrayList<>();
		for (String tweet : tweets) {
			tweet = TweetTidy.doTidyAll(tweet).toLowerCase();
			f2.add(tweet.contains("\"") ? 1.0 : 0.0);
			f2_1.add(tweet.startsWith("\"") ? 1.0 : 0.0);
		}
		allFeatures.add(f2);
		// BUGFIX: f2_1 was computed but never added to the feature set,
		// although the comment above documents it as Feature 2_1.
		allFeatures.add(f2_1);
		System.err.println("Done!");

		/*
		 * Feature 3 : whether there is a url in the tweet
		 */
		System.err.print("3. Starting url finding...");
		ArrayList<Double> f3 = new ArrayList<>();
		for (String tweet : tweets) {
			tweet = TweetTidy.doTidyHTML(tweet);
			ArrayList<String> urls = URLTools.getURLs(tweet);
			f3.add(urls.size() > 0 ? 1.0 : 0.0);
		}
		allFeatures.add(f3);
		System.err.println("Done!");

		/*
		 * Feature 4 : LDA-based features (max topic probability and
		 * normalized entropy of the inferred topic distribution).
		 */
		System.err.println("4. Starting the LDA calculation...");
		// Step 1. Find the 5 most frequent (by document frequency) background
		// words of this topic; they are filtered out of the LDA input and
		// reused for Feature 5.
		List<String> bWords = new ArrayList<>(5);
		TreeMap<Double, ArrayList<String>> sortedDF = new TreeMap<>();
		for (String key : df.keySet()) {
			if (sortedDF.containsKey(df.get(key))) {
				ArrayList<String> words = sortedDF.get(df.get(key));
				words.add(key);
			} else {
				ArrayList<String> words = new ArrayList<>();
				words.add(key);
				sortedDF.put(df.get(key), words);
			}
		}
		System.err.print("\tTop 5 background words: ");
		// BUGFIX: the old loop never broke out once 5 words were collected and
		// added entire tie-groups at once, so bWords could exceed 5 entries —
		// which broke Feature 5's fixed /5.0 normalization. Cap at exactly 5.
		int count = 0;
		collect:
		for (Double freq : sortedDF.descendingKeySet()) {
			for (String word : sortedDF.get(freq)) {
				bWords.add(word);
				System.err.print(word + "(" + freq + ")\t");
				count++;
				if (count >= 5) {
					break collect;
				}
			}
		}
		System.err.println();
		// Step 2. LDA training.
		// Pipes: lowercase, stem, tokenize, drop background words, remove
		// stopwords, map to feature sequence.
		ArrayList<Pipe> pipeList = new ArrayList<Pipe>();
		pipeList.add(new CharSequenceLowercase());
		pipeList.add(new StemmingPipe());
		pipeList.add(new CharSequence2TokenSequence(Pattern.compile("\\p{L}[\\p{L}\\p{P}]+\\p{L}")));
		pipeList.add(new KWFilterPipe(bWords));
		pipeList.add(new TokenSequenceRemoveStopwords(new File("data/stopwords.dat"), "UTF-8", true, false, false));
		pipeList.add(new TokenSequence2FeatureSequence());

		InstanceList tweetsInstances = new InstanceList(new SerialPipes(pipeList));

		for (String tweet : tweets) {
			// tweets[] was already stripped of metadata above; the extra split
			// is a harmless no-op kept for safety.
			String[] elements = tweet.split("\t;;\t");
			String ldaContent = TweetTidy.doTidyAll(elements[0]);
			tweetsInstances.addThruPipe(new Instance(ldaContent, "0", "cs", ""));
		}

		ParallelTopicModel model = new ParallelTopicModel(numTopics, alphaSum, beta);
		model.addInstances(tweetsInstances);

		// Use 2 parallel samplers; each looks at half the corpus and
		// statistics are combined after every iteration.
		model.setNumThreads(2);
		model.setNumIterations(iterations);
		try {
			model.estimate();
		} catch (IOException e) {
			e.printStackTrace();
		}

		// Infer a topic distribution per tweet.
		System.err.println("4. Calculate the probabilities of tweets to the LDA topics...");
		ArrayList<ArrayList<Double>> f4 = new ArrayList<>();
		TopicInferencer inferencer = model.getInferencer();
		InstanceList tInstances = new InstanceList(new SerialPipes(pipeList));
		for (String tweet : tweets) {
			tInstances.addThruPipe(new Instance(TweetTidy.doTidyAll(tweet), "0", "cs", tweet));
		}

		ArrayList<Double> f4_1 = new ArrayList<>();
		ArrayList<Double> f4_2 = new ArrayList<>();
		// log(numTopics) is the entropy of the uniform distribution, i.e. the
		// maximum possible entropy — used to normalize f4_2 into [0,1].
		double maxEntropy = Math.log(numTopics);
		for (int i = 0; i < tInstances.size(); i++) {
			Instance inst = tInstances.get(i);
			double[] testProbabilities = inferencer.getSampledDistribution(inst, 10, 1, 5);
			// f4_1: the largest probability this tweet assigns to any topic.
			double maxprob = -1;
			for (double prob : testProbabilities) {
				if (prob > maxprob) {
					maxprob = prob;
				}
			}
			f4_1.add(maxprob);
			// f4_2: normalized entropy of the distribution.
			// BUGFIX: guard against prob == 0, where -p*log(p) evaluated to
			// 0 * -Infinity = NaN; the limit as p -> 0 is 0.
			double entropy = 0.0;
			for (double prob : testProbabilities) {
				if (prob > 0) {
					entropy += -prob * Math.log(prob);
				}
			}
			f4_2.add(entropy / maxEntropy);
		}
		f4.add(f4_1);
		f4.add(f4_2);
		allFeatures.addAll(f4);
		System.err.println("Done!");

		/*
		 * Feature 5 : how many of the top-5 background words the tweet
		 * contains, scaled to [0,1].
		 */
		System.err.println("5. Calculate the the tweet contain how many background words...");
		ArrayList<Double> f5 = new ArrayList<>();
		for (int i = 0; i < tweets.length; i++) {
			String tweet = tweets[i];
			tweet = TweetTidy.doTidyAll(tweet).toLowerCase();
			String tokens[] = tokenizer.tokenize(tweet);
			int foundNum = 0;
			for (String bword : bWords) {
				boolean found = false;
				for (String word : tokens) {
					if (bword.equals(word)) {
						found = true;
						break;
					}
				}
				if (found) {
					foundNum++;
				}
			}
			// bWords is capped at 5 entries above, so this stays in [0,1].
			f5.add(foundNum / 5.0);
		}
		allFeatures.add(f5);
		System.err.println("Done!");

		/*
		 * Feature 6 : the token length of the tweet
		 */
		System.err.println("6. Calculate the length of tweets...");
		ArrayList<Double> f6 = new ArrayList<>();
		for (int i = 0; i < tweets.length; i++) {
			String tweet = tweets[i];
			tweet = TweetTidy.doTidyAll(tweet).toLowerCase();
			String tokens[] = tokenizer.tokenize(tweet);
			f6.add((double) tokens.length);
		}
		allFeatures.add(f6);
		System.err.println("Done!");

		/*
		 * Feature 7 : presence of named entities, one binary feature per
		 * class: Time, Location, Organization, Person, Money, Percent, Date.
		 */
		System.err.println("7. Calculate the appearance of NEs...");
		@SuppressWarnings("unchecked")
		ArrayList<ArrayList<String>> allNETags = (ArrayList<ArrayList<String>>) ObjectTools.readFromFile(tNERPATH
				+ TOPIC);
		ArrayList<ArrayList<Double>> f7 = new ArrayList<>(7);
		for (int i = 0; i < 7; i++) {
			f7.add(new ArrayList<Double>());
		}
		HashMap<String, Integer> NERMapping = new HashMap<>();
		NERMapping.put("TIME", 1);
		NERMapping.put("LOCATION", 2);
		NERMapping.put("ORGANIZATION", 3);
		NERMapping.put("PERSON", 4);
		NERMapping.put("MONEY", 5);
		NERMapping.put("PERCENT", 6);
		NERMapping.put("DATE", 7);

		// NOTE(review): assumes allNETags has one entry per tweet, parallel to
		// tweets[] — verify against the NER preprocessing step; a shorter list
		// would cause an IndexOutOfBoundsException in the output loop below.
		for (int i = 0; i < allNETags.size(); i++) {
			boolean[] flags = new boolean[7];
			for (String tag : allNETags.get(i)) {
				if (NERMapping.containsKey(tag)) {
					flags[NERMapping.get(tag) - 1] = true;
				}
			}
			for (int j = 0; j < 7; j++) {
				f7.get(j).add(flags[j] ? 1.0 : 0.0);
			}
		}
		allFeatures.addAll(f7);
		System.err.println("Done!");

		// Emit one SVM-rank line per tweet: "label qid:1 1:v 2:v ...".
		StringBuilder fOutput = new StringBuilder();
		@SuppressWarnings("unchecked")
		ArrayList<Integer> labels = (ArrayList<Integer>) ObjectTools.readFromFile(rPATH + TOPIC);
		for (int i = 0; i < tweets.length; i++) {
			fOutput.append(labels.get(i));
			fOutput.append(' ');
			fOutput.append("qid:1 ");

			for (int j = 0; j < allFeatures.size(); j++) {
				ArrayList<Double> feature = allFeatures.get(j);
				fOutput.append(j + 1);
				fOutput.append(":");
				fOutput.append(String.format("%.3f", feature.get(i)));
				fOutput.append(' ');
			}

			fOutput.append('\n');
		}
		FileWriterEnhance fwe = new FileWriterEnhance(fPATH + TOPIC + "_" + numTopics, "UTF-8");
		fwe.WriteToFile(fOutput.toString());
	}

	/**
	 * Redirects stdout/stderr to log files, then runs feature extraction for
	 * one topic with the LDA topic count swept from 20 to 200 in steps of 20.
	 * The log PrintStreams are intentionally left open for the program's
	 * lifetime.
	 */
	public static void main(String[] args) {
		try {
			System.setOut(new PrintStream(new File("./logs/fe_L2R.out.log"), "UTF-8"));
			System.setErr(new PrintStream(new File("./logs/fe_L2R.err.log"), "UTF-8"));
		} catch (FileNotFoundException e) {
			e.printStackTrace();
		} catch (UnsupportedEncodingException e) {
			e.printStackTrace();
		}

		String TOPIC = "Russia meteor";

		// number of topics go through 20 to 200
		for (int i = 20; i <= 200; i += 20) {
			FE_L2R.run(i, TOPIC);
		}
	}
}
