package org.jiangwei.cmput696.entitylinking.algorithm;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

import opennlp.tools.sentdetect.SentenceDetector;
import opennlp.tools.tokenize.Tokenizer;
import opennlp.tools.util.Span;

import org.apache.lucene.document.Document;
import org.apache.lucene.queryParser.ParseException;
import org.jiangwei.cmput696.entitylinking.ELCandidate;
import org.jiangwei.cmput696.entitylinking.ELQueriedDocument;
import org.jiangwei.cmput696.entitylinking.ELQuery;
import org.jiangwei.cmput696.entitylinking.IELAlgorithm;
import org.jiangwei.cmput696.entitylinking.tacindex.TacHelper;
import org.jiangwei.cmput696.entitylinking.tacindex.TacKBService;

/**
 * Entity-linking algorithm that ranks KB candidates for a query surface form
 * by TF-IDF similarity over "notable" words — capitalized tokens taken from
 * sentences that mention the surface form.
 */
public class UseMoreWordsAlgorithm implements IELAlgorithm {

	/** When true, only capitalized tokens are collected as notable words. */
	private static final boolean CAPITAL_ONLY = true;

	/** TF multiplier for tokens appearing in the first (title) sentence. */
	private static final double TITLE_BOOST = 1.5;

	/** Maximum number of ranked candidates returned to the caller. */
	private static final int MAX_RESULTS = 30;

	private final TacKBService tacService;

	public UseMoreWordsAlgorithm() throws IOException {
		tacService = new TacKBService();
	}

	/**
	 * Links the query's surface form to KB entries: gathers candidate
	 * documents via the alias index, collects notable words from the query
	 * document and every candidate, then ranks candidates by similarity.
	 *
	 * @param elQuery the entity-linking query holding the queried document
	 * @return up to {@link #MAX_RESULTS} candidates, best score first
	 */
	@Override
	public List<ELCandidate> search(ELQuery elQuery) {
		ELQueriedDocument qDoc = elQuery.getDocument();
		String surface = qDoc.getSurface();

		Map<String, Document> candiMap = getCandidateDocuments(surface);

		// The notable-word vocabulary is the union over the query document
		// and all candidate documents.
		Set<String> notableWords = new HashSet<String>();
		notableWords.addAll(findNotableInText(surface, qDoc.getTitle() + ". "
				+ qDoc.getText()));
		for (Document doc : candiMap.values()) {
			notableWords.addAll(findNotableInText(surface,
					doc.get(TacHelper.KEY_TEXT)));
		}

		// calculate similarities based on the notable words
		return rank(qDoc, candiMap, notableWords);
	}

	/**
	 * Finds notable words in {@code text}: tokens of length > 1 (and
	 * capitalized, when {@link #CAPITAL_ONLY} is set) drawn from sentences
	 * that contain at least one token of {@code seedStr}.
	 *
	 * @param seedStr the surface form whose tokens mark relevant sentences
	 * @param text    the document text to scan
	 * @return lower-cased notable words
	 */
	private Set<String> findNotableInText(String seedStr, String text) {
		SentenceDetector sentenceDetector = AlgoHelper.getSentenceDetector();
		Tokenizer tokenizer = AlgoHelper.getTokenizer();

		// Lower-cased seed tokens used to decide sentence relevance;
		// single-character tokens are too noisy to count as seeds.
		Set<String> seeds = new HashSet<String>();
		for (String seed : tokenizer.tokenize(seedStr)) {
			seed = seed.toLowerCase();
			if (seed.length() > 1) {
				seeds.add(seed);
			}
		}

		Set<String> notableSet = new HashSet<String>();
		for (String sent : sentenceDetector.sentDetect(text)) {
			String[] tokens = tokenizer.tokenize(sent);
			if (!containsSeed(tokens, seeds)) {
				continue;
			}
			for (String token : tokens) {
				if (token.length() > 1
						&& (!CAPITAL_ONLY || Character.isUpperCase(token
								.charAt(0)))) {
					notableSet.add(token.toLowerCase());
				}
			}
		}
		return notableSet;
	}

	/** Returns true if any token matches a seed word, case-insensitively. */
	private boolean containsSeed(String[] tokens, Set<String> seeds) {
		for (String token : tokens) {
			if (seeds.contains(token.toLowerCase())) {
				return true;
			}
		}
		return false;
	}

	/**
	 * Looks up candidate KB documents for the surface form through the
	 * alias-to-entity (a2e) index. Best-effort: if an I/O error occurs
	 * mid-lookup, the candidates gathered so far are returned.
	 *
	 * @param surface the queried surface form
	 * @return candidate documents keyed by wiki name
	 */
	private Map<String, Document> getCandidateDocuments(String surface) {
		// search a2e index for possible entity names
		Set<String> psbNames = AlgoHelper.queryAliasIndex(surface);

		Map<String, Document> candiMap = new HashMap<String, Document>();
		try {
			for (String wikiName : psbNames) {
				Document doc = tacService.getDocByWikiName(wikiName);
				if (doc != null) {
					candiMap.put(wikiName, doc);
				}
			}
		} catch (IOException e) {
			// deliberate best-effort: log and keep partial candidate set
			e.printStackTrace();
		}
		return candiMap;
	}

	/**
	 * Ranks the candidate documents against the query document. The query
	 * vector is converted to TF-IDF; candidate vectors keep raw TF, so the
	 * score is sum(tfidf_query(w) * tf_candidate(w)) over shared words.
	 *
	 * @param qDoc       the queried document
	 * @param candiMap   candidate documents keyed by wiki name
	 * @param notableSet the notable-word vocabulary
	 * @return up to {@link #MAX_RESULTS} candidates, best score first
	 */
	private List<ELCandidate> rank(ELQueriedDocument qDoc,
			Map<String, Document> candiMap, Set<String> notableSet) {

		// Corpus size for IDF: every candidate plus the query document.
		int N = candiMap.size() + 1;

		// DF counts, seeded at 0 for the notable vocabulary and updated as
		// a side effect of getWordVector() for each document processed.
		Map<String, Double> notableDF = new HashMap<String, Double>();
		for (String word : notableSet) {
			notableDF.put(word, 0.0);
		}

		// Raw TF vector of the query document (keys: words, values: TF).
		Map<String, Double> qVec = getWordVector(
				qDoc.getTitle() + ". " + qDoc.getText(), notableDF);

		// Raw TF vectors of all candidates, keyed by wiki name. Built
		// before calcTFIDF so the DF counts are complete first.
		Map<String, Map<String, Double>> cVects = new HashMap<String, Map<String, Double>>();
		for (Map.Entry<String, Document> entry : candiMap.entrySet()) {
			Document doc = entry.getValue();
			String text = doc.get(TacHelper.KEY_WIKI_TITLE) + ". "
					+ doc.get(TacHelper.KEY_TEXT);
			cVects.put(entry.getKey(), getWordVector(text, notableDF));
		}

		// Only the query vector is weighted by IDF.
		calcTFIDF(qVec, notableDF, N);

		List<ELCandidate> result = new ArrayList<ELCandidate>();
		for (Map.Entry<String, Map<String, Double>> entry : cVects.entrySet()) {
			ELCandidate cand = new ELCandidate();
			cand.setAnswer(entry.getKey());
			cand.setScore(getSimilarity(qVec, entry.getValue()));
			result.add(cand);
		}

		// sort the results with scores in decreasing order;
		// Double.compare is safe against boxing and NaN pitfalls
		Collections.sort(result, new Comparator<ELCandidate>() {
			@Override
			public int compare(ELCandidate a, ELCandidate b) {
				return Double.compare(b.getScore(), a.getScore());
			}
		});

		return result.subList(0, Math.min(MAX_RESULTS, result.size()));
	}

	/**
	 * Builds the raw term-frequency vector for {@code text}, counting only
	 * capitalized tokens longer than one character. Tokens of the first
	 * sentence that yields any counts (treated as the title) get their TF
	 * multiplied by {@link #TITLE_BOOST}. As a side effect, increments the
	 * DF count in {@code notableDF} once per distinct token of this text.
	 *
	 * @param text      the document text (title prepended by callers)
	 * @param notableDF document-frequency accumulator, updated in place
	 * @return lower-cased token to TF mapping
	 */
	private Map<String, Double> getWordVector(String text,
			Map<String, Double> notableDF) {

		SentenceDetector sentenceDetector = AlgoHelper.getSentenceDetector();
		Tokenizer tokenizer = AlgoHelper.getTokenizer();

		Map<String, Double> vec = new HashMap<String, Double>();
		Set<String> titleTokens = new HashSet<String>();

		boolean first = true;
		for (Span sentSpan : sentenceDetector.sentPosDetect(text)) {
			String sent = text.substring(sentSpan.getStart(), sentSpan.getEnd());
			Map<String, Double> tokenInSent = tokenizeSent(tokenizer, sent);
			if (tokenInSent.isEmpty()) {
				continue;
			}
			for (Map.Entry<String, Double> entry : tokenInSent.entrySet()) {
				addToVector(vec, entry.getKey(), entry.getValue());
				if (first) {
					titleTokens.add(entry.getKey());
				}
			}
			first = false;
		}

		// boost the title tokens' TF score
		for (String titleToken : titleTokens) {
			vec.put(titleToken, TITLE_BOOST * vec.get(titleToken));
		}

		// update DF scores: each distinct token counts once per document
		for (String token : vec.keySet()) {
			addToVector(notableDF, token);
		}

		return vec;
	}

	/**
	 * Converts the raw TF counts in {@code vec} to TF-IDF scores in place.
	 *
	 * @param vec       TF vector to rescale
	 * @param notableDF completed document-frequency counts
	 * @param n         corpus size for the IDF term
	 */
	private void calcTFIDF(Map<String, Double> vec,
			Map<String, Double> notableDF, int n) {
		for (Map.Entry<String, Double> entry : vec.entrySet()) {
			double tf = entry.getValue();
			double df = notableDF.get(entry.getKey());
			entry.setValue(getTFIDF(tf, df, n));
		}
	}

	/**
	 * Counts the capitalized tokens (length > 1) of one sentence.
	 *
	 * @return lower-cased token to occurrence-count mapping; empty when the
	 *         sentence has no qualifying tokens
	 */
	private Map<String, Double> tokenizeSent(Tokenizer tokenizer, String sent) {
		Map<String, Double> map = new HashMap<String, Double>();
		for (String token : tokenizer.tokenize(sent)) {
			if (token.length() > 1 && Character.isUpperCase(token.charAt(0))) {
				addToVector(map, token.toLowerCase());
			}
		}
		return map;
	}

	/**
	 * Dot product of the two sparse vectors over their shared keys.
	 */
	private double getSimilarity(Map<String, Double> tfa,
			Map<String, Double> tfb) {
		double score = 0;
		for (Map.Entry<String, Double> entry : tfa.entrySet()) {
			Double other = tfb.get(entry.getKey());
			if (other != null) {
				score += entry.getValue() * other;
			}
		}
		return score;
	}

	/**
	 * TF-IDF score. DF is floored at 1 so the logarithm never divides by
	 * zero and its argument never exceeds {@code n}.
	 */
	private double getTFIDF(double tf, double df, double n) {
		return tf * Math.log(n / Math.max(df, 1));
	}

	/**
	 * Adds {@code delta} to the entry for the lower-cased token, creating
	 * the entry when absent.
	 */
	private void addToVector(Map<String, Double> vector, String token,
			double delta) {
		token = token.toLowerCase();
		Double old = vector.get(token);
		vector.put(token, old == null ? delta : old + delta);
	}

	/** Increments the token's entry by 1. */
	private void addToVector(Map<String, Double> vector, String token) {
		addToVector(vector, token, 1);
	}

}
