package org.jiangwei.cmput696.entitylinking.algorithm;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.Reader;
import java.util.HashSet;
import java.util.Set;

import opennlp.tools.sentdetect.SentenceDetector;
import opennlp.tools.sentdetect.SentenceDetectorME;
import opennlp.tools.sentdetect.SentenceModel;
import opennlp.tools.tokenize.Tokenizer;
import opennlp.tools.tokenize.TokenizerME;
import opennlp.tools.tokenize.TokenizerModel;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.ngram.NGramTokenizer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.MMapDirectory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;

/**
 * Static helper for the entity-linking pipeline.
 *
 * <p>Holds two Lucene indexes loaded into RAM at class-initialization time
 * (alias-to-entity and entity-to-document JSON) and lazily loaded OpenNLP
 * sentence/tokenizer models. All members are static; this class is not
 * instantiable.
 *
 * <p>Thread-safety: the Lucene searchers are initialized once in the static
 * initializer and are safe for concurrent searches; the OpenNLP model
 * accessors are {@code synchronized} to make their lazy initialization safe.
 */
public final class AlgoHelper {

	/** Maximum n-gram length used by {@link NGramAnalyzer} for alias search. */
	private static final int NGRAM = 2;

	/** On-disk location of the alias-to-entity Lucene index. */
	public static final String ALIAS_INDEX_DIR = "/largedata1/cmput696/luceneIndex/lucene-a2e";

	/** On-disk location of the entity-to-document-JSON Lucene index. */
	public static final String ENTITY_INDEX_DIR = "/largedata1/cmput696/luceneIndex/lucene-e2d";

	private static IndexReader aliasReader;

	private static IndexSearcher aliasSearcher;

	private static IndexReader entityReader;

	private static IndexSearcher entitySearcher;

	private static Analyzer analyzer;

	// Lazily initialized by initModels(); access only via the synchronized
	// getters below.
	private static SentenceDetector sentenceDetector;

	private static Tokenizer tokenizer;

	/** Utility class — no instances. */
	private AlgoHelper() {
	}

	/**
	 * Returns the shared OpenNLP sentence detector, loading the model on
	 * first use.
	 *
	 * <p>{@code synchronized} so concurrent first callers cannot both run
	 * {@link #initModels()} or observe a partially initialized field.
	 *
	 * @return the sentence detector, or {@code null} if the model file could
	 *         not be loaded (initModels logs the failure and leaves the field
	 *         unset)
	 */
	public static synchronized SentenceDetector getSentenceDetector() {
		if (sentenceDetector == null) {
			initModels();
		}
		return sentenceDetector;
	}

	/**
	 * Returns the shared OpenNLP tokenizer, loading the model on first use.
	 *
	 * @return the tokenizer, or {@code null} if the model file could not be
	 *         loaded
	 */
	public static synchronized Tokenizer getTokenizer() {
		if (tokenizer == null) {
			initModels();
		}
		return tokenizer;
	}

	/**
	 * Loads the OpenNLP sentence and tokenizer models from the relative
	 * {@code ../test_files} directory and assigns the static fields.
	 *
	 * <p>Load failures are logged and left non-fatal (the fields stay
	 * {@code null}), matching the original best-effort behavior. Callers must
	 * hold the class lock (both callers are {@code synchronized}).
	 */
	private static void initModels() {
		InputStream sentModelInput = null, tokenModelInput = null;
		try {
			sentModelInput = new FileInputStream("../test_files/en-sent.bin");
			SentenceModel sentModel = new SentenceModel(sentModelInput);
			System.out.println(sentModel.getLanguage()
					+ " sentence model initiated.");

			sentenceDetector = new SentenceDetectorME(sentModel);

			// tokenize
			tokenModelInput = new FileInputStream("../test_files/en-token.bin");
			TokenizerModel tokenModel = new TokenizerModel(tokenModelInput);
			tokenizer = new TokenizerME(tokenModel);
		} catch (FileNotFoundException e1) {
			e1.printStackTrace();
		} catch (IOException e) {
			e.printStackTrace();
		} finally {
			if (sentModelInput != null) {
				try {
					sentModelInput.close();
				} catch (IOException ignored) {
					// Close failure after a successful (or already failed)
					// read is harmless; deliberately ignored.
				}
			}
			if (tokenModelInput != null) {
				try {
					tokenModelInput.close();
				} catch (IOException ignored) {
					// Deliberately ignored; see above.
				}
			}
		}
	}

	/**
	 * Analyzer producing character n-gram tokens, used for fuzzy alias
	 * matching.
	 */
	public static class NGramAnalyzer extends Analyzer {
		/** Maximum n-gram size; the minimum is fixed at 2 below. */
		private int ngram = 0;

		/**
		 * @param ngram maximum n-gram length emitted by the tokenizer
		 */
		public NGramAnalyzer(int ngram) {
			this.ngram = ngram;
		}

		public TokenStream tokenStream(String fieldName, Reader reader) {
			// NOTE(review): min gram is hard-coded to 2 while max comes from
			// the constructor. With NGRAM == 2 these coincide, but confirm the
			// intent before passing a larger max.
			return new NGramTokenizer(reader, 2, ngram);
		}
	}

	/**
	 * Opens the Lucene index at {@code indexDir}, copying it into RAM for
	 * fast searches.
	 *
	 * @param indexDir filesystem path of the index
	 * @return an open {@link IndexReader}, or {@code null} if the directory
	 *         holds no index or opening it failed
	 */
	private static IndexReader prepare(String indexDir) {
		Directory dir = null;
		try {
//			dir = FSDirectory.open(new File(indexDir));
			// Load the whole index into memory (via mmap) for speed.
			dir = new RAMDirectory(new MMapDirectory(new File(indexDir)));
			if (!IndexReader.indexExists(dir)) {
				return null;
			}
			return IndexReader.open(dir);
		} catch (IOException e) {
			e.printStackTrace();
			return null;
		}
	}

	static {
		analyzer = new NGramAnalyzer(NGRAM);
		aliasReader = prepare(ALIAS_INDEX_DIR);
		// Fail fast with a diagnosable message instead of an NPE from
		// new IndexSearcher(null) (both surface as ExceptionInInitializerError).
		if (aliasReader == null) {
			throw new IllegalStateException(
					"Could not open alias index at " + ALIAS_INDEX_DIR);
		}
		aliasSearcher = new IndexSearcher(aliasReader);
		entityReader = prepare(ENTITY_INDEX_DIR);
		if (entityReader == null) {
			throw new IllegalStateException(
					"Could not open entity index at " + ENTITY_INDEX_DIR);
		}
		entitySearcher = new IndexSearcher(entityReader);
	}

	/**
	 * Searches the alias index for entities whose alias matches
	 * {@code queryStr} (lower-cased, n-gram analyzed).
	 *
	 * @param queryStr the surface-form alias to look up; passed to Lucene's
	 *                 QueryParser, so query-syntax characters are interpreted
	 *                 (a malformed query is logged and yields an empty set)
	 * @return the candidate entity names (at most the top 350 hits), possibly
	 *         empty; never {@code null}
	 */
	public static Set<String> queryAliasIndex(String queryStr) {
		Set<String> ret = new HashSet<String>();

		try {
			queryStr = queryStr.toLowerCase();

			// Just do a quick search
			QueryParser parser = new QueryParser(Version.LUCENE_34, "alias",
					analyzer);
			Query query = parser.parse(queryStr);
			
			TopDocs td = aliasSearcher.search(query, 350);

			if (td == null || td.totalHits == 0) {
				System.out.println("No hits");
				return ret;
			}
//			td = aliasSearcher.search(query, td.totalHits);

			// further pruning unrelated results using other similarity
			// functions,
			// e.g, dice coefficient, edit distance, NGramDistance, etc.
			// Set<String> rankList1 = rankingByDiceCoefficient(td, queryStr);

			// The following simply put the search results into the ret.
			// You may need to prune these unrelated results.
			for (int i = 0; i < td.scoreDocs.length; i++) {
				int docId = td.scoreDocs[i].doc;
				Document doc = aliasReader.document(docId);
				Fieldable[] fields = doc.getFieldables("content");
				for (Fieldable field : fields) {
					String entName = field.stringValue();
					ret.add(entName);
				}
			}
		} catch (Exception e) {
			// Boundary catch: parse or I/O failure degrades to "no results".
			e.printStackTrace();
		}

		return ret;
	}

	/**
	 * Looks up the JSON document stored for an entity.
	 *
	 * @param entName the exact entity name (matched as a {@code docID} term)
	 * @return the JSON string for the entity, or {@code null} if the entity
	 *         or its {@code content} field is absent (or lookup failed)
	 */
	public static String queryEntityJSONIndex(String entName) {
		String ret = null;

		try {
			// convert the id->string map to string->id map.
			/*
			 * if (name2id == null) { name2id = new HashMap<String, Integer>();
			 * for (int i = 0; i < keyArray.length; i++)
			 * name2id.put(keyArray[i], i); }
			 * 
			 * if (!name2id.containsKey(entName)) return null;
			 * 
			 * int docId = name2id.get(entName);
			 */
			Query query = new TermQuery(new Term("docID", entName));
			TopDocs td = entitySearcher.search(query, 1);

			if (td == null || td.totalHits == 0) {
				System.out.println("No json hits");
				return null;
			}

			int docId = td.scoreDocs[0].doc;

			Document doc = entityReader.document(docId);
			Fieldable field = doc.getFieldable("content");
			// A stored doc without a "content" field previously NPE'd into
			// the catch below; return null explicitly instead.
			if (field == null) {
				return null;
			}
			ret = field.stringValue();
		} catch (Exception e) {
			// Boundary catch: lookup failure degrades to null.
			e.printStackTrace();
		}

		return ret;
	}

}
