package tools;
import java.io.File;
import java.io.Reader;
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.StringReader;
import java.util.Collections;
import java.util.Set;
import java.util.HashSet;
import java.util.Map;
import java.util.HashMap;
import java.util.List;
import java.util.ArrayList;

import org.apache.lucene.util.Version;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.document.Field.Index;
import org.apache.lucene.document.Field.Store;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.NumericField;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.MMapDirectory;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
import org.apache.lucene.index.TermFreqVector;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.QueryTermVector;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.ngram.NGramTokenizer;
import org.apache.lucene.analysis.TokenStream;

// Added by Lei Yao: file-output support used to dump a sample JSON document in main().
import java.io.FileWriter;
import java.io.BufferedWriter;
//////////////////////

public class LuceneIndexReader {
	private static final int NGRAM = 2;

	private IndexWriter writer = null;
	private IndexReader reader = null;
	private IndexSearcher searcher = null;

	private Analyzer analyzer = null;
	private String[] keyArray = null;
	private int[] sizeArray = null;
	public Map<String, Integer> name2id = null;

	private class NGramAnalyzer extends Analyzer {
		private int ngram = 0;
		
		public NGramAnalyzer(int ngram) {
			this.ngram = ngram;
		}
		
		public TokenStream tokenStream(String fieldName, Reader reader) {
			return new NGramTokenizer(reader, 2, ngram);
		}
	}

	public LuceneIndexReader() {
		analyzer = new NGramAnalyzer(NGRAM);
	}
	
	public LuceneIndexReader(String indexLoc) {
		this();
		loadIndex(indexLoc);
	}

	/**
	 * Init the index writer for the indexing. 
	 * 
	 * @param dirLoc The directory for storing the lucene index.
	 */
	public void initWriter(String dirLoc) {
		Directory dir = null;
		IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_34, analyzer); 
		Document doc = null;
		
		try {
			dir = new MMapDirectory(new File(dirLoc));
			writer = new IndexWriter(dir, conf);
		} catch (Exception e) {
			e.printStackTrace();
		}
	}

	/**
	 * Close and finalize the index writer.
	 */
	public void closeWriter() {
		try {
			writer.optimize();
			writer.close();
		} catch (Exception e) {
			e.printStackTrace();
		}
	}

	/**
	 * This function shows how we created the alias to entity mapping lucene index.
	 * This index has the following fields
	 * 	- docID	: 
	 *  - alias :
	 *  - size : size of the term vector (in n-gram)
	 *  - content : the entity name.
	 * Please refer to lucene 3.4 api document for details about how to query the index
	 * from different fields, 
	 * 
	 * @param alias The surface form of an entity. e.g. Jordan
	 * @param entities	The set of entities that the alis could refer to. e.g. Michael Jordan, 
	 * 		 			Eddie Jordan, Jordan, etc.
	 */
	public void addDocument(String alias, List<String> entities) {
		try {
			Document doc = new Document();
			
			Fieldable field = null;

			field = new Field("docID", alias.toLowerCase(), Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS);
			doc.add(field);
			field = new Field("alias", alias.toLowerCase(), Field.Store.YES, 
					Field.Index.ANALYZED, Field.TermVector.YES);
			doc.add(field);
			QueryTermVector vector = new QueryTermVector(alias.toLowerCase(), analyzer);
			NumericField nField = new NumericField("size");
			nField.setIntValue(vector.size());
			doc.add(nField);
			
			for (String entity : entities) {
				field = new Field("content", entity, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS);
				doc.add(field);
			}
			
			writer.addDocument(doc);
		} catch (Exception e) {
			e.printStackTrace();
		}
	}
	
	/**
	 * This function shows how we create the entity-to-JSON mapping index.
	 * Here JSON means the JSON representation of the entity (see the kb.json for details).
	 * Two fields are created:
	 * 	- docID : the entity name
	 * 	- content : the JSON content.
	 * 
	 * @param entityName
	 * @param entityJSON
	 */
	public void addDocument(String entityName, String entityJSON) {
		try {
			String name = entityName;
			String content = entityJSON;

			Document doc = new Document();
			Fieldable field = null;

			field = new Field("docID", name, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS);
			doc.add(field);
			field = new Field("content", content, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS);
			doc.add(field);

			writer.addDocument(doc);
		} catch (Exception e) {
			e.printStackTrace();
		}
	}

	/**
	 * This function gives an example about how to load the alias2entity alias.
	 * @param idxLoc
	 */
	private void loadIndex(String idxLoc) {
		Directory dir = null;
		
		try {
			dir = new RAMDirectory(new MMapDirectory(new File(idxLoc)));
			if (!IndexReader.indexExists(dir))
				return;
			
			reader = IndexReader.open(dir);

			//The keyArray is a mapping between the lucene internal id to the string in field "docID"
			//The mapping is stored here to improve the search performance.
			keyArray = FieldCache.DEFAULT.getStrings(reader, "docID");
			sizeArray= FieldCache.DEFAULT.getInts(reader, "size");
			searcher = new IndexSearcher(reader);
			System.out.println("Load index finished ");
		} catch (Exception e) {
			e.printStackTrace();
		}
	}
	
	public Set<String> queryAliasIndex(String queryStr) {
		Set<String> ret = new HashSet<String>();
		
		try {
			queryStr = queryStr.toLowerCase();

			//Just do a quick search
			QueryParser parser = new QueryParser(Version.LUCENE_34, "alias", analyzer);
			Query query = parser.parse(queryStr);
			//TopDocs td = searcher.search(query, 200);
			//TopDocs td = searcher.search(query, 500);
			TopDocs td = searcher.search(query, 350);
			if (td == null || td.totalHits == 0) {
				System.out.println("No hits");
				return null;
			}

			//further pruning unrelated results using other similarity functions, 
			//e.g, dice coefficient, edit distance, NGramDistance, etc.
			//Set<String> rankList1 = rankingByDiceCoefficient(td, queryStr);
			
			//The following simply put the search results into the ret. 
			//You may need to prune these unrelated results.
			for (int i = 0; i < td.scoreDocs.length; i++) {
				int docId = td.scoreDocs[i].doc;
				Document doc = reader.document(docId);
				Fieldable[] fields = doc.getFieldables("content");
				for (Fieldable field : fields) {
					List<String> list = null;
					String entName = field.stringValue();
					ret.add(entName);
				}
			}
		} catch (Exception e) {
			e.printStackTrace();
		}
		
		return ret;
	}
	
	public String queryEntityJSONIndex(String entName) {
		String ret = null;
		
		try {
			//convert the id->string map to string->id map.
			if (name2id == null) {
				name2id = new HashMap<String, Integer>();
				for (int i = 0; i < keyArray.length; i++)
					name2id.put(keyArray[i], i);
			}
			
			if (!name2id.containsKey(entName))
				return null;
				
			int docId = name2id.get(entName);
			Document doc = reader.document(docId);
			Fieldable field = doc.getFieldable("content");
			ret = field.stringValue();
		} catch (Exception e) {
			e.printStackTrace();
		}
		
		return ret;
	}
	
	public static void main(String[] args) {
		try {
			//load the alias2entity index and test the query function.
			LuceneIndexReader r1 = new LuceneIndexReader(args[0]);
			Set<String> ret1 = r1.queryAliasIndex("jordan");
			System.out.println("jordan:");
			for (String entName : ret1)
				System.out.println("\t" + entName);

			//load the entity2JSON index and test the query function.
			LuceneIndexReader r2 = new LuceneIndexReader(args[1]);
			String jsonDoc = r2.queryEntityJSONIndex("Michael Jordan");
			System.out.println("Michael Jordan");
			System.out.println("\t" + jsonDoc);

			//add by lei yao
   			File jsonFile = new File("/largedata1/cmput696/students/lyao1/lucene/jsonSample");
			BufferedWriter output = new BufferedWriter( new FileWriter(jsonFile) );
			output.write(jsonDoc);
			output.close();
			////////////////////

		} catch (Exception e) {
			e.printStackTrace();
		}
	}
}

