package tools;

import java.io.File;
import java.io.Reader;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;

import naive2.BoW;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.ngram.NGramTokenizer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.Fieldable;
import org.apache.lucene.document.NumericField;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.FieldCache;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.QueryTermVector;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.MMapDirectory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.util.Version;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;

import edu.stanford.nlp.ling.CoreLabel;
import edu.stanford.nlp.ling.CoreAnnotations.LemmaAnnotation;
import edu.stanford.nlp.ling.CoreAnnotations.SentencesAnnotation;
import edu.stanford.nlp.ling.CoreAnnotations.TokensAnnotation;
import edu.stanford.nlp.pipeline.Annotation;
import edu.stanford.nlp.pipeline.StanfordCoreNLP;
import edu.stanford.nlp.util.CoreMap;



//converts the original entity-to-JSON index into a lemmatized copy
/**
 * Rewrites an existing entity-to-JSON Lucene index into a lemmatized copy:
 * every textual field of each entity's JSON document (name, key values,
 * context/category keys, types, alias keys, alternative names) is replaced
 * by its lemmatized, lower-cased form before being written to a new index.
 */
public class IndexLemmelizer {
	private static final int NGRAM = 2;

	// Writer for the destination (lemmatized) index; created by initWriter().
	private IndexWriter writer = null;

	// Analyzer handed to the IndexWriterConfig (character n-grams).
	private Analyzer analyzer = null;

	// Destination directory for the lemmatized index.
	private String indexDir = null;

	// Reader over the source index; also supplies the name2id entity map.
	private LuceneIndexReader lr = null;
	// CoreNLP pipeline used for lemmatization (needs tokenize, ssplit, pos, lemma).
	private StanfordCoreNLP pipeline;

	/** Analyzer producing character n-grams of length 2 up to {@code ngram}. */
	private class NGramAnalyzer extends Analyzer {
		private int ngram = 0;

		public NGramAnalyzer(int ngram) {
			this.ngram = ngram;
		}

		public TokenStream tokenStream(String fieldName, Reader reader) {
			return new NGramTokenizer(reader, 2, ngram);
		}
	}

	public IndexLemmelizer() {
		analyzer = new NGramAnalyzer(NGRAM);
	}

	/**
	 * @param indexLoc destination directory for the lemmatized index
	 * @param lr       reader over the original index
	 * @param pipeline CoreNLP pipeline with at least tokenize, ssplit, pos, lemma
	 */
	public IndexLemmelizer(String indexLoc, LuceneIndexReader lr, StanfordCoreNLP pipeline) {
		this();
		this.indexDir = indexLoc;
		this.lr = lr;
		this.pipeline = pipeline;
	}

	/**
	 * Init the index writer for the indexing.
	 *
	 * @param dirLoc The directory for storing the lucene index.
	 */
	public void initWriter(String dirLoc) {
		IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_34, analyzer);
		try {
			Directory dir = new MMapDirectory(new File(dirLoc));
			writer = new IndexWriter(dir, conf);
		} catch (Exception e) {
			e.printStackTrace();
		}
	}

	/**
	 * Close and finalize the index writer. A no-op when the writer was never
	 * successfully initialized.
	 */
	public void closeWriter() {
		if (writer == null) {
			return;
		}
		try {
			writer.optimize();
			writer.close();
		} catch (Exception e) {
			e.printStackTrace();
		}
	}

	/**
	 * Copies every entity of the source index into the destination index,
	 * lemmatizing each entity's JSON document on the way. Entities whose
	 * JSON cannot be parsed are skipped (with a log message from
	 * {@link #lemmaJsondDoc(String)}).
	 */
	public void write(){

		this.initWriter(this.indexDir);

		this.lr.queryEntityJSONIndex("Michael Jordan");//make lr initate name2id
		Map<String,Integer> name2id = this.lr.name2id;

		int size = name2id.size();
		int count = 0;

		for( String key: name2id.keySet() ){
			String jsonDoc = this.lr.queryEntityJSONIndex(key);
			System.out.println("write:"+key);
			String lemmaJsonDoc = this.lemmaJsondDoc(jsonDoc);
			// Skip entities whose JSON could not be parsed/lemmatized; the
			// original code would hand a null to addDocument and rely on the
			// resulting exception being swallowed there.
			if (lemmaJsonDoc != null) {
				this.addDocument(key, lemmaJsonDoc);
			}
			System.out.println("current:"+count+"/"+size);
			count++;
		}

		this.closeWriter();
	}

	/**
	 * Returns a copy of the given entity JSON document with all textual
	 * content lemmatized: the name, the values under "keys", the keys of
	 * "contexts" and "categories" (frequencies preserved), each "types"
	 * entry (split on '/' and '_', parts lemmatized and space-joined), the
	 * keys of "aliases", and the alternative-name fields.
	 *
	 * @param jsonDoc the original JSON document string
	 * @return the lemmatized JSON string, or {@code null} if parsing failed
	 */
	public String lemmaJsondDoc( String jsonDoc){
		long t1 = System.currentTimeMillis();
		try {
			JSONObject jobj = new JSONObject(jsonDoc);

			// lemmatize the entity name
			jobj.put("name", this.lemma(jobj.getString("name")));

			// lemmatize key values in place (the keys themselves are kept)
			if( !jobj.isNull("keys") ){
				JSONObject keyobj = jobj.getJSONObject("keys");
				for(Object key:keyobj.keySet()){
					String s_key = (String)key;
					keyobj.put(s_key, this.lemma(keyobj.getString(s_key)));
				}
			}

			// lemmatize context keys (values are integer frequencies)
			if( !jobj.isNull("contexts")){
				jobj.put("contexts", this.lemmaFreqKeys(jobj.getJSONObject("contexts")));
			}

			// lemmatize category keys (values are integer frequencies)
			if( !jobj.isNull("categories")){
				jobj.put("categories", this.lemmaFreqKeys(jobj.getJSONObject("categories")));
			}

			// lemmatize types: split each path-like type on '/' or '_',
			// lemmatize the parts, and rejoin them space-separated
			if( !jobj.isNull("types")){
				JSONArray arr = jobj.getJSONArray("types");
				JSONArray larr = new JSONArray();
				for( int i=0; i <arr.length(); i++ ){
					String[] parts = arr.getString(i).split("/|_");
					StringBuilder sf = new StringBuilder();
					for(String s: parts){
						sf.append(this.lemma(s));
						sf.append(" ");
					}
					larr.put(sf.toString());
				}
				jobj.put("types", larr);
			}

			// lemmatize alias keys (alias payload objects are kept as-is)
			if( !jobj.isNull("aliases")){
				JSONObject aobj = jobj.getJSONObject("aliases");
				JSONObject laobj = new JSONObject();
				for( Object key: aobj.keySet() ){
					String skey = (String)key;
					JSONObject value = aobj.getJSONObject(skey);
					laobj.put(this.lemma(skey), value);
				}
				jobj.put("aliases", laobj);
			}

			// lemmatize alternativeName1
			if( !jobj.isNull("alternativeName1")){
				jobj.put("alternativeName1", this.lemma(jobj.getString("alternativeName1")));
			}

			// lemmatize alternativeName2
			if( !jobj.isNull("alternativeName2")){
				jobj.put("alternativeName2", this.lemma(jobj.getString("alternativeName2")));
			}

			// lemmatize alternativeNames array
			if( !jobj.isNull("alternativeNames")){
				JSONArray arr = jobj.getJSONArray("alternativeNames");
				JSONArray larr = new JSONArray();
				for(int i=0;i<arr.length();i++){
					larr.put(this.lemma(arr.getString(i)));
				}
				jobj.put("alternativeNames", larr);
			}

			long t2 = System.currentTimeMillis();
			System.out.println("used for lemma:"+(t2-t1) );
			return jobj.toString();

		} catch (JSONException e) {
			System.out.println("wrong while convert jsonDoc to jsonObj");
			e.printStackTrace();
			return null;
		}
	}

	/**
	 * Builds a new JSONObject whose keys are the lemmatized keys of the
	 * given object and whose values are the original integer frequencies.
	 * (If two keys lemmatize identically, the later one wins — same as the
	 * original inline loops for "contexts" and "categories".)
	 */
	private JSONObject lemmaFreqKeys(JSONObject obj) throws JSONException {
		JSONObject lobj = new JSONObject();
		for (Object key : obj.keySet()) {
			String s_key = (String) key;
			lobj.put(this.lemma(s_key), obj.getInt(s_key));
		}
		return lobj;
	}

	/**
	 * Lemmatizes the given text with the CoreNLP pipeline: each token is
	 * replaced by its lower-cased lemma; bracket placeholders
	 * (-LRB-/-RRB- in either case) and the punctuation "," and "." are
	 * dropped. Tokens are joined by single spaces and the result carries a
	 * trailing space.
	 *
	 * @param s the raw text
	 * @return the lemmatized, lower-cased text
	 */
	public String lemma(String s){

		Annotation document = new Annotation(s);
		pipeline.annotate(document);
		List<CoreMap> sentences = document.get(SentencesAnnotation.class);

		StringBuilder sb = new StringBuilder();
		for(CoreMap sentence: sentences) {
			// a CoreLabel is a CoreMap with additional token-specific methods
			for (CoreLabel token: sentence.get(TokensAnnotation.class)) {
				String lemma = token.get(LemmaAnnotation.class);
				// drop bracket placeholders and basic punctuation
				if( lemma.equals("-LRB-")|| lemma.equals("-RRB-")|| lemma.equals("-lrb-")||lemma.equals("-rrb-")||lemma.equals(",")||lemma.equals(".")){
					continue;
				}
				// lower-case because the JSON file content is all lower-cased
				sb.append(lemma.toLowerCase());
				sb.append(" ");
			}
		}
		return sb.toString();
	}


	/**
	 * This function shows how we create the entity-to-JSON mapping index.
	 * Here JSON means the JSON representation of the entity (see the kb.json for details).
	 * Two fields are created:
	 * 	- docID : the entity name
	 * 	- content : the JSON content.
	 *
	 * @param entityName the entity name, stored unanalyzed as docID
	 * @param entityJSON the (lemmatized) JSON content, stored unanalyzed
	 */
	public void addDocument(String entityName, String entityJSON) {
		try {
			Document doc = new Document();
			Fieldable field = null;

			field = new Field("docID", entityName, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS);
			doc.add(field);
			field = new Field("content", entityJSON, Field.Store.YES, Field.Index.NOT_ANALYZED_NO_NORMS);
			doc.add(field);

			writer.addDocument(doc);
		} catch (Exception e) {
			e.printStackTrace();
		}
	}

	/**
	 * Builds the CoreNLP pipeline and rewrites the hard-coded source index
	 * into a lemmatized index at the hard-coded destination path.
	 */
	public static void main(String[]args) throws JSONException{
		Properties props = new Properties();
		props.put("annotators", "tokenize, ssplit, pos, lemma");
		StanfordCoreNLP pipeline = new StanfordCoreNLP(props);
		LuceneIndexReader lr = new LuceneIndexReader("/largedata1/cmput696/luceneIndex/lucene-e2d");
		IndexLemmelizer il = new IndexLemmelizer("/largedata1/cmput696/students/lyao1/lemmaIndex/",lr,pipeline);
		il.write();
	}

}
