package naive2;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Properties;

import org.json.JSONException;

import tools.DocRetriever;
import tools.LuceneNameRetriever;
import tools.QueryParser;
import tools.ResultOutput;
import edu.stanford.nlp.pipeline.StanfordCoreNLP;
import entities.Doc;
import entities.Entity;
import entities.Query;
import entities.Result;

/**
 * Naive bag-of-words entity linking: for each query, candidate entities are
 * scored against the query's source document using tf-idf, where idf is
 * computed over the candidate set of that query (one "document" per entity).
 *
 * NOTE(review): a normalization across the entities of the same query could
 * further down-weight words that every candidate shares.
 */
public class TfIdfLinking {
	private StanfordCoreNLP pipeline;     // tokenize/ssplit/pos/lemma pipeline
	private QueryParser qp;               // reads the query XML file
	private LuceneNameRetriever lr;       // surface name -> candidate entities
	private LuceneJsonRetriever lj;       // entity -> JSON record
	private DocRetriever dr;              // query doc-id -> document text
	private Doc2BoW d2b;                  // document -> bag of words
	private Json2BoWStop j2b;             // JSON entity -> bag of words (stop-word aware)
	private String resultPath;            // where run() writes the ranked results

	/**
	 * Uses the default query file and result location.
	 */
	public TfIdfLinking(){
		this("/largedata1/cmput696/students/lyao1/data/dev.xml",
				"/largedata1/cmput696/students/lyao1/data/result.xml");
	}

	/**
	 * @param queryDir  path of the query XML file to process
	 * @param resultDir path the result XML is written to by {@link #run()}
	 */
	public TfIdfLinking(String queryDir, String resultDir){
		Properties props = new Properties();
		props.put("annotators", "tokenize, ssplit, pos, lemma");
		this.pipeline = new StanfordCoreNLP(props);

		this.qp = new QueryParser(queryDir);
		this.lr = new LuceneNameRetriever("/largedata1/cmput696/luceneIndex/lucene-a2e");
		this.lj = new LuceneJsonRetriever("/largedata1/cmput696/students/lyao1/lemmaIndex");
		this.dr = new DocRetriever();
		this.d2b = new Doc2BoW(this.pipeline);
		// stop-word aware converter chosen over the earlier JsonEntity2BoW /
		// JsonEntity2BoW1 / J2BoWPOS experiments
		this.j2b = new Json2BoWStop(this.pipeline);
		this.resultPath = resultDir;
	}

	/**
	 * Processes every query and writes the top-ranked entities per query
	 * to {@code resultPath}.
	 *
	 * @throws Exception propagated from retrieval, scoring, or output
	 */
	public void run() throws Exception{
		List<Query> qs = this.qp.getQueries();
		int size = qs.size();
		int count = 0;
		ArrayList<Result> rlist = new ArrayList<Result>(size);
		for(Query query: qs){
			count++;
			rlist.add(this.processOnequery(query));
			System.out.println("current:"+count+"/"+size);
		}

		ResultOutput rop = new ResultOutput(this.resultPath, this.qp.getQueryPath(), rlist);
		rop.setNT(40); // keep the top 40 entities per query
		rop.output_nt();
	}

	/**
	 * Scores all candidate entities for one query.
	 *
	 * tf-idf is computed per query: {@code idf_count} holds the document
	 * frequency of each word among this query's candidates, not the idf
	 * itself (idf = log(D / idf_count), applied inside {@code BoW.tf_idf}).
	 *
	 * @param query the query mention to link
	 * @return the query together with its scored candidate entities
	 * @throws Exception propagated from retrieval or JSON parsing
	 */
	public Result processOnequery(Query query) throws Exception{
		// retrieve the document the query mention occurs in
		Doc doc = this.dr.getDocById(query.getDocid());
		System.out.println("start query:"+query.getName());
		BoW doc_bow = this.d2b.doc2bow(doc);

		// retrieve all candidate entities by surface name
		ArrayList<Entity> es = this.lr.getEntities(query.getName());

		// NOTE: if idf for a word is low, we could reduce that word's weight
		// in the query as well
		BoW idf_count = new BoW();
		int D = es.size();

		// bws.get(i) is the bag-of-words of es.get(i)
		ArrayList<BoW> bws = new ArrayList<BoW>(D);

		for(Entity e: es){
			JsonEntity je = JsonEntity.instance(e, this.lj);
			BoW ebow = this.j2b.jentity2bow(je);
			bws.add(ebow);

			// count each distinct word once per entity (document frequency)
			HashMap<String,Integer> bag = ebow.getbag();
			for(String word: bag.keySet()){
				idf_count.putWord(word);
			}
		}

		// indexed loop: the original used es.indexOf(e) inside a for-each,
		// which made this pass O(n^2) in the number of candidates
		for(int i = 0; i < es.size(); i++){
			this.scoreAEntity(es.get(i), bws.get(i), doc_bow, idf_count, D);
		}

		Result result = new Result(query, es);
		System.out.println("finish query:"+query.getName());
		return result;
	}

	/**
	 * Scores one entity against the query document via tf-idf and stores
	 * the score on the entity.
	 *
	 * @param e         the candidate entity to score (mutated: score is set)
	 * @param ebow      the entity's bag of words
	 * @param doc_bow   the query document's bag of words
	 * @param idf_count per-query document frequencies of entity words
	 * @param D         number of candidate entities for this query
	 * @throws JSONException propagated from the underlying JSON access
	 */
	public void scoreAEntity(Entity e, BoW ebow, BoW doc_bow, BoW idf_count, int D) throws JSONException{
		float score = doc_bow.tf_idf(idf_count, ebow, D);
		e.setScore(score);
	}

	public static void main(String[] args){
		try{
			TfIdfLinking til;
			// bug fix: the command-line paths were previously parsed,
			// validated in an empty if-block, and then silently ignored
			if(args.length >= 2 && args[0] != null && !args[0].isEmpty()
					&& args[1] != null && !args[1].isEmpty()){
				til = new TfIdfLinking(args[0], args[1]);
			} else {
				til = new TfIdfLinking();
			}
			til.run();
		} catch (Exception e) {
			e.printStackTrace();
		}
	}
}
