package mahmoud.sigir.search;

import java.io.BufferedReader;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map.Entry;

import mahmoud.utils.MinHeap;
import mahmoud.utils.Result;
import mahmoud.utils.WieghtedTerm;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermFreqVector;
import org.apache.lucene.store.FSDirectory;



/**
 * Builds Rocchio-style expanded ("dense") queries over a Lucene index.
 *
 * The expanded query is  alpha*originalQuery + beta*relevantDocsVector - gamma*nonRelevantVector,
 * where the relevant-docs vector is accumulated from top-K search results and the
 * non-relevant vector is a whole-collection TF-IDF vector precomputed to a CSV file
 * (see {@link #computeNonRelevantVector}).
 *
 * NOTE(review): not thread-safe — {@code TempResult} and the static {@code luceneReader}
 * are shared mutable state.
 */
public class QueryGen 
{
	/** Lucene field the term-frequency vectors are read from. */
	public String Field = "All";
	private String indexLocation;
	public GroundTruthSearcher GTS;
	private MinHeap<Result> iResultHeap;
	public static IndexReader luceneReader;
	/** Scratch accumulator filled by readLuceneDoc(); cleared by the consumers. */
	HashMap<String, WieghtedTerm> TempResult = new HashMap<String, WieghtedTerm>();
	public static String OhsumedFile ="resources/builtIndexes/OhsumedVector.csv";
	public static String TrecFile = "resources/builtIndexes/TrecFTVector.csv";
	// Rocchio coefficients: original query (alpha), relevant docs (beta), non-relevant docs (gamma).
	private final static double alpha = 1, beta = 0.75, gamma = 0.15;

	/**
	 * Opens the Lucene index at {@code targetIndexLocation} and a ground-truth
	 * searcher over it. Terminates the JVM on failure.
	 *
	 * @param targetIndexLocation directory containing the Lucene index
	 */
	public QueryGen(String targetIndexLocation) 
	{
		try 
		{
			this.indexLocation = targetIndexLocation;
			luceneReader = IndexReader.open(FSDirectory.open(new File(indexLocation)));
			GTS = new GroundTruthSearcher(targetIndexLocation);
		}
		catch (Exception e) 
		{
			e.printStackTrace();
			System.out.println("could not open Index located @: "+targetIndexLocation);
			// FIX: was exit(0) — a failed startup must not report success to the shell.
			System.exit(1);
		}
	}

	/**
	 * Scans every document in the index, accumulates a collection-wide TF-IDF
	 * vector (the Rocchio "non-relevant" vector) and dumps it, sorted, to
	 * {@code outputFile} as {@code term,weight} CSV lines.
	 *
	 * @param outputFile CSV file to (over)write
	 */
	public void computeNonRelevantVector(String outputFile)
	{
		TempResult.clear();
		int allDocs = luceneReader.maxDoc();
		for(int i = 0; i < allDocs; i++)
		{
			System.out.println("analyzying doc No. "+ (i+1) + " of "+allDocs+" Docs ");
			// factor == maxDoc() signals "whole collection" mode to readLuceneDoc
			readLuceneDoc(i, allDocs);
		}

		ArrayList<WieghtedTerm> nonRelDocsVector = new ArrayList<WieghtedTerm>(TempResult.values());
		Collections.sort(nonRelDocsVector);
		dumpToFile(nonRelDocsVector, outputFile);
	}

	/**
	 * Writes the weighted terms to {@code FileName} (overwriting), one
	 * {@code term,weight} pair per line.
	 */
	private void dumpToFile(ArrayList<WieghtedTerm> al, String FileName)
	{
		BufferedWriter BW = null;
		try 
		{
			BW = new BufferedWriter(new FileWriter(FileName, false));
			for (WieghtedTerm wt : al)
			{
				BW.append(wt.Term + "," + wt.wieght + "\n");
			}
		} 
		catch (Exception e) 
		{
			e.printStackTrace();
			System.out.println("Tried to open file to save data in QueryGen.dumpToFile() failed");
		}
		finally
		{
			// FIX: writer was leaked on any exception after construction.
			if (BW != null)
			{
				try { BW.close(); } catch (Exception ignored) { /* best-effort close */ }
			}
		}
	}

	/**
	 * Reads a {@code term,weight} CSV produced by {@link #dumpToFile} back into
	 * a map keyed by term. Returns an empty map on I/O or parse failure.
	 */
	private HashMap<String, WieghtedTerm> readFile(String fileName)
	{
		HashMap<String, WieghtedTerm> fromFile = new HashMap<String, WieghtedTerm>();
		BufferedReader BR = null;
		try 
		{
			BR = new BufferedReader(new FileReader(fileName));
			String line;
			while((line = BR.readLine()) != null)
			{
				String[] parts = line.split(",");   // split once, not twice per line
				fromFile.put(parts[0], new WieghtedTerm(parts[0], Double.parseDouble(parts[1])));
			}
		} 
		catch (Exception e) 
		{
			e.printStackTrace();
			System.out.println("Tried to open file and read line in QueryGen.readFiled() failed");
		}
		finally
		{
			// FIX: reader was leaked on any exception after construction.
			if (BR != null)
			{
				try { BR.close(); } catch (Exception ignored) { /* best-effort close */ }
			}
		}
		return fromFile;
	}

	/**
	 * Shared Rocchio combination used by both constructDenseQuery variants.
	 * Merges the accumulated relevant-docs vector in {@code TempResult}
	 * (weighted by beta) with the original query, subtracts the gamma-weighted
	 * non-relevant vector loaded from {@code diskFile}, and returns the sorted,
	 * min-max-normalized result. Clears {@code TempResult} as a side effect.
	 */
	private ArrayList<WieghtedTerm> combineVectors(ArrayList<WieghtedTerm> iQuery, String diskFile)
	{
		HashMap<String, WieghtedTerm> merged = new HashMap<String, WieghtedTerm>();

		// Original query terms; duplicates accumulate (previous total scaled by alpha).
		for (WieghtedTerm wt : iQuery)
		{
			double w = wt.wieght;
			if (merged.containsKey(wt.Term))
				w += alpha * merged.get(wt.Term).wieght;
			merged.put(wt.Term, new WieghtedTerm(wt.Term, w));
		}

		// Relevant-docs terms, scaled by beta, added on top of any query weight.
		for (String term : TempResult.keySet())
		{
			double w = beta * TempResult.get(term).wieght;
			if (merged.containsKey(term))
				w += merged.get(term).wieght;
			merged.put(term, new WieghtedTerm(term, w));
		}
		TempResult.clear();

		// Subtract the gamma-weighted non-relevant (collection) vector.
		HashMap<String, WieghtedTerm> termsFromFileMap = readFile(diskFile);
		ArrayList<WieghtedTerm> result = new ArrayList<WieghtedTerm>();
		for (Entry<String, WieghtedTerm> entry : merged.entrySet())
		{
			String term = entry.getKey();
			double newWieght = entry.getValue().wieght;
			if (termsFromFileMap.containsKey(term))
				newWieght -= gamma * termsFromFileMap.get(term).wieght;
			// Drop non-finite weights so they cannot poison normalization.
			if (!Double.isNaN(newWieght) && !Double.isInfinite(newWieght))
				result.add(new WieghtedTerm(term, newWieght));
		}
		Collections.sort(result);
		return normalize(result);
	}

	/**
	 * Builds an expanded query from an already-computed result heap (the heap is
	 * drained), the original query and the non-relevant vector on disk.
	 *
	 * @param iRHeap    relevance-feedback results; consumed by this call
	 * @param iQuery    original weighted query terms
	 * @param diskFile  CSV holding the non-relevant collection vector
	 * @param threshold if &gt; 1, keep at most this many top terms
	 * @return sorted, normalized expanded query
	 */
	public ArrayList<WieghtedTerm> constructDenseQueryfromTD(MinHeap<Result> iRHeap, ArrayList<WieghtedTerm> iQuery, String diskFile, int threshold)
	{
		// FIX: clear first, like constructDenseQuery(), so stale terms from a
		// previous call cannot leak into this query.
		TempResult.clear();
		while (!iRHeap.isEmpty())
		{
			Result r = (Result) iRHeap.poll();
			readLuceneDoc(r.getID(), Searcher.TopK_DOCs);
		}
		ArrayList<WieghtedTerm> result = combineVectors(iQuery, diskFile);
		if (threshold > 1)
		{
			// FIX: original subList(0, threshold) threw when threshold > result.size().
			int cut = Math.min(threshold, result.size());
			return new ArrayList<WieghtedTerm>(result.subList(0, cut));
		}
		return result;
	}

	/**
	 * Builds an expanded query by first searching the index with {@code iQuery}
	 * (pseudo-relevance feedback via {@code GTS}) and then applying the Rocchio
	 * combination against the non-relevant vector in {@code diskFile}.
	 *
	 * @return sorted, normalized expanded query
	 */
	public ArrayList<WieghtedTerm> constructDenseQuery(ArrayList<WieghtedTerm> iQuery, String diskFile)
	{
		TempResult.clear();
		if (!GTS.checkOpenStatus())
		{
			System.out.println("Could not open Searcher correctly");
			System.exit(1);
		}
		iResultHeap = GTS.luceneSearch(iQuery);
		while (!iResultHeap.isEmpty())
		{
			Result r = (Result) iResultHeap.poll();
			readLuceneDoc(r.getID(), Searcher.TopK_DOCs);
		}
		return combineVectors(iQuery, diskFile);
	}

	/**
	 * Min-max normalizes the weights in place to [0, 1].
	 * Assumes WieghtedTerm's natural order puts the largest weight first
	 * (consistent with how callers use index 0 as the top term).
	 */
	private ArrayList<WieghtedTerm> normalize(ArrayList<WieghtedTerm> input)
	{
		// FIX: original crashed on an empty list (get(0)).
		if (input.isEmpty())
			return input;
		Collections.sort(input);
		double max = input.get(0).wieght;
		double min = input.get(input.size() - 1).wieght;
		double range = max - min;
		if (range == 0)
		{
			// FIX: original divided by zero here, turning every weight into NaN.
			for (WieghtedTerm wt : input)
				wt.wieght = 1;
			return input;
		}
		for (WieghtedTerm wt : input)
			wt.wieght = (wt.wieght - min) / range;
		return input;
	}

	/**
	 * Accumulates TF-IDF weights for the terms of document {@code id} into
	 * {@code TempResult}, each contribution divided by {@code factor}.
	 * When {@code factor == maxDoc()} (whole-collection scan) terms occurring
	 * fewer than 4 times in the doc are skipped to keep the vector small.
	 */
	private void readLuceneDoc(int id, int factor)
	{
		try 
		{
			TermFreqVector TFV = luceneReader.getTermFreqVector(id, Field);
			// FIX: original dereferenced TFV before its null check -> NPE for
			// documents with no stored term vector for this field.
			if (TFV == null)
				return;
			String[] terms = TFV.getTerms();
			int[] frequencies = TFV.getTermFrequencies();
			float DocLength = 0;
			for (int f : frequencies)
				DocLength += f;
			if (DocLength == 0)
				return; // empty vector: nothing to weight, avoid 0/0

			for (int k = 0; k < frequencies.length; k++)
			{
				if (factor == luceneReader.maxDoc() && frequencies[k] < 4)
					continue;
				double oldTFIDF = 0;
				if (TempResult.containsKey(terms[k]))
					oldTFIDF = TempResult.get(terms[k]).wieght;
				double TF = frequencies[k] / DocLength;
				// FIX: original divided two ints, truncating the ratio before
				// log10 (e.g. 100 docs / df 60 -> log10(1) = 0, not log10(1.58)).
				double IDF = Math.log10((double) luceneReader.maxDoc()
						/ (1 + luceneReader.docFreq(new Term(Field, terms[k]))));
				TempResult.put(terms[k], new WieghtedTerm(terms[k], oldTFIDF + (TF * IDF) / factor));
			}
		} 
		catch (Exception e) 
		{
			System.out.println("QueryGen.readLuceneDoc() failed for doc id " + id);
			e.printStackTrace();
		}  
	}

	/** Smoke test: expands a two-term query against the Ohsumed index and re-searches. */
	public static void main(String[] args) 
	{
		System.out.println("Testing");
		QueryGen QG = new QueryGen(Searcher.OhsumedIndexLocation);

		ArrayList<WieghtedTerm> iQuery = new ArrayList<WieghtedTerm>();
		iQuery.add(new WieghtedTerm("prostate", 1));
		iQuery.add(new WieghtedTerm("cancer", 1));
		long start, end;
		QG.GTS.openSearcher();

		// Warm-up search with the raw query (result intentionally unused).
		MinHeap<Result> r1 = QG.GTS.luceneSearch(iQuery);

		start = System.currentTimeMillis();
		// NOTE(review): Ohsumed index paired with the Trec vector file — looks
		// deliberate for experimentation, but verify before relying on results.
		ArrayList<WieghtedTerm> rQuery = QG.constructDenseQuery(iQuery, TrecFile);
		end = System.currentTimeMillis();
		System.out.println("construction Took: "+(end-start)/1000.0+" Seconds.");

		start = System.currentTimeMillis();
		MinHeap<Result> r = QG.GTS.luceneSearch(rQuery);
		end = System.currentTimeMillis();
		System.out.println(r);
		System.out.println("searching Took: "+(end-start)/1000.0+" Seconds.");
		QG.GTS.closeSearcher();
	}

}
