package searcher;

import java.util.*;

import util.cache.core.AbstractRecoverResult;
import util.cache.core.Cacher;
import util.cache.normal.NormalCacher;
import indexer.*;

/**
 * A somewhat improved — but still simple — searcher. Supports additive,
 * coercive (required) and forbidden query terms, ranks the matching files
 * and caches results per parsed query.
 * @author shitgao
 *
 */

public class NittySearcher implements ISearcher {
	private final ReverseIndex ri;
	// Per-query result cache, keyed by the parsed query. Capacity 100.
	// NOTE(review): assumes QueryParser implements equals/hashCode suitably
	// for use as a cache key — confirm against QueryParser.
	private final Cacher<QueryParser, HitSet> cacher =
		new NormalCacher<QueryParser, HitSet>("", null, 100);
	// Weight of the proximity component of the score relative to the
	// frequency/rarity component. Whether 10 works well should still be
	// verified empirically (original author's note).
	private static final float RANK_CONST_POS = 10f;

	public NittySearcher(ReverseIndex ri) {
		this.ri = ri;
	}

	/**
	 * Parses the request, computes the matching file set, ranks it and
	 * returns the hits. Results are served from and stored into the cache.
	 *
	 * @param request the search request carrying the raw query string
	 * @return ranked hits; an empty {@code HitSet} when nothing matches;
	 *         {@code null} when the query contains no usable terms
	 */
	public HitSet getSearchResult(Request request) {
		QueryParser qp = new QueryParser(request.getOriginalQuery());
		if (!qp.hasQueryStrings())
			return null;

		// Serve from cache when this exact parsed query was seen before.
		AbstractRecoverResult<HitSet> recoverResult = cacher.recover(qp);
		if (recoverResult != null) {
			System.out.println("Cached result returned for:'" + request.getOriginalQuery() + "'");
			return recoverResult.getValue();
		}

		Vector<FileNumSetElem> result = collectCandidates(qp);
		if (result == null)
			return new HitSet();

		// Score and rank, then wrap the file serial numbers into hits.
		Vector<Integer> rankedResult = rankResults(qp, result);
		HitSet hits = new HitSet();
		for (int i = 0; i < rankedResult.size(); i++)
			hits.addHit(new Hit(ri.queryFileSN(rankedResult.get(i))));

		cacher.cache(qp, hits);
		return hits;
	}

	/**
	 * Builds the candidate file set for the parsed query: the union of the
	 * additive terms' files, intersected with the coercive (required)
	 * terms' files when present, minus the forbidden terms' files.
	 *
	 * @return the candidate set, or {@code null} when nothing matches
	 */
	private Vector<FileNumSetElem> collectCandidates(QueryParser qp) {
		FileNumSet additiveFNS = new FileNumSet(stringsToSets(qp.getAdditiveStrings()));
		FileNumSet forbiddenFNS = new FileNumSet(stringsToSets(qp.getForbiddenStrings()));
		additiveFNS.union();
		forbiddenFNS.union();

		if (!qp.hasCoerciveStrings()) {
			// No required terms: any additive match that is not forbidden.
			additiveFNS.differenceWith(forbiddenFNS.getResults());
			return additiveFNS.getResults();
		}

		// Required terms present: every coercive term must appear; additive
		// matches are merged in on top (they influence ranking).
		FileNumSet coerciveFNS = new FileNumSet(stringsToSets(qp.getCoerciveStrings()));
		coerciveFNS.intersection();
		coerciveFNS.differenceWith(forbiddenFNS.getResults());
		coerciveFNS.mergeWith(additiveFNS.getResults());
		return coerciveFNS.getResults();
	}

	/**
	 * Maps each query word to the set of file numbers containing it.
	 *
	 * @param strings query words; may be {@code null} when the query has
	 *        no terms of that category
	 * @return one file-number set per word, or {@code null} for null input
	 */
	private Vector<Vector<Integer>> stringsToSets(List<String> strings) {
		if (strings == null)
			return null;
		Vector<Vector<Integer>> sets = new Vector<Vector<Integer>>(strings.size());
		for (String word : strings)
			sets.add(ri.filesContainsWord(word));
		return sets;
	}

	/**
	 * Scores every candidate file and returns the file serial numbers in
	 * descending score order. The score combines term frequency weighted by
	 * term rarity with a proximity bonus ({@code RANK_CONST_POS}).
	 * Additive and coercive words are currently weighted identically.
	 * NOTE(review): file length is not taken into account; adding it might
	 * improve the ranking scheme (original author's note).
	 *
	 * @param qp     the parsed query
	 * @param result the candidate files to rank
	 * @return serial numbers sorted best-first, or {@code null} when
	 *         {@code result} is {@code null}
	 */
	private Vector<Integer> rankResults(QueryParser qp, Vector<FileNumSetElem> result) {
		if (result == null)
			return null;
		List<String> additiveStrs = qp.getAdditiveStrings();
		List<String> coerciveStrs = qp.getCoerciveStrings();

		// Normalization length for the proximity component: half the
		// additive term count plus the full coercive term count.
		int len = 0;
		if (qp.hasAdditiveStrings())
			len += additiveStrs.size() / 2;
		if (qp.hasCoerciveStrings())
			len += coerciveStrs.size();

		FileNumSet fns = new FileNumSet(); // used only for leastCover()
		int size = result.size();
		// Heap elements pair a score with its index into result,
		// just for the convenience of heap-sorting.
		Vector<HeapElemFloat> score = new Vector<HeapElemFloat>(size);

		for (int i = 0; i < size; i++) {
			float scorethis = 0f; // primitive accumulator — avoids autoboxing
			int fSN = result.get(i).getKey();

			// Collect the query words that actually appear in this file:
			// all coercive words (guaranteed by the intersection), plus the
			// additive words flagged by getAppears().
			Vector<String> strs = new Vector<String>();
			if (qp.hasCoerciveStrings())
				strs.addAll(coerciveStrs);
			if (qp.hasAdditiveStrings()) {
				Vector<Integer> appears = result.get(i).getAppears();
				if (appears != null) {
					for (int j = 0; j < appears.size(); j++)
						strs.add(additiveStrs.get(appears.get(j)));
				}
			}

			// Frequency/rarity part: a word scores higher the more often it
			// appears in this file and the fewer files contain it overall.
			Vector<Vector<Integer>> posSets = new Vector<Vector<Integer>>(strs.size());
			for (int j = 0; j < strs.size(); j++) {
				String word = strs.get(j);
				Vector<Integer> vec = ri.positionsWordAppear(word, fSN);
				scorethis += Math.sqrt(((float) vec.size()) / ((float) ri.filesContainsWord(word).size()));
				posSets.add(vec);
			}

			// Proximity part: the closer together the words occur, the
			// smaller leastCover() and the larger the bonus.
			fns.resetSets(posSets);
			scorethis += RANK_CONST_POS * ((float) strs.size()) / ((float) (fns.leastCover() + len));
			score.add(new HeapElemFloat(scorethis, i));
		}

		// Heap-sort ascending, then fill from the back so the highest score
		// ends up first.
		MinHeap<HeapElemFloat> mh = new MinHeap<HeapElemFloat>();
		Vector<Integer> rankedResults = new Vector<Integer>(size);
		for (int i = 0; i < size; i++) {
			mh.addWithOrder(score.get(i));
			rankedResults.add(-1);
		}
		for (int i = 0; i < size; i++) {
			int serial = result.get(mh.removeWithOrder().getSource()).getKey();
			rankedResults.set(size - 1 - i, serial);
		}
		return rankedResults;
	}

}
