/** Exhibits standard Lucene searches for ranking documents.
 * 
 * @author Scott Sanner
 */

package search;

import java.io.*;
import java.text.DecimalFormat;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.BooleanQuery;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopScoreDocCollector;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.RAMDirectory;
import org.apache.lucene.store.SimpleFSDirectory;
import org.apache.lucene.util.Version;

import util.DocUtils;
import util.HammingDist;

public class SimpleSearchRankerKeywords extends SimpleSearchRanker {

	/** Maximum number of hits retrieved per query (replaces the hard-coded 10 flagged by the old TODO). */
	private static final int DEFAULT_MAX_HITS = 10;

	/** Name of the stored index field holding a document's file path. */
	private static final String PATH_FIELD = "PATH";

	public SimpleSearchRankerKeywords(String index_path, String default_field, Analyzer a) 
		throws IOException {
		super(index_path, default_field, a);
	}
	
	/**
	 * Runs an exact-phrase (slop 0) search for {@code query} and returns the
	 * {@code PATH} field of the top-scoring documents as a set.
	 *
	 * @param query raw query text; it is wrapped in quotes to form a phrase query,
	 *              so embedded quote characters may cause a parse failure
	 * @return the (possibly empty) set of PATH values of the top matching documents
	 * @throws Exception if query parsing or the index search fails
	 */
	public Set<String> doSearchSet(String query) 
		throws Exception {
		Set<String> resultSet = new HashSet<String>();
		for (ScoreDoc hit : searchPhrase(query, DEFAULT_MAX_HITS)) {
			resultSet.add(_searcher.doc(hit.doc).get(PATH_FIELD));
		}
		return resultSet;
	}
	
	/**
	 * Runs an exact-phrase search for {@code query} and, among the top hits,
	 * returns the path of the document whose word set has the minimum Hamming
	 * distance to the word set of the target file.
	 *
	 * @param query          raw query text; wrapped in quotes to form a phrase query
	 * @param targetDataName path of the target file to compare each hit against
	 * @return the path of the closest hit, or {@code null} when there are no hits
	 * @throws Exception if parsing, searching, or reading a file fails
	 */
	public String doSearch(String query, String targetDataName)
			throws Exception {
			int minDist = Integer.MAX_VALUE;
			String fileName = null;
			
			File targetFile = new File(targetDataName);
			Map<Object, Double> targetWordCount =
				DocUtils.ConvertToFeatureMapFilter(DocUtils.ReadFile(targetFile));
			
			for (ScoreDoc hit : searchPhrase(query, DEFAULT_MAX_HITS)) {
				// Fetch the stored document once per hit; the original code
				// re-read it a second time inside the if-block below.
				String path = _searcher.doc(hit.doc).get(PATH_FIELD);
				Map<Object, Double> sourceWordCount =
					DocUtils.ConvertToFeatureMapFilter(DocUtils.ReadFile(new File(path)));
				int dist = HammingDist.calc(targetWordCount.keySet(), sourceWordCount.keySet());
				
				if (dist < minDist) {
					minDist = dist;
					fileName = path;
				}
			}
			
			return fileName;
		}

	/**
	 * Shared search helper: parses {@code query} as an exact phrase
	 * ({@code "..."~0}) and collects up to {@code maxHits} top-scoring documents.
	 * Extracted to remove the duplicated parse/collect/search sequence that both
	 * public search methods previously carried.
	 *
	 * @param query   raw query text, quoted into a zero-slop phrase query
	 * @param maxHits maximum number of hits to collect
	 * @return the top-scoring hits, best first (possibly empty)
	 * @throws Exception if parsing or searching fails
	 */
	private ScoreDoc[] searchPhrase(String query, int maxHits) throws Exception {
		Query q = _parser.parse("\"" + query + "\"~0");
		TopScoreDocCollector collector = TopScoreDocCollector.create(maxHits, true);
		_searcher.search(q, collector);
		return collector.topDocs().scoreDocs;
	}

}
