package ca.uwindsor.cs.deepweb.estimation.method.poolbasedcoverage;

import java.io.FileOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

import ca.uwindsor.cs.deepweb.estimation.FilesContentIndexer;
import ca.uwindsor.cs.deepweb.estimation.RandomWordSelector;

/**
 * Selects query terms from a term list until the selected terms, issued as
 * term queries against a sample Lucene index, collectively "cover" a target
 * fraction of the sampled corpus (pool-based coverage estimation).
 */
public class WordSearchAndCoverage {

	/** Path of the file listing candidate query terms (read as UTF-8). */
	protected String termlistfile;

	/** Directory of the Lucene index built over the document sample. */
	protected String sampleindex;

	/** Output file that receives the selected terms, one per line (UTF-8). */
	protected String selectedtermfile;

	/** Terms chosen by the last {@link #search(float)} call; null before any call. */
	protected String[] selectedterms;

	/**
	 * @param termlistfile file containing the candidate term list (UTF-8)
	 * @param sampleindex directory of the Lucene sample index
	 * @param selectedtermfile file to which selected terms are written
	 */
	public WordSearchAndCoverage(String termlistfile, String sampleindex,
			String selectedtermfile) {
		this.termlistfile = termlistfile;
		this.sampleindex = sampleindex;
		this.selectedtermfile = selectedtermfile;
	}

	/**
	 * Issues each candidate term as a {@link TermQuery} against the sample
	 * index, accumulating the set of distinct matched document ids, and stops
	 * once the hit ratio (distinct docs hit / corpus size) exceeds
	 * {@code coverage} AND more than 1000 terms have been tried. The selected
	 * terms are kept in {@link #selectedterms} and written, one per line, to
	 * {@code selectedtermfile}.
	 *
	 * @param coverage target fraction of the corpus to cover, in (0, 1]
	 * @throws IOException on index-read or output-file failure
	 */
	public void search(float coverage) throws IOException {
		RandomWordSelector selector = new RandomWordSelector(termlistfile, "UTF-8");
		ArrayList<String> words = selector.getWordList();
		// NOTE(review): starts at index 1, skipping the first word in the
		// list — confirm this is intentional (e.g. a header line) and not an
		// off-by-one.
		int startingposition = 1;
		float HR = 0;

		// Only the document count is needed from the reader; close it before
		// searching so the handle is not held for the whole loop.
		IndexReader reader = IndexReader.open(sampleindex);
		int corpussize;
		try {
			corpussize = reader.numDocs();
		} finally {
			reader.close();
		}

		Set<String> h = new HashSet<String>();
		ArrayList<String> selectedword = new ArrayList<String>();
		IndexSearcher is = new IndexSearcher(sampleindex);
		try {
			for (int i = startingposition; i < words.size(); i++) {
				String q = words.get(i);
				Query query = new TermQuery(
						new Term(FilesContentIndexer.FIELD_CONTENT, q));
				selectedword.add(q);
				Hits hits = is.search(query);
				for (int j = 0; j < hits.length(); j++) {
					h.add(hits.doc(j).get(FilesContentIndexer.FIELD_ID));
				}
				// Hit ratio = distinct documents matched so far / corpus size.
				// NOTE(review): NaN if the index is empty (corpussize == 0).
				HR = (float) h.size() / corpussize;

				// Require a minimum of ~1000 issued terms before stopping,
				// even if the coverage target is already met.
				if (HR > coverage && (i - startingposition) > 1000) {
					break;
				}
			}
		} finally {
			is.close();
		}

		selectedterms = selectedword.toArray(new String[selectedword.size()]);

		// Open the output file only after the search succeeds, so a failure
		// above does not leave a truncated/empty file or leak the handle.
		FileOutputStream fp = new FileOutputStream(selectedtermfile);
		try {
			for (String word : selectedword) {
				fp.write(word.getBytes("UTF-8"));
				fp.write("\n".getBytes("UTF-8"));
			}
		} finally {
			fp.close();
		}
	}

	/** @return the terms selected by the last {@link #search(float)} call, or null. */
	public String[] getSelectedTerms() {
		return this.selectedterms;
	}

	/**
	 * Computes the hit ratio achieved by a given set of words against an
	 * index: the fraction of corpus documents matched by at least one word.
	 * Words are parsed with a {@link QueryParser} (analyzed), unlike
	 * {@link #search(float)} which uses raw term queries.
	 *
	 * @param index directory of the Lucene index to query
	 * @param words the query words to issue
	 * @return distinct documents hit / total documents in the index
	 * @throws CorruptIndexException if the index is corrupt
	 * @throws IOException on index I/O failure
	 * @throws ParseException if a word fails to parse as a query
	 */
	public static float checkHR(String index, List<String> words)
			throws CorruptIndexException, IOException, ParseException {
		IndexReader reader = IndexReader.open(index);
		int corpussize;
		try {
			corpussize = reader.numDocs();
		} finally {
			reader.close();
		}
		Set<String> h = new HashSet<String>();
		IndexSearcher is = new IndexSearcher(index);
		try {
			QueryParser queryparser = new QueryParser(
					FilesContentIndexer.FIELD_CONTENT, new StandardAnalyzer());
			for (String word : words) {
				Query query = queryparser.parse(word);
				Hits hits = is.search(query);
				for (int i = 0; i < hits.length(); i++) {
					h.add(hits.doc(i).get(FilesContentIndexer.FIELD_ID));
				}
			}
		} finally {
			is.close();
		}
		return (float) h.size() / corpussize;
	}

	/**
	 * Entry point. Currently only validates the argument count; the actual
	 * driver logic is unimplemented.
	 *
	 * @param args expected to contain exactly 7 arguments
	 * @throws IOException declared for future driver logic
	 */
	public static void main(String[] args) throws IOException {
		if (args.length != 7) {
			// Emit a diagnostic instead of exiting silently.
			System.err.println("Expected 7 arguments, got " + args.length);
			System.exit(-1);
		}
		// TODO(review): main body is empty — wire up WordSearchAndCoverage here.
	}

}
