package ca.uwindsor.cs.deepweb.estimation.method.capturehistory;

import java.util.ArrayList;
import java.util.HashSet;
import java.util.Random;
import java.util.Set;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;

import ca.uwindsor.cs.deepweb.estimation.DataCollectorRandomReturns;
import ca.uwindsor.cs.deepweb.estimation.FilesContentIndexer;
import ca.uwindsor.cs.deepweb.estimation.RandomWordSelector;

public class HighDFTermQuery {
	protected HashSet<String> unique;   // document ids seen so far across all queries
	protected int total;                // cumulative count of returned ids (with repeats)
	protected int marked;               // ids in the current batch already seen before ("recaptures")
	protected int corpussize;           // unused here; kept for subclasses / callers
	protected int maxstepsize;          // unused here; kept for subclasses / callers
	protected Random r;                 // unused here; kept for subclasses / callers
	protected StringBuilder result;     // accumulated tab-separated report, one row per query
	protected String corpusindex;       // path to the Lucene index directory
	protected String dict;              // path to the dictionary file of query terms

	/**
	 * Creates an estimator that issues dictionary term queries against a
	 * Lucene index and records capture-history statistics for corpus-size
	 * estimation.
	 *
	 * @param corpusindex path of the Lucene index to search
	 * @param dict        path of the dictionary file supplying query terms
	 */
	public HighDFTermQuery(String corpusindex, String dict) {
		this.corpusindex = corpusindex;
		this.dict = dict;
		result = new StringBuilder();
		unique = new HashSet<String>();
	}

	/**
	 * Runs every dictionary word as a query against the index and, after each
	 * query, appends one tab-separated report row: query number, batch size,
	 * recaptured ids, unique ids so far, capture-recapture estimate, and
	 * overlap-rate (OR) estimate.
	 *
	 * @throws Exception if the index cannot be opened, the dictionary cannot
	 *                   be read, or a query fails to parse
	 */
	public void query() throws Exception {
		IndexSearcher searcher = new IndexSearcher(corpusindex);
		try {
			QueryParser queryparser = new QueryParser(
					FilesContentIndexer.FIELD_CONTENT, new StandardAnalyzer());

			RandomWordSelector selector = new RandomWordSelector(dict, "UTF-8");
			ArrayList<String> words = selector.getWordList();

			Set<String> newid;
			HashSet<String> duplicated = new HashSet<String>();
			double numerator = 0;
			double denominator = 0;
			// Empirically fitted exponent of the overlap-rate estimator
			// (presumably tuned for this corpus — TODO confirm source).
			float exponentofOR = -2.1f;
			short[] range = new short[2];
			range[0] = 1;
			range[1] = 40;

			DataCollectorRandomReturns datacollector = new DataCollectorRandomReturns(
					FilesContentIndexer.FIELD_ID, range);
			for (int i = 0; i < words.size(); i++) {
				Query query = queryparser.parse(words.get(i));
				Hits hits = searcher.search(query);

				newid = datacollector.add(hits);
				total += newid.size();

				// Count how many ids of this batch were captured before.
				duplicated.clear();
				duplicated.addAll(newid);
				duplicated.retainAll(unique);
				marked = duplicated.size();

				unique.addAll(newid);

				// Capture-recapture accumulators. Widen to double before
				// multiplying: the original int product n*u*u overflows for
				// large corpora.
				numerator += newid.size() * (double) unique.size() * unique.size();
				denominator += marked * (double) unique.size();

				// Capture-recapture estimate is undefined until the first
				// recapture (denominator == 0); report 0 instead of the
				// Integer.MAX_VALUE that casting Infinity would produce.
				int crEstimate = denominator != 0
						? (int) (numerator / denominator) : 0;

				// Overlap-rate estimate: N ~= u / (1 - OR^exponent).
				// When OR == 1 (no repeats yet) the denominator is 0; guard it.
				double OR = total / (double) unique.size();
				double orDenom = 1 - Math.pow(OR, exponentofOR);
				int orresult = orDenom != 0
						? (int) (unique.size() / orDenom) : 0;

				result.append(i + 1).append('\t')
						.append(newid.size()).append('\t')
						.append(duplicated.size()).append('\t')
						.append(unique.size()).append('\t')
						.append(crEstimate).append('\t')
						.append(orresult).append('\n');
			}
		} finally {
			searcher.close(); // release index files even if a query fails
		}
	}

	/** @return the accumulated report, one tab-separated row per query. */
	@Override
	public String toString() {
		return result.toString();
	}

	public static void main(String[] args) {
		HighDFTermQuery rs = new HighDFTermQuery(
				"F:\\indexes\\Reuters_grouped",
				"D:\\Research\\estimation\\Result\\Capture_History\\temp.txt");
		try {
			rs.query();
		} catch (Exception e) {
			e.printStackTrace();
		}
		System.out.println(rs.toString());
	}
}
