package ca.uwindsor.cs.deepweb.estimation.method.border;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Set;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.TermFreqVector;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;

import ca.uwindsor.cs.deepweb.estimation.FilesContentIndexer;
import ca.uwindsor.cs.deepweb.estimation.RandomWordSelector;

public class QueryRunner {
	/** Query-pool terms, lower-cased and trimmed, used to weight documents. */
	protected HashSet<String> querypool;
	/** The random words issued as queries against the index. */
	protected ArrayList<String> randomwords;
	// NOTE(review): appears unused in this class; kept because subclasses may rely on it
	protected int numofrandomwords;
	/** Query/document incidence matrix built while running the queries. */
	protected Matrix matrix;
	/** Path of the Lucene index for the corpus being estimated. */
	protected String corpusindex;
	// NOTE(review): appears unused in this class; kept for interface compatibility
	protected int EstimatedDQP;
	/** Count of unique documents "downloaded" (term vectors fetched). */
	protected int uniquedocumentdownloaded;

	/**
	 * Creates a runner over the given corpus index.
	 *
	 * @param corpusindex path of the Lucene index directory
	 * @param pool        the query pool; each entry is lower-cased and trimmed
	 * @param words       the random words to issue as queries
	 */
	public QueryRunner(String corpusindex, ArrayList<String> pool,
			ArrayList<String> words) {
		this.corpusindex = corpusindex;
		querypool = new HashSet<String>(pool.size());
		for (String s : pool) {
			querypool.add(s.toLowerCase().trim());
		}
		randomwords = words;
		matrix = new Matrix(randomwords.size());
		uniquedocumentdownloaded = 0;
	}

	/**
	 * Runs every random word as a query, records the query/document incidence
	 * in the matrix, and computes each unique document's inverse document
	 * weight (the number of query-pool terms it contains).
	 *
	 * @return the estimate: mean query weight multiplied by the pool size
	 * @throws Exception if the index cannot be read or a query fails to parse
	 * @deprecated misspelled; use {@link #estimate()} instead
	 */
	@Deprecated
	public int esitmate() throws Exception {
		IndexSearcher searcher = new IndexSearcher(corpusindex);
		IndexReader reader = IndexReader.open(corpusindex);
		try {
			QueryParser queryparser = new QueryParser(
					FilesContentIndexer.FIELD_CONTENT, new StandardAnalyzer());

			for (int i = 0; i < randomwords.size(); i++) {
				Query query = queryparser.parse(randomwords.get(i));
				Hits hits = searcher.search(query);

				// collect the external id of every matching document
				ArrayList<String> documents = new ArrayList<String>(hits.length());
				for (int j = 0; j < hits.length(); j++) {
					documents.add(hits.doc(j).get(FilesContentIndexer.FIELD_ID));
				}
				int[] docids = matrix.addQuery(i, documents);

				for (int j = 0; j < hits.length(); j++) {
					// skip documents already weighted by an earlier query
					if (matrix.hasDocument(docids[j])) {
						continue;
					}
					int docindexid = hits.id(j);

					// fetching the term vector simulates downloading the document
					TermFreqVector termFreqVector = reader.getTermFreqVector(
							docindexid, FilesContentIndexer.FIELD_CONTENT);
					uniquedocumentdownloaded++;

					int inversedocumentweight = 0;
					// getTermFreqVector returns null when the field has no
					// stored term vector; the original code would NPE here
					if (termFreqVector != null) {
						String[] terms = termFreqVector.getTerms();
						for (int k = 0; k < terms.length; k++) {
							if (querypool.contains(terms[k].toLowerCase())) {
								inversedocumentweight++;
							}
						}
					}
					matrix.setInverseDocumentWeight(docids[j],
							inversedocumentweight);
				}
			}
		} finally {
			// always release index resources, even when a query throws
			reader.close();
			searcher.close();
		}
		matrix.calculateQueryWeight();
		return (int) (matrix.getMeanQueryWeight() * querypool.size());
	}

	/**
	 * Correctly spelled alias for the historical {@link #esitmate()} method.
	 *
	 * @return the estimate: mean query weight multiplied by the pool size
	 * @throws Exception if the index cannot be read or a query fails to parse
	 */
	public int estimate() throws Exception {
		return esitmate();
	}

	/**
	 * @see Matrix#getWeightofQueries()
	 */
	public float[] getWeightofQueries() {
		return matrix.getWeightofQueries();
	}

	/**
	 * @see Matrix#getMeanQueryWeight()
	 */
	public float getMeanQueryWeight() {
		return matrix.getMeanQueryWeight();
	}

	/**
	 * @return the number of unique documents downloaded
	 * @deprecated misspelled; use {@link #getNumberofUniqueDocumentDownloaded()}
	 */
	@Deprecated
	public int getNumberofUniqueDocumentJDownloaded(){
		return uniquedocumentdownloaded;
	}

	/**
	 * Correctly spelled alias for
	 * {@link #getNumberofUniqueDocumentJDownloaded()}.
	 *
	 * @return the number of unique documents downloaded
	 */
	public int getNumberofUniqueDocumentDownloaded() {
		return uniquedocumentdownloaded;
	}

	/**
	 * @see Matrix#getUniqueDocumentIDs();
	 */
	public Set<Integer> getUniqueDocumentIDs(){
		return matrix.getUniqueDocumentIDs();
	}

	/**
	 * Returns one line per random word: the word, a tab, and its query weight.
	 */
	@Override
	public String toString() {
		StringBuilder s = new StringBuilder();
		float[] weights = matrix.getWeightofQueries();
		for (int i = 0; i < randomwords.size(); i++) {
			s.append(randomwords.get(i));
			s.append("\t");
			s.append(weights[i]);
			s.append("\n");
		}
		return s.toString();
	}

	/**
	 * Command-line entry point, for testing only.
	 *
	 * args[0] index directory, args[1] querypool file,
	 * args[2] number of random queries needed.
	 */
	public static void main(String[] args) throws IOException {
		if (args.length != 3) {
			System.err.println(
					"usage: QueryRunner <indexDir> <querypoolFile> <numQueries>");
			System.exit(-1);
		}
		RandomWordSelector selector = new RandomWordSelector(
				args[1], "UTF-8");
		ArrayList<String> querypool = selector.getWordList();
		int wordsquantity = Integer.parseInt(args[2]);
		ArrayList<String> words = new ArrayList<String>(wordsquantity);
		for (int i = 0; i < wordsquantity; i++) {
			words.add(selector.getRandomPhrase());
		}
		QueryRunner runner = new QueryRunner(args[0], querypool, words);
		try {
			System.out.println(runner.esitmate());
		} catch (Exception e) {
			// surface the failure; this is a test harness, not production code
			e.printStackTrace();
		}
	}
}
