package ca.uwindsor.cs.deepweb.estimation.method.poolbasedcoverage;

import java.io.FileOutputStream;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map.Entry;
import java.util.Set;

import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermFreqVector;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TermQuery;

import ca.uwindsor.cs.deepweb.estimation.FilesContentIndexer;
import ca.uwindsor.cs.deepweb.estimation.RandomWordSelector;
import ca.uwindsor.cs.deepweb.utility.documentanalysis.TermFrequencyCell;

public class QueryBasedSampleDocuments {

	/** Stop collecting once this many sampled document ids have been gathered. */
	private static final int SAMPLE_SIZE = 3000;

	/**
	 * Query-based document sampler: issues random single-term queries against a
	 * Lucene index until {@link #SAMPLE_SIZE} document ids have been collected,
	 * counts how often each term occurs across the sampled documents'
	 * term-frequency vectors, and writes the sorted term/count pairs to a file.
	 *
	 * Usage: args[0] = Lucene index directory,
	 *        args[1] = UTF-8 word-pool file for {@link RandomWordSelector},
	 *        args[2] = output file (one "term\tcount" line per term).
	 *
	 * @param args command-line arguments as described above
	 * @throws CorruptIndexException if the index is corrupt
	 * @throws Exception on any other I/O or search failure
	 */
	public static void main(String[] args) throws CorruptIndexException,
			Exception {
		if (args.length != 3) {
			// Tell the user what went wrong instead of exiting silently.
			System.err.println("Usage: QueryBasedSampleDocuments"
					+ " <indexDir> <wordPoolFile> <outputFile>");
			System.exit(-1);
		}

		IndexReader reader = IndexReader.open(args[0]);
		IndexSearcher searcher = new IndexSearcher(args[0]);
		try {
			RandomWordSelector selector = new RandomWordSelector(args[1], "UTF-8");
			ArrayList<Integer> sampledIds = sampleDocumentIds(searcher, selector);
			HashMap<String, Integer> termCounts = countTerms(reader, sampledIds);
			ArrayList<TermFrequencyCell> cells = sortedCells(termCounts);
			writeCells(cells, args[2]);
		} finally {
			// BUG FIX: the original never closed the IndexSearcher, and
			// reader.close() was skipped whenever an exception was thrown.
			searcher.close();
			reader.close();
		}
	}

	/**
	 * Repeatedly draws a random word from the pool, runs it as a TermQuery on
	 * the content field, and appends every hit's document id (duplicates
	 * included, matching the original sampling scheme) until more than
	 * SAMPLE_SIZE ids are collected; the list is then truncated to exactly
	 * SAMPLE_SIZE.
	 *
	 * @param searcher open searcher over the index being sampled
	 * @param selector source of random query words
	 * @return at most SAMPLE_SIZE sampled document ids
	 * @throws Exception if a search fails
	 */
	private static ArrayList<Integer> sampleDocumentIds(IndexSearcher searcher,
			RandomWordSelector selector) throws Exception {
		ArrayList<Integer> ids = new ArrayList<Integer>();
		while (true) {
			String word = selector.getRandomWord();
			if (word == null) {
				// Defensive: stop instead of looping forever if the pool is
				// exhausted. NOTE(review): confirm getRandomWord's contract —
				// the original assumed it always returns a word.
				break;
			}
			Query query = new TermQuery(new Term(
					FilesContentIndexer.FIELD_CONTENT, word));
			Hits hits = searcher.search(query);
			for (int j = 0; j < hits.length(); j++) {
				ids.add(Integer.valueOf(hits.id(j)));
			}
			if (ids.size() > SAMPLE_SIZE) {
				return new ArrayList<Integer>(ids.subList(0, SAMPLE_SIZE));
			}
		}
		return ids;
	}

	/**
	 * Counts, for each term in the sampled documents' stored term-frequency
	 * vectors, the number of sampled-list entries whose document contains it
	 * (a document id appearing twice in the sample is counted twice).
	 *
	 * @param reader open reader used to fetch term vectors
	 * @param sampledIds document ids produced by the sampling phase
	 * @return term -> count map
	 * @throws Exception if reading a term vector fails
	 */
	private static HashMap<String, Integer> countTerms(IndexReader reader,
			ArrayList<Integer> sampledIds) throws Exception {
		HashMap<String, Integer> counts = new HashMap<String, Integer>();
		for (Integer id : sampledIds) {
			// The tokenized terms of the content field are stored in the
			// document's TermFreqVector (null when vectors were not indexed).
			TermFreqVector vector = reader.getTermFreqVector(id.intValue(),
					FilesContentIndexer.FIELD_CONTENT);
			if (vector == null) {
				System.out.println("termFreqVector is null.");
				continue;
			}
			for (String term : vector.getTerms()) {
				Integer old = counts.get(term);
				counts.put(term, old == null ? Integer.valueOf(1)
						: Integer.valueOf(old.intValue() + 1));
			}
		}
		return counts;
	}

	/**
	 * Converts the term/count map into TermFrequencyCells sorted by the cells'
	 * natural ordering.
	 *
	 * @param counts term -> count map
	 * @return sorted list of cells
	 */
	private static ArrayList<TermFrequencyCell> sortedCells(
			HashMap<String, Integer> counts) {
		// BUG FIX: the original sized this list from a Set that was declared
		// but never populated, so the initial capacity was always 0.
		ArrayList<TermFrequencyCell> cells = new ArrayList<TermFrequencyCell>(
				counts.size());
		for (Entry<String, Integer> e : counts.entrySet()) {
			cells.add(new TermFrequencyCell(e.getKey(), e.getValue()));
		}
		Collections.sort(cells);
		return cells;
	}

	/**
	 * Writes one UTF-8 "term\tfrequency" line per cell to the given file,
	 * closing the stream even when a write fails.
	 *
	 * @param cells sorted term/frequency cells
	 * @param outputFile destination path
	 * @throws Exception if opening or writing the file fails
	 */
	private static void writeCells(ArrayList<TermFrequencyCell> cells,
			String outputFile) throws Exception {
		FileOutputStream out = new FileOutputStream(outputFile);
		try {
			for (TermFrequencyCell c : cells) {
				out.write(c.getTerm().getBytes("UTF-8"));
				out.write("\t".getBytes("UTF-8"));
				out.write(Integer.toString(c.getFrequency()).getBytes("UTF-8"));
				out.write("\n".getBytes("UTF-8"));
			}
		} finally {
			out.close();
		}
	}

}
