package ca.uwindsor.cs.deepweb.estimation.method.poolbasedcoverage;

import java.io.FileOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map.Entry;
import java.util.Set;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.TermFreqVector;
import org.apache.lucene.queryParser.ParseException;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.store.LockObtainFailedException;

import ca.uwindsor.cs.deepweb.estimation.FilesContentIndexer;
import ca.uwindsor.cs.deepweb.estimation.RandomWordSelector;
import ca.uwindsor.cs.deepweb.utility.documentanalysis.TermFrequencyCell;

public class QueryBasedSampleDocuments2 {

	/** Path of the source Lucene index documents are sampled from. */
	protected String index;

	/** Path of the dictionary file random query words are drawn from. */
	protected String dict;

	/** Path of the output file the ranked term list is written to. */
	protected String termlist;

	/** Path where the Lucene index of the sampled documents is created. */
	protected String sampleindex;

	/** Number of documents to sample. */
	protected int size;

	/** Total number of documents in the source index; set by {@link #start()}. */
	protected int corpussize;

	/**
	 * @param index
	 *            the source index
	 * @param dict
	 *            where words selects from
	 * @param termlist
	 *            where terms saves in
	 * @param sampleindex
	 *            where the index of sampled documents stores
	 * @param numberofdocs
	 *            number of documents needed
	 */
	public QueryBasedSampleDocuments2(String index, String dict,
			String termlist, String sampleindex, int numberofdocs) {
		this.index = index;
		this.dict = dict;
		this.termlist = termlist;
		this.sampleindex = sampleindex;
		this.size = numberofdocs;
	}

	/**
	 * Runs the whole sampling pipeline:
	 * <ol>
	 * <li>issues random single-word queries against the source index until at
	 * least {@code size} document ids have been collected (duplicates kept);</li>
	 * <li>re-indexes the sampled documents into {@link #sampleindex} while
	 * counting, for each term, the number of sampled documents containing it;</li>
	 * <li>writes the terms, sorted by descending count, to {@link #termlist}.</li>
	 * </ol>
	 * Also records the source corpus size (see {@link #getCorpusSize()}).
	 *
	 * @throws LockObtainFailedException if the sample index is write-locked
	 * @throws IOException on any index or file I/O failure
	 * @throws ParseException if a dictionary word cannot be parsed as a query
	 */
	public void start() throws LockObtainFailedException, IOException,
			ParseException {
		ArrayList<Integer> sampleddocumentids = collectSampleIds();
		HashMap<String, Integer> wordlist = buildSampleIndex(sampleddocumentids);
		writeTermList(wordlist);
	}

	/**
	 * Issues random single-word queries until more than {@link #size} hits have
	 * accumulated, then truncates the id list to exactly {@code size} entries.
	 * Duplicate ids are kept on purpose: a document hit by several words may be
	 * sampled more than once (sampling with replacement).
	 */
	private ArrayList<Integer> collectSampleIds() throws IOException,
			ParseException {
		RandomWordSelector selector = new RandomWordSelector(dict, "UTF-8");
		QueryParser queryparser = new QueryParser(
				FilesContentIndexer.FIELD_CONTENT, new StandardAnalyzer());
		ArrayList<Integer> sampleddocumentids = new ArrayList<Integer>();

		IndexSearcher is = new IndexSearcher(index);
		try {
			// NOTE(review): if no dictionary word ever matches a document this
			// loop never terminates — same as the original behavior; confirm
			// the dictionary is derived from the corpus before relying on it.
			while (sampleddocumentids.size() <= size) {
				String word = selector.getRandomWord();
				Query query = queryparser.parse(word);
				Hits hits = is.search(query);
				for (int j = 0; j < hits.length(); j++) {
					sampleddocumentids.add(hits.id(j));
				}
			}
		} finally {
			is.close();
		}
		return new ArrayList<Integer>(sampleddocumentids.subList(0, size));
	}

	/**
	 * Builds a new index at {@link #sampleindex} containing the sampled
	 * documents and returns a map from term to the number of sampled documents
	 * that contain it (a document-frequency count: each document contributes at
	 * most 1 per term, because {@code TermFreqVector.getTerms()} yields
	 * distinct terms). Also sets {@link #corpussize} from the source index.
	 */
	private HashMap<String, Integer> buildSampleIndex(
			ArrayList<Integer> sampleddocumentids) throws IOException {
		HashMap<String, Integer> wordlist = new HashMap<String, Integer>();
		Analyzer luceneAnalyzer = new StandardAnalyzer();
		// third argument 'true' recreates the sample index from scratch
		IndexWriter indexWriter = new IndexWriter(sampleindex, luceneAnalyzer,
				true);
		IndexReader reader = IndexReader.open(index);
		try {
			corpussize = reader.numDocs();
			for (Integer ID : sampleddocumentids) {
				// term vector of the tokenized contents field of this document
				TermFreqVector termFreqVector = reader.getTermFreqVector(ID
						.intValue(), FilesContentIndexer.FIELD_CONTENT);
				if (termFreqVector == null) {
					// document was indexed without term vectors; skip it
					System.out.println("termFreqVector is null.");
					continue;
				}

				String[] terms = termFreqVector.getTerms();
				StringBuilder builder = new StringBuilder();
				for (int j = 0; j < terms.length; j++) {
					String word = terms[j];
					builder.append(word).append(' ');
					Integer v = wordlist.get(word);
					wordlist.put(word, v == null ? Integer.valueOf(1)
							: Integer.valueOf(v.intValue() + 1));
				}

				Document document = new Document();
				document.add(new Field(FilesContentIndexer.FIELD_ID, ID
						.toString(), Field.Store.YES, Field.Index.UN_TOKENIZED));
				document.add(new Field(FilesContentIndexer.FIELD_CONTENT,
						builder.toString(), Field.Store.NO,
						Field.Index.TOKENIZED));
				indexWriter.addDocument(document);
			}
		} finally {
			reader.close();
			indexWriter.close();
		}
		return wordlist;
	}

	/**
	 * Writes the terms of {@code wordlist}, sorted by descending count (via
	 * {@code TermFrequencyCell}'s natural order, then reversed), one term per
	 * line, UTF-8 encoded, to {@link #termlist}.
	 */
	private void writeTermList(HashMap<String, Integer> wordlist)
			throws IOException {
		ArrayList<TermFrequencyCell> list = new ArrayList<TermFrequencyCell>(
				wordlist.size());
		for (Entry<String, Integer> e : wordlist.entrySet()) {
			list.add(new TermFrequencyCell(e.getKey(), e.getValue()));
		}
		Collections.sort(list);
		Collections.reverse(list);

		FileOutputStream fp = new FileOutputStream(termlist);
		try {
			for (TermFrequencyCell c : list) {
				fp.write(c.getTerm().getBytes("UTF-8"));
				fp.write("\n".getBytes("UTF-8"));
			}
		} finally {
			fp.close();
		}
	}

	/**
	 * @return the number of documents in the source index; only meaningful
	 *         after {@link #start()} has run (0 before that).
	 */
	public int getCorpusSize() {
		return this.corpussize;
	}

	/**
	 * @param args expects exactly 5 arguments; exits with -1 otherwise.
	 *            The sampling procedure itself is not wired up yet.
	 * @throws Exception
	 * @throws CorruptIndexException
	 */
	public static void main(String[] args) throws CorruptIndexException,
			Exception {
		if (args.length != 5) {
			System.exit(-1);
		}
		// TODO Auto-generated method stub

		// Sampling Procedure

	}
}
