/**
 * 
 */
package ca.uwindsor.cs.deepweb.estimation;

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;

import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.search.Hits;

/**
 * DataCollector works by collecting the instance of Hits in Lucene. Every
 * instance of Hits added to DataCollector will automatically compute Unique and
 * all queried items in total. When the queries are all finished, the invoker
 * could get the total Unique and amount of all queried items items for
 * analyzing.
 * 
 * @author Liang Jie
 */
public class DataCollector {

	/**
	 * Accumulated set of unique document IDs seen across all queries added so
	 * far.
	 */
	protected HashSet<String> set_UniqueItems;

	/**
	 * Running total of all items returned by every query added so far.
	 */
	protected long long_TotalItems;

	/**
	 * The name of the Lucene Field whose value is used as the document's ID.
	 */
	protected String string_FieldName;

	/**
	 * The number of items returned by the most recent query.
	 */
	protected int returnedItems;

	/**
	 * The document IDs returned by the previous query; used by the CR
	 * (capture-recapture) method to measure the overlap between two
	 * consecutive query results.
	 */
	protected HashSet<String> set_previousHitsID;

	/**
	 * The number of items in the latest query result that also appeared in
	 * the previous query result.
	 */
	protected int int_DuplicatedItemsCompareToLastQuery;

	/**
	 * The number of documents in the latest query result that had already
	 * been marked (seen) by any earlier query.
	 */
	protected int int_markedItemsinNewHits;

	/**
	 * Document frequency analyzer; fed with IDs only while
	 * {@link #isAnalyzeDocumentFrequency} is {@code true}.
	 */
	protected DocumentFrequencyAnalyzer dfanalyzer;

	/**
	 * Toggle of analysis of document frequency.
	 */
	protected boolean isAnalyzeDocumentFrequency;

	/**
	 * Sort by computed relevance
	 * 
	 * @see org.apache.lucene.search.Sort#Sort()
	 */
	public static final short SORT_COMPUTED_RELEVANCE = 1;

	/**
	 * Sort by document IDs
	 */
	public static final short SORT_DOCUMENT_ID = 2;

	/**
	 * Sort by document size
	 */
	public static final short SORT_DOCUMENT_SIZE = 3;

	/**
	 * Sort by document title. Sorting results might be the same with those
	 * sorted by document ID as document titles are not always available
	 */
	public static final short SORT_DOCUMENT_TITLE = 4;

	/**
	 * Constructor, initialize the objects
	 * 
	 * @param fieldname
	 *            The name of the Field in a Lucene Document which could be
	 *            considered as the Document's ID. It should be short and
	 *            identical to all other Documents. Avoid using content field
	 *            which has a large amount of characters!
	 */
	public DataCollector(String fieldname) {
		string_FieldName = fieldname;
		set_UniqueItems = new HashSet<String>();
		long_TotalItems = 0;
		returnedItems = 0;
		set_previousHitsID = new HashSet<String>();
		dfanalyzer = new DocumentFrequencyAnalyzer(fieldname);
		isAnalyzeDocumentFrequency = false;
	}

	/**
	 * Extracts the ID of every Document in the given Hits (the result of one
	 * query), accumulating them into the global unique set and updating the
	 * per-query statistics (returned count, overlap with the previous query,
	 * and number of already-marked documents).
	 * 
	 * @param hits
	 *            The result of a query from Lucene
	 * @return the set of IDs contained in this query result
	 * @see #set_UniqueItems
	 */
	public Set<String> add(Hits hits) {

		int_markedItemsinNewHits = 0;

		// read the length once instead of on every loop iteration
		int hitCount = hits.length();
		HashSet<String> newhitsids = new HashSet<String>(hitCount);

		// size of the current result set
		returnedItems = hitCount;

		// accumulate the total number of items queried so far
		long_TotalItems += hitCount;

		for (int offset = 0; offset < hitCount; offset++) {
			try {
				String id = hits.doc(offset).get(string_FieldName);
				// count documents already seen by an earlier query
				if (set_UniqueItems.contains(id)) {
					int_markedItemsinNewHits++;
				}
				set_UniqueItems.add(id);
				if (isAnalyzeDocumentFrequency) {
					dfanalyzer.addID(id);
				}
				newhitsids.add(id);
			} catch (IOException e) {
				// Also covers CorruptIndexException (a subclass of
				// IOException). Best-effort: report the failure and continue
				// with the remaining hits rather than abort the collection.
				e.printStackTrace();
			}
		}

		// overlap with the previous query: retainAll leaves the intersection
		set_previousHitsID.retainAll(newhitsids);
		int_DuplicatedItemsCompareToLastQuery = set_previousHitsID.size();

		// copy-construct instead of clone() to avoid an unchecked cast
		set_previousHitsID = new HashSet<String>(newhitsids);
		return newhitsids;
	}

	/**
	 * get the amount of total unique items
	 * 
	 * @return the amount of total unique items
	 */
	public int getUnique() {
		return set_UniqueItems.size();
	}

	/**
	 * @return The unique id set (the live internal set, not a copy)
	 */
	public Set<String> getUniqueIDs() {
		return this.set_UniqueItems;
	}

	/**
	 * Return the total items queried
	 * 
	 * @return the long_TotalItems
	 */
	public long getTotalItems() {
		return long_TotalItems;
	}

	/**
	 * Get the number of returned items of one query
	 * 
	 * @return the returnedItems
	 */
	public int getReturnedItems() {
		return returnedItems;
	}

	/**
	 * Get the number of duplicated items compared with the last query
	 * 
	 * @return the number of duplicated items compared with the last query
	 */
	public int getDuplicatedItemsCompareToLastQuery() {
		return int_DuplicatedItemsCompareToLastQuery;
	}

	/**
	 * Get the number of documents in the query result that have been
	 * marked (hit) by an earlier query
	 * 
	 * @return the number of documents in the query result that have been
	 *         marked (hit)
	 */
	public int getMarkedItems() {
		return int_markedItemsinNewHits;
	}

	/**
	 * Document Frequency Analyzer
	 * 
	 * @return Document Frequency Analyzer
	 */
	public DocumentFrequencyAnalyzer getDocumentFrequencyAnalyzer() {
		return dfanalyzer;
	}

	/**
	 * To set if it is needed to calculate document frequency and its
	 * distribution
	 * 
	 * @param isAnalyzeDocumentFrequency
	 *            the isAnalyzeDocumentFrequency to set
	 */
	public void setAnalyzeDocumentFrequency(boolean isAnalyzeDocumentFrequency) {
		this.isAnalyzeDocumentFrequency = isAnalyzeDocumentFrequency;
	}

	/**
	 * To return if it is set to calculate document frequency and its
	 * distribution
	 * 
	 * @return the flag
	 */
	public boolean isAnalyzeDocumentFrequency() {
		return isAnalyzeDocumentFrequency;
	}

}
