package ca.uwindsor.cs.deepweb.estimation.experiment.service;

import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.io.StringWriter;
import java.util.ArrayList;
import java.util.Map.Entry;
import java.util.Set;
import java.util.StringTokenizer;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.TermQuery;

import au.com.bytecode.opencsv.CSVWriter;
import ca.uwindsor.cs.deepweb.estimation.DataCollector;
import ca.uwindsor.cs.deepweb.estimation.DataCollectorWithLimitationRandom;
import ca.uwindsor.cs.deepweb.estimation.DataCollectorWithLimitationTop;
import ca.uwindsor.cs.deepweb.estimation.DocumentFrequencyAnalyzer;
import ca.uwindsor.cs.deepweb.estimation.FilesContentIndexer;
import ca.uwindsor.cs.deepweb.estimation.experiment.QueryRunner;
import ca.uwindsor.cs.deepweb.estimation.method.border.MatrixDEV;
import ca.uwindsor.cs.deepweb.estimation.method.border.TermsWeightDistribution;
import ca.uwindsor.cs.deepweb.utility.FileSizeDistributionAnalyzer;
import ca.uwindsor.cs.deepweb.utility.documentanalysis.LuceneIndexTermsAnalyzer;

/**
 * Runs a list of query terms against a Lucene index and renders the collected
 * statistics (OR-method size estimation, document frequency, capture-frequency,
 * size and weight distributions) as CSV sections embedded in one XML report.
 */
public class QueryService extends QueryRunner {

	// One CSV row per executed query; filled by generateORmethodResult().
	protected ArrayList<String[]> arraylist_record;

	// Directory of the auxiliary index mapping document ID -> document size.
	protected String sizeindex;
	// Directory of the auxiliary index mapping document ID -> unique/total term counts.
	protected String docuniquetermsindex;

	// True when the corpus index may contain repeated documents; selects which
	// field serves as the unique document identifier (see constructor).
	protected boolean isRepeatedDocumentIndex;

	// Field name used to de-duplicate documents across query results.
	protected String uniqueIDFieldName;

	// Running statistics for the OR estimation method.
	protected int unique;               // accumulated number of unique documents
	protected long accumulcativetotal;  // accumulated total of returned documents (sic: field name misspelled)

	protected float precentagecovered;  // unique / corpus_size (sic: field name misspelled)
	protected float OR;                 // overlap rate: accumulated total / unique
	protected int duplicatedfromlastquery;
	protected int estimatedsize;        // OR-method corpus-size estimate
	protected String[] result;          // scratch row appended to arraylist_record

	// Output toggles: each flag enables one CSV section of the XML report.
	protected boolean isOutputORmethodResult;
	protected boolean isOutputTermWeightAndNumofDoc;
	protected boolean isOutputTermWeightDistribution;
	protected boolean isOutputDocumentCaptureFrequencyDistribution;
	protected boolean isOutputDocumentSizeDistribution;
	protected boolean isOutputDocumentFrequency;
	protected boolean isOutputDocumentSize;
	protected boolean isOutputDocumentUniqueAndTotalTerms;
	protected boolean isOutputDocumentWeight;
	// True: collect the top hits of each query; false: a random subset.
	protected boolean isLimitedReturnTop;

	// One of the DataCollector.SORT_* constants; 0 = default relevance order.
	protected short sortmethod;

	protected boolean isReverseSorting;

	// Exponent applied to OR when computing the corpus-size estimate.
	protected float exponentofOR;
	
	// Query/document incidence matrix used for frequency and weight outputs.
	protected MatrixDEV matrix;

	/**
	 * Creates a query service over the given corpus index.
	 *
	 * @param indexdir     directory of the Lucene corpus index to query
	 * @param corpussize   actual corpus size, used to compute the coverage percentage
	 * @param termslist    query terms to execute, in order
	 * @param stepsize     maximum number of documents collected per query
	 * @param sizeindexdir directory of the auxiliary document-size index
	 * @param isRepeated   true if the corpus index contains repeated documents
	 * @throws IOException if parent initialization fails
	 */
	public QueryService(String indexdir, int corpussize, String[] termslist,
			int stepsize, String sizeindexdir, boolean isRepeated) throws IOException {
		super(indexdir, corpussize, termslist, null, "0", null);
		this.stepsize = stepsize;
		arraylist_record = new ArrayList<String[]>();
		sizeindex = sizeindexdir;
		isRepeatedDocumentIndex = isRepeated;
		// A repeated-document index needs the internal ID to tell copies apart;
		// otherwise the plain document ID is already unique.
		if (isRepeatedDocumentIndex) {
			uniqueIDFieldName = FilesContentIndexer.FIELD_INTERNAL_ID;
		} else {
			uniqueIDFieldName = FilesContentIndexer.FIELD_ID;
		}
		// Default output selection; override via the setters below.
		isOutputTermWeightAndNumofDoc = false;
		isOutputORmethodResult = true;
		isOutputDocumentCaptureFrequencyDistribution = true;
		isOutputDocumentSizeDistribution = true;
		isOutputDocumentFrequency = false;
		isOutputDocumentSize = false;
		isOutputDocumentUniqueAndTotalTerms = false;
		isOutputDocumentWeight = false;
		isLimitedReturnTop = false;
		sortmethod = 0;
		isReverseSorting = false;
		// Empirically chosen default exponent for the OR-method size estimate.
		exponentofOR = -2.2f / 2;
	}

	/** Builds the query parser over the content field with the standard analyzer. */
	protected void initialize() {
		final StandardAnalyzer contentAnalyzer = new StandardAnalyzer();
		queryparser = new QueryParser(FilesContentIndexer.FIELD_CONTENT, contentAnalyzer);
	}

	/**
	 * Executes a single query term against the corpus index and collects the
	 * matching document IDs through a freshly created data collector.
	 *
	 * @param term the query string to parse and execute
	 * @return the set of document IDs newly collected for this query
	 * @throws Exception if parsing or searching fails
	 */
	public Set<String> goQuery(String term) throws Exception {
		searcher = new IndexSearcher(indexDir);
		try {
			Query query = queryparser.parse(term);
			Hits hits;
			// Sort hits according to the configured DataCollector.SORT_* mode.
			switch (sortmethod) {
			case DataCollector.SORT_COMPUTED_RELEVANCE:
				hits = searcher.search(query, new Sort());
				break;
			case DataCollector.SORT_DOCUMENT_ID:
				hits = searcher.search(query, new Sort(
						FilesContentIndexer.FIELD_ID, isReverseSorting));
				break;
			case DataCollector.SORT_DOCUMENT_SIZE:
				hits = searcher.search(query, new Sort(
						FilesContentIndexer.FIELD_SIZE, isReverseSorting));
				break;
			case DataCollector.SORT_DOCUMENT_TITLE:
				hits = searcher.search(query, new Sort(
						FilesContentIndexer.FIELD_TITLE, isReverseSorting));
				break;
			default:
				hits = searcher.search(query);
				break;
			}
			datacollector = isLimitedReturnTop ? new DataCollectorWithLimitationTop(
					uniqueIDFieldName, stepsize)
					: new DataCollectorWithLimitationRandom(uniqueIDFieldName,
							stepsize);
			datacollector.setAnalyzeDocumentFrequency(false);
			// Fixed: consume the hits BEFORE closing the searcher. Lucene's
			// Hits fetches documents lazily from the searcher, so the previous
			// order (close, then add) read from a closed searcher.
			return datacollector.add(hits);
		} finally {
			searcher.close();
		}
	}

	/**
	 * Prepares the data collector, the incidence matrix and the searcher, then
	 * runs every term in termslist and returns the XML report from query().
	 *
	 * @return the XML result document as a string
	 * @throws Exception if searching fails
	 */
	public String goQuery() throws Exception {
		if (isLimitedReturnTop) {
			datacollector = new DataCollectorWithLimitationTop(
					uniqueIDFieldName, stepsize);
		} else {
			datacollector = new DataCollectorWithLimitationRandom(
					uniqueIDFieldName, stepsize);
		}
		datacollector.setAnalyzeDocumentFrequency(false);

		matrix = new MatrixDEV(uniqueIDFieldName, termslist);
		searcher = new IndexSearcher(indexDir);

		return query();
	}
	

	/**
	 * Core query loop: executes every term in termslist against the open
	 * searcher, feeds the hits to the data collector and the query/document
	 * incidence matrix, then renders each enabled output section as a CSV
	 * fragment wrapped in a single XML document.
	 *
	 * Assumes datacollector, matrix and searcher have already been initialized
	 * (see goQuery()).
	 *
	 * @return the XML report as a string
	 * @throws Exception if parsing, searching or CSV writing fails
	 */
	public String query() throws Exception{
		Hits hits = null;
		Query query = null;
		int listlength = termslist.length;
		int i;
		Set<String> newids;
		// Per-document bookkeeping in the matrix is only needed when one of
		// these output sections is requested.
		boolean isCalculateDF = (isOutputDocumentFrequency
				|| isOutputDocumentCaptureFrequencyDistribution
				|| isOutputTermWeightAndNumofDoc
				|| isOutputTermWeightDistribution || isOutputDocumentWeight) ? true
				: false;
		for (i = 0; i < listlength; i++) {
			query = queryparser.parse(termslist[i].trim());
//			query = new TermQuery(new Term(FilesContentIndexer.FIELD_CONTENT,termslist[i].trim()));
			// Sort hits according to the configured DataCollector.SORT_* mode.
			switch (sortmethod) {
			case DataCollector.SORT_COMPUTED_RELEVANCE:
				hits = searcher.search(query, new Sort());
				break;
			case DataCollector.SORT_DOCUMENT_ID:
				hits = searcher.search(query, new Sort(
						FilesContentIndexer.FIELD_ID, isReverseSorting));
				break;
			case DataCollector.SORT_DOCUMENT_SIZE:
				hits = searcher.search(query, new Sort(
						FilesContentIndexer.FIELD_SIZE, isReverseSorting));
				break;
			case DataCollector.SORT_DOCUMENT_TITLE:
				hits = searcher.search(query, new Sort(
						FilesContentIndexer.FIELD_TITLE, isReverseSorting));
				break;
			default:
				hits = searcher.search(query);
				break;
			}
			// Hits are consumed here, before the searcher is closed below.
			newids = datacollector.add(hits);
			if (isOutputORmethodResult) {
				generateORmethodResult(termslist[i]);
			}
			if (isCalculateDF) {
				matrix.addQuery(i, newids);
			}
		}
		searcher.close();

		if (isCalculateDF) {
			matrix.calculateInverseDocumentWeight();
		}
		// Scratch CSV row; several sections below reuse (and re-allocate) it.
		String[] record = new String[2];

		/* create xml file */
		StringBuilder resultxml = new StringBuilder();
		resultxml.append("<?xml version=\"1.0\" encoding=\"UTF-8\"?>");
		resultxml.append("\n<queryresult>");

		// Java CSV to store OR method result
		StringWriter stringwriter = new StringWriter();
		CSVWriter csvwriter = new CSVWriter(stringwriter);
		String[] head;
		if (isOutputORmethodResult) {
			head = new String[] { "Query word", "Accumulative Unique",
					"Accumulative Total", "OR", "Percentage", "Estimated Size",
					"Query Return", "Duplicated from Last Query Result",
					"Marked" };
			csvwriter.writeNext(head);
			// arraylist_record was filled by generateORmethodResult() above.
			csvwriter.writeAll(arraylist_record);
			csvwriter.flush();
			resultxml.append("\n<ORmethodresult format=\"CSV\">");
			resultxml.append(stringwriter.toString());
			resultxml.append("</ORmethodresult>");
			csvwriter.close();
			stringwriter.close();
		}

		DocumentFrequencyAnalyzer dfa = new DocumentFrequencyAnalyzer();
		/* java CSV to store document frequency */
		if (isOutputDocumentFrequency) {
			// Set<Entry<String, Integer>> s = mwords.getInverseDocumentWeight()
			// .entrySet();
			resultxml.append("\n<documentfrequency format=\"CSV\">");
			// HashMap<String, Integer> freqmap = datacollector
			// .getDocumentFrequencyAnalyzer().getDocumentFrequency();
			head = new String[] { "Term", "DocumentFrequency" };
			stringwriter = new StringWriter();
			csvwriter = new CSVWriter(stringwriter);
			csvwriter.writeNext(head);
			int f;
			for (i = 0; i < termslist.length; i++) {
				record = new String[2];
				f = matrix.getDocumentFrequencybyTerm(i);
				record[0] = termslist[i];
				record[1] = Integer.toString(f);
				csvwriter.writeNext(record);
			}
			csvwriter.flush();
			resultxml.append(stringwriter.toString());
			resultxml.append("</documentfrequency>");
			csvwriter.close();
			stringwriter.close();
		}

		/* java CSV to store document capture frequency distribution */
		if (isOutputDocumentCaptureFrequencyDistribution) {

			// Build a histogram of per-document capture counts.
			for (int f : matrix.getInverseDocumentWeight()) {
				dfa.addFrequency(f);
			}

			resultxml.append("\n<capturefrequencydistribution format=\"CSV\">");
			Set<Entry<Integer, Integer>> sf = dfa.getFrequencyMap().entrySet();
			head = new String[] { "occurrence", "quatity" };
			stringwriter = new StringWriter();
			csvwriter = new CSVWriter(stringwriter);
			csvwriter.writeNext(head);
			for (Entry<Integer, Integer> v : sf) {
				record[0] = v.getKey().toString();
				record[1] = v.getValue().toString();
				csvwriter.writeNext(record);
			}
			csvwriter.flush();

			resultxml.append(stringwriter.toString());
			resultxml.append("</capturefrequencydistribution>");
			csvwriter.close();
			stringwriter.close();
		}

		/* Java CSV to store document weight */
		if (isOutputDocumentWeight) {
			resultxml.append("\n<documentweight format=\"CSV\">");
			head = new String[] { "Document", "Weight" };
			stringwriter = new StringWriter();
			csvwriter = new CSVWriter(stringwriter);
			csvwriter.writeNext(head);
			record = new String[2];

			Set<Entry<String, Integer>> l = matrix.getUniqueIDset().entrySet();
			int[] idw = matrix.getInverseDocumentWeight();
			// Weight is the reciprocal of the document's inverse-document-weight
			// entry, looked up by the document's index in the matrix.
			for(Entry<String, Integer> e : l){
				record[0] = e.getKey();
				record[1] = Float.toString(1 / (float)idw[e.getValue().intValue()]);
				csvwriter.writeNext(record);
			}
			csvwriter.flush();
			resultxml.append(stringwriter.toString());
			resultxml.append("</documentweight>");
			csvwriter.close();
			stringwriter.close();
		}

		Set<String> uniqueset = datacollector.getUniqueIDs();

		// Size outputs require the auxiliary size index keyed by FIELD_ID, which
		// is only meaningful when the corpus has no repeated documents.
		if (!isRepeatedDocumentIndex
				&& (isOutputDocumentSize || isOutputDocumentSizeDistribution)) {

			/* Java CSV to store size distribution */
			if (isOutputDocumentSize) {
				head = new String[] { "ID", "size" };
				stringwriter = new StringWriter();
				csvwriter = new CSVWriter(stringwriter);
				csvwriter.writeNext(head);
			}

			/* Java CSV to store document size or size distribution */
			// Bucket width appears to be 1000 letters — see the labels below.
			FileSizeDistributionAnalyzer sizeanalyzer = new FileSizeDistributionAnalyzer(
					(short)1000, null);
			// load the size index
			searcher = new IndexSearcher(sizeindex);
			Document sizedoc;
			for (String id : uniqueset) {
				Term t = new Term(FilesContentIndexer.FIELD_ID, id);
				query = new TermQuery(t);
				hits = searcher.search(query);
				if (hits.length() != 0) {
					sizedoc = hits.doc(0);

					// if (sizedoc != null) {
					long size = new Long(sizedoc
							.get(FilesContentIndexer.FIELD_SIZE));

					/* Java CSV to calculate size distribution */
					if (isOutputDocumentSizeDistribution) {
						sizeanalyzer.addSize(size);
					}
					/* Java CSV to store document size */
					if (isOutputDocumentSize) {
						record[0] = id;
						record[1] = Long.toString(size);
						csvwriter.writeNext(record);
					}
				}
			}
			searcher.close();

			/* Java CSV to store document size */
			if (isOutputDocumentSize) {
				csvwriter.flush();
				resultxml.append("\n<documentsize format=\"CSV\">");
				resultxml.append(stringwriter.toString());
				resultxml.append("</documentsize>");
				csvwriter.close();
				stringwriter.close();
			}

			/* Java CSV to store size distribution */
			if (isOutputDocumentSizeDistribution) {
				ArrayList<Integer> sizedistrib = sizeanalyzer.getDistribution();

				head = new String[] { "Interval", "Quality" };
				stringwriter = new StringWriter();
				csvwriter = new CSVWriter(stringwriter);
				csvwriter.writeNext(head);
				int size = sizedistrib.size();
				if (size != 0) {
					record[0] = "less than or equals to 1k letter";
					record[1] = sizedistrib.get(0).toString();
					csvwriter.writeNext(record);

					// Convert per-bucket counts to a cumulative distribution
					// in place while writing each row.
					for (i = 1; i < size; i++) {
						sizedistrib.set(i, new Integer(sizedistrib.get(i)
								+ sizedistrib.get(i - 1)));
						record[0] = new String("less than or equals to "
								+ (i + 1) + "k letter");
						record[1] = new Integer(sizedistrib.get(i)).toString();
						csvwriter.writeNext(record);
					}
				}
				csvwriter.flush();

				resultxml.append("\n<sizedistribution format=\"CSV\">");
				resultxml.append(stringwriter.toString());
				resultxml.append("</sizedistribution>");
				csvwriter.close();
				stringwriter.close();
			}
		}

		// Output Term weight and its frequency

		if (isOutputTermWeightAndNumofDoc || isOutputTermWeightDistribution) {
			TermsWeightDistribution weightdistribution;
			weightdistribution = new TermsWeightDistribution();
			matrix.calculateQueryWeight();

			int num = 0;
			float w;
			record = new String[3];
			if (isOutputTermWeightAndNumofDoc) {
				head = new String[] { "Term", "Weight", "DocumentFrequency" };
				stringwriter = new StringWriter();
				csvwriter = new CSVWriter(stringwriter);
				csvwriter.writeNext(head);
			}

			float[] qw = matrix.getQueryWeight();
			for (i = 0; i < termslist.length; i++) {
				w = qw[i];
				if (isOutputTermWeightAndNumofDoc) {
					record[0] = termslist[i];
					record[1] = Float.toString(w);
					num = matrix.getDocumentFrequencybyTerm(i);
					record[2] = Integer.toString(num);
					csvwriter.writeNext(record);
				}
				if (isOutputTermWeightDistribution) {
					weightdistribution.addWeight(w);
				}
			}

			if (isOutputTermWeightAndNumofDoc) {
				csvwriter.flush();

				resultxml.append("\n<termweightandnumofdocs format=\"CSV\">\n");
				resultxml.append(stringwriter.toString());
				resultxml.append("</termweightandnumofdocs>");
				csvwriter.close();
				stringwriter.close();
			}

			if (isOutputTermWeightDistribution) {
				head = new String[] { "Weight", "Frequency" };
				record = new String[2];
				stringwriter = new StringWriter();
				csvwriter = new CSVWriter(stringwriter);
				csvwriter.writeNext(head);

				Set<Entry<Float, Integer>> weightdist = weightdistribution
						.getWeightdistribution().entrySet();
				for (Entry<Float, Integer> e : weightdist) {
					record[0] = "weight close to " + e.getKey();
					record[1] = e.getValue().toString();
					csvwriter.writeNext(record);
				}
				csvwriter.flush();

				resultxml.append("\n<termweightfrequencydist format=\"CSV\">\n");
				resultxml.append(stringwriter.toString());
				resultxml.append("</termweightfrequencydist>");
				csvwriter.close();
				stringwriter.close();
			}
		}

		/* Java CSV to store document and its unique and total terms */
		if (isOutputDocumentUniqueAndTotalTerms) {
			// load the size index
			searcher = new IndexSearcher(docuniquetermsindex);
			Document doc;
			head = new String[] { "ID", "TheNumberOfUniqueTerms",
					"TheNumberOfTotalTerms" };
			record = new String[3];
			stringwriter = new StringWriter();
			csvwriter = new CSVWriter(stringwriter);
			csvwriter.writeNext(head);
			for (String id : uniqueset) {
				Term t = new Term(FilesContentIndexer.FIELD_ID, id);
				query = new TermQuery(t);
				hits = searcher.search(query);
				if (hits.length() != 0) {
					doc = hits.doc(0);
					// if (doc != null) {
					record[0] = id;
					record[1] = doc.get(LuceneIndexTermsAnalyzer.FIELD_UNIQUE);
					record[2] = doc.get(LuceneIndexTermsAnalyzer.FIELD_TOTAL);
					csvwriter.writeNext(record);
				}
			}
			searcher.close();
			csvwriter.flush();

			resultxml.append("\n<documentuniqueandtotalterms format=\"CSV\">");
			resultxml.append(stringwriter.toString());
			resultxml.append("</documentuniqueandtotalterms>");
			csvwriter.close();
			stringwriter.close();
		}

		/* finalize */
		resultxml.append("\n</queryresult>");

		return resultxml.toString();
	}
	
	/**
	 * Records one row of OR-method statistics for the query word just executed
	 * and appends it to arraylist_record.
	 *
	 * OR (the overlap rate) is the accumulated total of returned documents
	 * divided by the number of unique documents; the corpus-size estimate is
	 * unique / (1 - OR^exponentofOR).
	 *
	 * @param word the query term this row describes
	 */
	protected void generateORmethodResult(String word) {
		unique = datacollector.getUnique();
		accumulcativetotal = datacollector.getTotalItems();
		duplicatedfromlastquery = datacollector
				.getDuplicatedItemsCompareToLastQuery();

		if (unique == 0) {
			OR = 0;
		} else {
			OR = accumulcativetotal / (float) unique;
		}
		precentagecovered = (float) unique / corpus_size;

		// Fixed: the previous code routed the estimate through
		// Double.longBitsToDouble/doubleToLongBits, relying on subnormal-double
		// bit patterns to perform the division. Compute it directly instead.
		// When the denominator is not positive (OR == 0 or OR == 1) the
		// estimate is undefined; keep the old behavior of reporting 0 there
		// (the "-" placeholder is emitted for OR == 0 below anyway).
		double denominator = 1 - Math.pow(OR, exponentofOR);
		if (denominator <= 0) {
			estimatedsize = 0;
		} else {
			estimatedsize = (int) Math.round(unique / denominator);
		}

		result = new String[9];

		result[0] = word;
		result[1] = Integer.toString(unique);
		result[2] = Long.toString(accumulcativetotal);
		result[3] = Float.toString(OR);
		result[4] = Float.toString(precentagecovered);
		if (OR == 0.0) {
			result[5] = "-";
		} else {
			result[5] = Integer.toString(estimatedsize);
		}
		result[6] = Integer.toString(datacollector.getReturnedItems());
		result[7] = Integer.toString(duplicatedfromlastquery);
		result[8] = Integer.toString(datacollector.getMarkedItems());

		arraylist_record.add(result);
	}

	/**
	 * @return the set of unique document IDs collected so far
	 */
	public Set<String> getUniqueIDSet() {
		return datacollector.getUniqueIDs();
	}

	/**
	 * Enables output of per-document unique/total term counts when a non-null
	 * index directory is given; disables it for null.
	 * (Method name keeps its historical misspelling "Ouput" for compatibility.)
	 *
	 * @param indexdir
	 *            the document and its number of unique terms index directory
	 */
	public void setOuputDocumentUniqueAndTotalTerms(String indexdir) {
		if (indexdir == null) {
			this.isOutputDocumentUniqueAndTotalTerms = false;
		} else {
			this.isOutputDocumentUniqueAndTotalTerms = true;
			docuniquetermsindex = indexdir;
		}
	}

	/**
	 * @return the isOutputDocumentUniqueAndTotalTerms
	 */
	public boolean isOuputDocumentUniqueAndTotalTerms() {
		return isOutputDocumentUniqueAndTotalTerms;
	}

	/**
	 * @param isOutputDocumentCaptureFrequencyDistribution
	 *            the isOutputDocumentCaptureFrequencyDistribution to set
	 */
	public void setOutputDocumentCaptureFrequencyDistribution(
			boolean isOutputDocumentCaptureFrequencyDistribution) {
		this.isOutputDocumentCaptureFrequencyDistribution = isOutputDocumentCaptureFrequencyDistribution;
	}

	/**
	 * NOTE(review): despite its name, this getter reports the
	 * capture-frequency-distribution flag, not the document-frequency flag.
	 *
	 * @return the isOutputDocumentCaptureFrequencyDistribution
	 */
	public boolean isOutputDocumentFrequencyDistribution() {
		return isOutputDocumentCaptureFrequencyDistribution;
	}

	/**
	 * @param isOutputDocumentSizeDistribution
	 *            the isOutputDocumentSizeDistribution to set
	 */
	public void setOutputDocumentSizeDistribution(
			boolean isOutputDocumentSizeDistribution) {
		this.isOutputDocumentSizeDistribution = isOutputDocumentSizeDistribution;
	}

	/**
	 * @return the isOutputDocumentSizeDistribution
	 */
	public boolean isOutputDocumentSizeDistribution() {
		return isOutputDocumentSizeDistribution;
	}

	/**
	 * @param isOutputDocumentSize
	 *            the isOutputDocumentSize to set
	 */
	public void setOutputDocumentSize(boolean isOutputDocumentSize) {
		this.isOutputDocumentSize = isOutputDocumentSize;
	}

	/**
	 * @return the isOutputDocumentSize
	 */
	public boolean isOutputDocumentSize() {
		return isOutputDocumentSize;
	}

	/**
	 * @param isOutputDocumentFrequency
	 *            the isOutputDocumentFrequency to set
	 */
	public void setOutputDocumentFrequency(boolean isOutputDocumentFrequency) {
		this.isOutputDocumentFrequency = isOutputDocumentFrequency;
	}

	/**
	 * @return the isOutputDocumentFrequency
	 */
	public boolean isOutputDocumentFrequency() {
		return isOutputDocumentFrequency;
	}

	/**
	 * @param isOutputORmethodResult
	 *            the isOutputORmethodResult to set
	 */
	public void setOutputORmethodResult(boolean isOutputORmethodResult) {
		this.isOutputORmethodResult = isOutputORmethodResult;
	}

	/**
	 * @return the isOutputORmethodResult
	 */
	public boolean isOutputORmethodResult() {
		return isOutputORmethodResult;
	}

	/**
	 * @param isOutputTermWeightAndNumofDoc
	 *            the isOutputTermWeightAndNumofDoc to set
	 */
	public void setOutputTermWeightandNumberofDocument(
			boolean isOutputTermWeightAndNumofDoc) {
		this.isOutputTermWeightAndNumofDoc = isOutputTermWeightAndNumofDoc;
	}

	/**
	 * @return the isOutputTermWeightAndNumofDoc
	 */
	protected boolean isOutputTermWeightandDF() {
		return isOutputTermWeightAndNumofDoc;
	}

	/**
	 * @param isOutputTermWeightDistribution
	 *            the isOutputTermWeightDistribution to set
	 */
	public void setOutputTermWeightDistribution(
			boolean isOutputTermWeightDistribution) {
		this.isOutputTermWeightDistribution = isOutputTermWeightDistribution;
	}

	/**
	 * @return the isOutputTermWeightDistribution
	 */
	public boolean isOutputTermWeightDistribution() {
		return isOutputTermWeightDistribution;
	}

	/**
	 * @param isOutputDocumentWeight
	 *            the isOutputDocumentWeight to set
	 */
	public void setOutputDocumentWeight(boolean isOutputDocumentWeight) {
		this.isOutputDocumentWeight = isOutputDocumentWeight;
	}

	/**
	 * @return the isOutputDocumentWeight
	 */
	public boolean isOutputDocumentWeight() {
		return isOutputDocumentWeight;
	}

	/**
	 * Set the sorting method using one of the following options:
	 * Computed Relevance: DataCollector.SORT_COMPUTED_RELEVANCE;
	 * Document ID (file name): DataCollector.SORT_DOCUMENT_ID;
	 * Document Size: DataCollector.SORT_DOCUMENT_SIZE;
	 * Document Title: DataCollector.SORT_DOCUMENT_TITLE.
	 * 
	 * @see ca.uwindsor.cs.deepweb.estimation.DataCollector#SORT_COMPUTED_RELEVANCE
	 * @see ca.uwindsor.cs.deepweb.estimation.DataCollector#SORT_DOCUMENT_ID
	 * @see ca.uwindsor.cs.deepweb.estimation.DataCollector#SORT_DOCUMENT_SIZE
	 * @see ca.uwindsor.cs.deepweb.estimation.DataCollector#SORT_DOCUMENT_TITLE
	 * @param sortmethod
	 *            the sorting method to set
	 */
	public void setSortmethod(short sortmethod) {
		this.sortmethod = sortmethod;
	}

	/**
	 * Get the sorting method in use; one of:
	 * Computed Relevance: DataCollector.SORT_COMPUTED_RELEVANCE;
	 * Document ID (file name): DataCollector.SORT_DOCUMENT_ID;
	 * Document Size: DataCollector.SORT_DOCUMENT_SIZE;
	 * Document Title: DataCollector.SORT_DOCUMENT_TITLE.
	 * 
	 * @see ca.uwindsor.cs.deepweb.estimation.DataCollector#SORT_COMPUTED_RELEVANCE
	 * @see ca.uwindsor.cs.deepweb.estimation.DataCollector#SORT_DOCUMENT_ID
	 * @see ca.uwindsor.cs.deepweb.estimation.DataCollector#SORT_DOCUMENT_SIZE
	 * @see ca.uwindsor.cs.deepweb.estimation.DataCollector#SORT_DOCUMENT_TITLE
	 * @return the sortmethod
	 */
	public int getSortmethod() {
		return sortmethod;
	}

	/**
	 * @param isReverseSorting
	 *            the isReverseSorting to set
	 */
	public void setReverseSorting(boolean isReverseSorting) {
		this.isReverseSorting = isReverseSorting;
	}

	/**
	 * @return the isReverseSorting
	 *         (method name keeps its historical capital "I" for compatibility)
	 */
	public boolean IsReverseSorting() {
		return isReverseSorting;
	}

	/**
	 * Get the exponent of OR used by the corpus-size estimate.
	 * 
	 * @return exponent of OR
	 */
	public float getExponentofOR() {
		return this.exponentofOR;
	}

	/**
	 * Set the exponent of OR used by the corpus-size estimate.
	 *
	 * @param e
	 *            the exponent of OR
	 */
	public void setExponentofOR(float e) {
		this.exponentofOR = e;
	}

	/**
	 * Misspelled legacy alias of {@link #setExponentofOR(float)}; kept so
	 * existing callers keep compiling.
	 *
	 * @param e
	 *            the exponent of OR
	 * @deprecated use {@link #setExponentofOR(float)} instead
	 */
	@Deprecated
	public void setExponnentofOR(float e) {
		setExponentofOR(e);
	}

	/**
	 * Selects how documents are returned after a query.
	 *
	 * @param r either Return_Random or Return_Top; any other value leaves the
	 *          current setting unchanged
	 */
	public void setReturnMethod(short r) {
		if (r == Return_Top) {
			isLimitedReturnTop = true;
		} else if (r == Return_Random) {
			isLimitedReturnTop = false;
		}
	}

	/**
	 * Ad-hoc driver: reads up to twenty 9-letter words from a dictionary file,
	 * then (as currently written) replaces them with four fixed debug queries,
	 * runs them against the index given in args[0] and prints the XML report.
	 *
	 * @param args args[0] is the corpus index directory
	 */
	public static void main(String[] args) {
		String WORK_DIR = "D:\\Research\\estimation\\";
		String DIC = WORK_DIR + "Dictionary\\" + "webster" + ".txt";
		try {
			ArrayList<String> list = new ArrayList<String>();
			BufferedReader in = new BufferedReader(new FileReader(DIC));
			try {
				String line;
				StringTokenizer stk;
				int wordsquantity = 0;
				// Fixed: the first dictionary line is no longer discarded by a
				// stray readLine() before the loop.
				while ((line = in.readLine()) != null) {
					stk = new StringTokenizer(line,
							" 0123456789!\t\n\r!@#$%^&*()_+{}|:<>?[];',.'");
					if (stk.hasMoreTokens()) {
						// Fixed: trim()/toLowerCase() return new strings; the
						// normalized result was previously discarded.
						String word = stk.nextToken().trim().toLowerCase();
						if (word.length() == 9
								&& !(word.endsWith("s") || word.endsWith("ed")
										|| word.endsWith("ing")
										|| word.endsWith("er") || word
										.endsWith("ion"))) {
							list.add(word);
							wordsquantity++;
						}
						if (wordsquantity == 20) {
							break;
						}
					}
				}
			} finally {
				// Fixed: the reader was never closed.
				in.close();
			}
			// Debug override: replace the dictionary words with fixed queries.
			list.clear();
			list.add("Query1");
			list.add("Query2");
			list.add("Query3");
			list.add("Query4");
			QueryService qs = new QueryService(args[0], 5, list
					.toArray(new String[list.size()]), 5,
					"F:\\DocumentSizeIndexes\\gov2_greaterthan2048letter",
					false);
			qs.setStepSize(5);
			qs.setOutputDocumentCaptureFrequencyDistribution(true);
			qs.setOutputORmethodResult(true);
			qs.setOutputDocumentFrequency(false);
			qs.setOutputDocumentSize(false);
			qs.setOutputDocumentSizeDistribution(false);
			qs.setOutputTermWeightandNumberofDocument(false);
			qs.setOutputTermWeightDistribution(false);
			qs.setOutputDocumentWeight(true);
			String result = qs.goQuery();

			System.out.println(result);
		} catch (Exception e) {
			e.printStackTrace();
		}
	}

}
