package ca.uwindsor.cs.deepweb.utility.heapslawzipflaw;

import java.io.FileOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.TermFreqVector;

import ca.uwindsor.cs.deepweb.estimation.FilesContentIndexer;

/**
 * Walks every document in a Lucene index, extracts its term vector, keeps only
 * purely-alphabetic terms, and writes a tab-separated table of (new words per
 * document, cumulative unique words) — data for a Heaps'-law style analysis.
 */
public class Analyzer {

	/** Accumulated set of every distinct word seen across all {@link #addWords} calls. */
	protected Set<String> unique;
	/** Matches any non-alphabetic character; a word containing one is filtered out. */
	protected Pattern pad;
	/** Matcher last used by {@link #filter}; kept as a field for subclass compatibility. */
	protected Matcher mad;
	/** Size of the most recent collection passed to {@link #addWords}. */
	protected int newid;

	/**
	 * Creates an analyzer with an empty unique-word set (pre-sized for large indexes).
	 */
	public Analyzer() {
		pad = Pattern.compile("[^a-zA-Z]");
		unique = new HashSet<String>(100000);
	}

	/**
	 * Merges all words of the given collection into the cumulative unique set.
	 *
	 * @param collection words to add; its size is recorded in {@code newid}
	 */
	public void addWords(Collection<String> collection) {
		newid = collection.size();
		unique.addAll(collection);
	}

	/**
	 * Keeps only words composed entirely of ASCII letters.
	 *
	 * @param c candidate words
	 * @return a new list containing the purely-alphabetic words, in input order
	 */
	public Collection<String> filter(Collection<String> c) {
		Collection<String> temp = new ArrayList<String>(c.size());
		for (String s : c) {
			mad = pad.matcher(s);
			// find() succeeding means the word contains a non-letter -> reject it.
			if (!mad.find()) {
				temp.add(s);
			}
		}
		return temp;
	}

	/**
	 * @return the number of unique words accumulated so far
	 */
	public int getUniqueSize() {
		return unique.size();
	}

	/**
	 * Entry point: reads each document's term vector from the index at
	 * {@code args[0]} and appends one tab-separated row per document
	 * (filtered word count, cumulative unique count) to the file {@code args[1]}.
	 *
	 * @param args args[0] = Lucene index directory, args[1] = output word-list file
	 */
	public static void main(String[] args) {
		if (args.length != 2) {
			// Usage corrected: only two arguments are actually consumed
			// (the original message advertised a third "digitlistFile").
			System.out.println("java -jar LuceneIndexReader.jar indexDir wordlistFile");
			System.exit(1); // non-zero exit: invocation error
			return;
		}
		// StringBuilder suffices: single-threaded use, no synchronization needed.
		StringBuilder buf = new StringBuilder();
		buf.append("number of new words");
		buf.append("\t");
		buf.append("number of accumulative unique words");
		buf.append("\n");
		try {
			IndexReader reader = IndexReader.open(args[0]);
			try {
				int num = reader.numDocs();
				System.out.println("NumDocs = " + num);
				Analyzer analyzer = new Analyzer();
				FileOutputStream fp = new FileOutputStream(args[1]);
				try {
					// Write the header row, then reuse the buffer per document.
					fp.write(buf.toString().getBytes("UTF-8"));
					buf.setLength(0);

					for (int i = 0; i < num; i++) {
						TermFreqVector termFreqVector = reader.getTermFreqVector(i,
								FilesContentIndexer.FIELD_CONTENT);
						if (termFreqVector == null) {
							// Document has no stored term vector; skip it.
							System.out.println("termFreqVector is null.");
							continue;
						}
						Collection<String> termlist = Arrays.asList(termFreqVector.getTerms());
						Collection<String> reallist = analyzer.filter(termlist);
						analyzer.addWords(reallist);

						// One row per document: words in this doc \t cumulative uniques.
						buf.append(reallist.size());
						buf.append("\t");
						buf.append(analyzer.getUniqueSize());
						buf.append("\n");

						fp.write(buf.toString().getBytes("UTF-8"));
						fp.flush();
						buf.setLength(0);
					}
				} finally {
					fp.close(); // always release the output file, even on error
				}
			} finally {
				reader.close(); // always release the index reader (was leaked before)
			}
		} catch (CorruptIndexException e) {
			e.printStackTrace();
		} catch (IOException e) {
			e.printStackTrace();
		}
	}

}
