package ca.uwindsor.cs.deepweb.utility.documentanalysis;

import java.io.FileOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map.Entry;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.TermFreqVector;

import ca.uwindsor.cs.deepweb.estimation.FilesContentIndexer;

/**
 * Command-line tool that scans every document in a Lucene index, accumulates
 * the collection term frequency (CTF) of each purely alphabetic term found in
 * the {@code FilesContentIndexer.FIELD_CONTENT} field, and writes the
 * frequencies — sorted in descending order, space-separated — to the output
 * file.
 *
 * <p>Usage: {@code java -jar TermsAndFrequencyExtractor.jar indexDir outputFile}
 */
public class WordsAndCTFExtractor {
	public static void main(String[] args) {

		if (args.length != 2) {
			System.out
					.println("java -jar TermsAndFrequencyExtractor.jar indexDir outputFile");
			System.exit(0);
		}
		try {
			IndexReader reader = IndexReader.open(args[0]);
			int num = reader.numDocs();
			System.out.println("NumDocs = " + num);
			// CTF accumulator, keyed by the term itself.
			// BUGFIX: the original keyed this map on word.hashCode(), which
			// silently merged distinct words whose hash codes collide.
			HashMap<String, Integer> wordlist = new HashMap<String, Integer>();
			// Only purely alphabetic terms are counted.
			Pattern alphabetic = Pattern.compile("^[A-Za-z]+$");
			try {
				for (int i = 0; i < num; i++) {
					// Term-frequency vector of the tokenized content field for
					// document i (translated from the original Chinese comment).
					TermFreqVector termFreqVector = reader.getTermFreqVector(i,
							FilesContentIndexer.FIELD_CONTENT);

					if (termFreqVector == null) {
						System.out.println("termFreqVector is null.");
						continue;
					}
					String[] terms = termFreqVector.getTerms();
					int[] frequences = termFreqVector.getTermFrequencies();

					for (int j = 0; j < terms.length; j++) {
						String word = terms[j];
						if (alphabetic.matcher(word).matches()) {
							Integer v = wordlist.get(word);
							// BUGFIX: the original stored 1 for a first
							// occurrence instead of the term's actual
							// in-document frequency, undercounting the CTF.
							if (v == null) {
								wordlist.put(word, frequences[j]);
							} else {
								wordlist.put(word, v.intValue() + frequences[j]);
							}
						}
					}
					if (i % 10000 == 0) {
						System.out.println(i + " documents are processed!");
					}
				}
			} finally {
				// Always release the index reader, even on an exception.
				reader.close();
			}

			// Only the frequency values are needed from here on; sort them in
			// descending order of collection term frequency.
			ArrayList<Integer> list = new ArrayList<Integer>(wordlist.values());
			wordlist.clear();
			Collections.sort(list);
			Collections.reverse(list);

			FileOutputStream fpy = new FileOutputStream(args[1]);
			try {
				// Output format (unchanged): "f1 f2 f3 ... " space-separated.
				for (int i = 0; i < list.size(); i++) {
					fpy.write(Integer.toString(list.get(i).intValue())
							.getBytes());
					fpy.write(" ".getBytes());
				}
				fpy.flush();
			} finally {
				// Always close the output stream, even on an exception.
				fpy.close();
			}
		} catch (CorruptIndexException e) {
			e.printStackTrace();
		} catch (IOException e) {
			e.printStackTrace();
		}
	}
}
