package ca.uwindsor.cs.deepweb.utility.documentanalysis;

import java.io.FileOutputStream;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map.Entry;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.TermFreqVector;

import ca.uwindsor.cs.deepweb.estimation.FilesContentIndexer;

public class LuceneIndexReader {

	/**
	 * Extracts all terms stored in a Lucene index and collects those that are
	 * exactly 6 characters long and consist purely of digits (optionally
	 * comma-separated groups of digits), counting in how many documents each
	 * such term appears. The result is written to the output file as
	 * tab-separated {@code term\tcount} lines in UTF-8.
	 * 
	 * @param args args[0] = path to the Lucene index directory;
	 *             args[1] = path of the output word-list file
	 */
	public static void main(String[] args) {

		// NOTE(fix): previously args was unconditionally overwritten with
		// hard-coded debug paths, which made the argument check below dead
		// code and the program unusable from the command line.
		if (args.length != 2) {
			System.out
					.println("java -jar LuceneIndexReader.jar indexDir wordlistFile");
			System.exit(0);
		}

		IndexReader reader = null;
		FileOutputStream fp = null;
		try {
			reader = IndexReader.open(args[0]);
			int numDocs = reader.numDocs(); // hoisted: invariant over the loop
			System.out.println("NumDocs = " + numDocs);

			HashMap<String, Integer> wordlist = new HashMap<String, Integer>();
			// Matches strings made entirely of digit groups, optionally
			// separated by commas, e.g. "123456" or "12,345".
			Pattern patternpurelynumbers = Pattern.compile("^(\\d+,*)+$");
			Matcher matcher;
			Integer count;
			String word;

			for (int i = 0; i < numDocs; i++) {
				// The tokenized terms of the content field are stored in a
				// TermFreqVector; it may be null if the document stored none.
				TermFreqVector termFreqVector = reader.getTermFreqVector(i,
						FilesContentIndexer.FIELD_CONTENT);
				if (termFreqVector == null) {
					System.out.println("termFreqVector is null.");
					continue;
				}

				String[] terms = termFreqVector.getTerms();
				for (int j = 0; j < terms.length; j++) {
					word = terms[j];
					// Only 6-character, purely numeric terms are collected.
					if (word.length() == 6) {
						matcher = patternpurelynumbers.matcher(word);
						if (matcher.find()) {
							count = wordlist.get(word);
							wordlist.put(word, count == null ? 1 : count + 1);
						}
					}
				}

				if (i % 10000 == 0) {
					System.out.println(i + " documents are processed!");
				}
			}

			// Dump term -> document-frequency pairs, one per line, UTF-8.
			fp = new FileOutputStream(args[1]);
			Set<Entry<String, Integer>> s = wordlist.entrySet();
			for (Entry<String, Integer> e : s) {
				String line = e.getKey() + "\t" + e.getValue() + "\n";
				fp.write(line.getBytes("UTF-8"));
			}
			fp.flush();
		} catch (CorruptIndexException e) {
			e.printStackTrace();
		} catch (IOException e) {
			e.printStackTrace();
		} finally {
			// Close resources even when an exception was thrown above.
			if (fp != null) {
				try {
					fp.close();
				} catch (IOException ignored) {
					// nothing useful to do on close failure
				}
			}
			if (reader != null) {
				try {
					reader.close();
				} catch (IOException ignored) {
					// nothing useful to do on close failure
				}
			}
		}
	}

}
