package ca.uwindsor.cs.deepweb.utility.documentanalysis;

import java.io.FileOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;

import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;

import ca.uwindsor.cs.deepweb.estimation.FilesContentIndexer;
import ca.uwindsor.cs.deepweb.estimation.RandomWordSelector;

/**
 * Looks up the document frequency (number of documents containing a term)
 * for a list of terms in a Lucene index, and provides a command-line entry
 * point that writes a sorted "term\tfrequency" table to a file.
 */
public class TermDocumentFrequencyExtractor {

	/** Path to the Lucene index directory. */
	protected String index;

	/** Terms whose document frequencies are looked up. */
	protected String[] terms;

	/**
	 * @param index path to the Lucene index directory
	 * @param terms terms to look up; frequencies are returned in the same order
	 */
	public TermDocumentFrequencyExtractor(String index, String[] terms) {
		this.index = index;
		this.terms = terms;
	}

	/**
	 * Queries the index for the document frequency of each term in the
	 * {@link FilesContentIndexer#FIELD_CONTENT} field.
	 *
	 * @return document frequencies, parallel to the {@code terms} array
	 * @throws CorruptIndexException if the index is corrupt
	 * @throws IOException if the index cannot be opened or read
	 */
	public int[] getDocumentFrequency() throws CorruptIndexException,
			IOException {
		int[] df = new int[terms.length];

		IndexReader reader = IndexReader.open(index);
		try {
			for (int i = 0; i < terms.length; i++) {
				df[i] = reader.docFreq(new Term(
						FilesContentIndexer.FIELD_CONTENT, terms[i]));
			}
		} finally {
			// BUGFIX: the reader was never closed before, leaking index file
			// handles on every call.
			reader.close();
		}

		return df;
	}

	/**
	 * Reads a word list, looks up each word's document frequency in the
	 * index, and writes the sorted "term\tfrequency" lines to the output
	 * file (UTF-8). Exits with code -1 on usage or I/O errors.
	 *
	 * @param args wordListFile indexDir outputFile
	 */
	public static void main(String[] args) {
		if (args.length != 3) {
			// BUGFIX: the old usage string listed only two arguments even
			// though exactly three are required (args[0..2] below).
			System.out
					.println("java -jar LuceneIndexReader.jar wordListFile indexDir outputFile");
			System.exit(-1);
		}
		try {
			// args[0]: file containing the candidate words, read as UTF-8.
			RandomWordSelector selector = new RandomWordSelector(args[0],
					"UTF-8");
			String[] words = selector.getWordList().toArray(
					new String[selector.getWordList().size()]);
			// args[1]: Lucene index queried for document frequencies.
			TermDocumentFrequencyExtractor e = new TermDocumentFrequencyExtractor(
					args[1], words);
			int[] df = e.getDocumentFrequency();

			// Pair each word with its frequency and sort (order defined by
			// TermFrequencyCell's Comparable implementation).
			ArrayList<TermFrequencyCell> list = new ArrayList<TermFrequencyCell>(
					words.length);
			for (int i = 0; i < words.length; i++) {
				list.add(new TermFrequencyCell(words[i], df[i]));
			}
			Collections.sort(list);

			// args[2]: output file, one "term\tfrequency" line per term.
			FileOutputStream fp = new FileOutputStream(args[2]);
			try {
				for (TermFrequencyCell cell : list) {
					String line = cell.getTerm() + "\t" + cell.getFrequency()
							+ "\n";
					fp.write(line.getBytes("UTF-8"));
				}
				fp.flush();
			} finally {
				// BUGFIX: ensure the stream is closed even when a write fails.
				fp.close();
			}
		} catch (CorruptIndexException e1) {
			e1.printStackTrace();
			// BUGFIX: fail with a non-zero exit code instead of falling
			// through and printing "finished" as if the run succeeded.
			System.exit(-1);
		} catch (IOException e1) {
			e1.printStackTrace();
			System.exit(-1);
		}

		System.out.println("finished");
	}

}
