package wviz.index;

import java.io.IOException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.SortedSet;
import java.util.TreeSet;

import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermFreqVector;

import wviz.Constants;

public class TfCalculator {

	private final IndexReader ir;
	private final int highFreqWordsTotal;

	// NOTE(review): never written in this class — presumably populated by a
	// caller or dead code; kept because it is package-visible. Confirm usage.
	final Map<String /* word */, Integer/* tf */> word_tfidfMap = new HashMap<String, Integer>();

	/**
	 * @param r reader over the index whose term frequencies are analyzed
	 * @param n maximum number of high-frequency terms to return from
	 *          {@link #calcTfIdfSortedSet()}
	 */
	public TfCalculator(final IndexReader r, int n) {
		ir = r;
		highFreqWordsTotal = n;
	}

	/**
	 * Collects every (term, frequency) pair from the contents field of each
	 * document, orders the pairs by {@code StringWithIntScore}'s natural
	 * ordering, and returns up to {@code highFreqWordsTotal} terms in that
	 * order.
	 *
	 * @return terms for {@code Constants.FIELD_CONTENTS}; length is the lesser
	 *         of {@code highFreqWordsTotal} and the number of entries collected
	 * @throws IOException if the underlying index cannot be read
	 */
	public Term[] calcTfIdfSortedSet() throws IOException {

		SortedSet<StringWithIntScore> sws = new TreeSet<StringWithIntScore>();

		// NOTE(review): numDocs() excludes deleted documents, but docNum walks
		// a dense 0..n range; with deletions this likely needs maxDoc() plus a
		// deleted-doc check — confirm against the index's deletion state.
		final int docCount = ir.numDocs();
		for (int docNum = 0; docNum < docCount; docNum++) {
			TermFreqVector positiveVector = ir.getTermFreqVector(docNum,
					Constants.FIELD_CONTENTS);
			if (positiveVector == null) {
				continue; // no term vector stored for this doc/field
			}

			final String[] words = positiveVector.getTerms();
			final int[] positiveFreqs = positiveVector.getTermFrequencies();
			for (int j = 0; j < words.length; j++) {
				// NOTE(review): TreeSet dedups via compareTo; entries that
				// compare equal (e.g. same score) silently collapse — confirm
				// StringWithIntScore's ordering is what's intended here.
				sws.add(new StringWithIntScore(words[j], positiveFreqs[j]));
			}
		}

		// BUG FIX: the array was previously sized by the term count of the
		// LAST document only (and 0 when no doc had vectors), truncating valid
		// terms or leaving null slots. Size by the entries actually collected.
		Term[] t = new Term[Math.min(highFreqWordsTotal, sws.size())];
		int i = 0;
		for (StringWithIntScore s : sws) {
			if (i >= t.length) {
				break;
			}
			t[i++] = new Term(Constants.FIELD_CONTENTS, s.getWord());
		}
		return t;
	}
}