package termWeighting;

import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Iterator;

import ds.Constants;
import type.TfType;

import edu.udo.cs.wvtool.config.WVTConfiguration;
import edu.udo.cs.wvtool.config.WVTConfigurationFact;
import edu.udo.cs.wvtool.generic.charmapper.WVTCharConverter;
import edu.udo.cs.wvtool.generic.inputfilter.WVTInputFilter;
import edu.udo.cs.wvtool.generic.loader.WVTDocumentLoader;
import edu.udo.cs.wvtool.generic.output.WVTOutputFilter;
import edu.udo.cs.wvtool.generic.output.WordVectorWriter;
import edu.udo.cs.wvtool.generic.stemmer.PorterStemmerWrapper;
import edu.udo.cs.wvtool.generic.stemmer.WVTStemmer;
import edu.udo.cs.wvtool.generic.tokenizer.WVTTokenizer;
import edu.udo.cs.wvtool.generic.tokenizer.SimpleTokenizer;
import edu.udo.cs.wvtool.generic.vectorcreation.TFIDF;
import edu.udo.cs.wvtool.generic.vectorcreation.TermFrequency;
import edu.udo.cs.wvtool.generic.vectorcreation.WVTVectorCreator;
import edu.udo.cs.wvtool.generic.wordfilter.StopWordsWrapper;
import edu.udo.cs.wvtool.generic.wordfilter.WVTWordFilter;
import edu.udo.cs.wvtool.main.WVTDocumentInfo;
import edu.udo.cs.wvtool.main.WVTFileInputList;
import edu.udo.cs.wvtool.main.WVTInputList;
import edu.udo.cs.wvtool.main.WVTWordVector;
import edu.udo.cs.wvtool.main.WVTool;
import edu.udo.cs.wvtool.util.TokenEnumeration;
import edu.udo.cs.wvtool.util.WVToolException;
import edu.udo.cs.wvtool.util.WVToolLogger;
import edu.udo.cs.wvtool.wordlist.WVTWordList;

/**
 * Driver that turns a train/test directory pair of text documents into
 * term-frequency word vectors using the WVTool pipeline
 * (tokenize -> stop-word filter -> Porter stem), writing both the word list
 * and the vector file to disk.
 */
public class PrepareTF {

	/**
	 * Entry point: builds normalized-TF vectors for the bundled data
	 * directories and writes the results under {@code output/}.
	 */
	public static void main(String[] args) throws Exception {
		run("data/train", "data/test", "output/TfFile_normalized", "output/wordList", TfType.normolizedTF);
	}

	/**
	 * Builds a word vector holding the raw (un-normalized) term frequencies of
	 * a single document.
	 *
	 * @param frequencies       per-term occurrence counts for the current
	 *                          document, indexed in word-list order; must be at
	 *                          least {@code wordList.getNumWords()} long
	 * @param numTermOccurences total number of term occurrences in the
	 *                          document; when 0 the all-zero vector is returned
	 * @param wordList          vocabulary defining the vector dimensionality
	 * @param d                 document the resulting vector belongs to
	 * @return a vector whose i-th component is {@code frequencies[i]}
	 *         (or 0.0 throughout for an empty document)
	 */
	public static WVTWordVector createPureFrequencyVector(int[] frequencies, int numTermOccurences,
			WVTWordList wordList, WVTDocumentInfo d) {
		int numTerms = wordList.getNumWords();

		// Create the result structure
		WVTWordVector result = new WVTWordVector();
		double[] wv = new double[numTerms];

		// Only copy counts when the document contains at least one term;
		// otherwise the zero-initialized array is already the correct answer.
		if (numTermOccurences > 0) {
			for (int i = 0; i < wv.length; i++) {
				wv[i] = frequencies[i];
			}
		}

		result.setDocumentInfo(d);
		result.setValues(wv);
		return result;
	}

	/**
	 * Runs the full WVTool pipeline over every document in {@code input} and
	 * writes a pure (un-normalized) term-frequency vector per document through
	 * the configured output filter.
	 *
	 * <p>Processing is best-effort: a document that fails with a
	 * {@link WVToolException} is reported and skipped, and the remaining
	 * documents are still processed.
	 *
	 * @param input    expanded list of documents to process
	 * @param config   pipeline configuration supplying the component for each step
	 * @param wordList vocabulary; updated per-document and used for vector size
	 */
	public static void createPureTFVector(WVTInputList input, WVTConfiguration config, WVTWordList wordList) {
		// Freeze the vocabulary: count occurrences against the existing word
		// list only, tracking just the document currently being processed.
		wordList.setAppendWords(false);
		wordList.setUpdateOnlyCurrent(true);

		// Pointers to the pipeline components, re-resolved per document so the
		// configuration can vary by document.
		WVTDocumentLoader loader = null;
		WVTInputFilter infilter = null;
		WVTCharConverter charConverter = null;
		WVTTokenizer tokenizer = null;
		WVTWordFilter wordFilter = null;
		WVTStemmer stemmer = null;
		WVTOutputFilter outputFilter = null;

		// Obtain an expanded list of all documents to consider
		Iterator<?> inList = input.getEntries();

		while (inList.hasNext()) {
			WVTDocumentInfo d = (WVTDocumentInfo) inList.next();
			try {
				// Resolve the component configured for each step of this document
				loader = (WVTDocumentLoader) config.getComponentForStep(
						WVTConfiguration.STEP_LOADER, d);
				infilter = (WVTInputFilter) config.getComponentForStep(
						WVTConfiguration.STEP_INPUT_FILTER, d);
				charConverter = (WVTCharConverter) config.getComponentForStep(
						WVTConfiguration.STEP_CHAR_MAPPER, d);
				tokenizer = (WVTTokenizer) config.getComponentForStep(
						WVTConfiguration.STEP_TOKENIZER, d);
				wordFilter = (WVTWordFilter) config.getComponentForStep(
						WVTConfiguration.STEP_WORDFILTER, d);
				stemmer = (WVTStemmer) config.getComponentForStep(
						WVTConfiguration.STEP_STEMMER, d);
				outputFilter = (WVTOutputFilter) config.getComponentForStep(
						WVTConfiguration.STEP_OUTPUT, d);

				// Load -> plain text -> char conversion -> tokenize -> filter -> stem
				TokenEnumeration tokens = stemmer.stem(wordFilter.filter(
						tokenizer.tokenize(
								charConverter.convertChars(
										infilter.convertToPlainText(
												loader.loadDocument(d), d), d),
								d), d), d);
				while (tokens.hasMoreTokens()) {
					wordList.addWordOccurance(tokens.nextToken());
				}

				// Emit the raw-count vector for this document
				outputFilter
						.write(createPureFrequencyVector(
								wordList.getFrequenciesForCurrentDocument(),
								wordList.getTermCountForCurrentDocument(),
								wordList, d));

				wordList.closeDocument(d);
				loader.close(d);
			} catch (WVToolException e) {
				// Was silently swallowed before: report the failing document and
				// continue with the rest (best-effort semantics preserved).
				System.err.println("Failed to process document: " + d);
				e.printStackTrace();
			}
		}
	}

	/**
	 * Builds a word list and term-weighted word vectors for all documents under
	 * the given train and test directories, then writes both to disk.
	 *
	 * <p>The vector file starts with a header line of the form
	 * {@code WordCount:<n> firstPart:<trainFiles> secondPart:<testFiles>}.
	 *
	 * @param trainingFileDir directory of training documents, or {@code null} to skip
	 * @param testingFileDir  directory of testing documents, or {@code null} to skip
	 * @param TfFile          output path for the word-vector file
	 * @param Wordlistfile    output path for the plain-text word list
	 * @param TFtype          weighting scheme: normalized TF, pure TF, or TF-IDF
	 * @throws Exception if the WVTool pipeline or file I/O fails
	 */
	public static void run(String trainingFileDir, String testingFileDir,
			String TfFile, String Wordlistfile, TfType TFtype) throws Exception {
		WVTool wvt = new WVTool(false);
		WVTConfiguration config = new WVTConfiguration();

		// Configure the pluggable pipeline steps; change these to tune results.
		WVTTokenizer tk = new SimpleTokenizer();
		config.setConfigurationRule(WVTConfiguration.STEP_TOKENIZER, new WVTConfigurationFact(tk));
		WVTStemmer stemmer = new PorterStemmerWrapper();
		config.setConfigurationRule(WVTConfiguration.STEP_STEMMER, new WVTConfigurationFact(stemmer));
		WVTWordFilter filter = new StopWordsWrapper();
		config.setConfigurationRule(WVTConfiguration.STEP_WORDFILTER, new WVTConfigurationFact(filter));

		// Register the train and test directories (either may be null).
		WVTFileInputList list = new WVTFileInputList(1);
		if (trainingFileDir != null)
			list.addEntry(new WVTDocumentInfo(trainingFileDir, "", "", "english", 0));
		if (testingFileDir != null)
			list.addEntry(new WVTDocumentInfo(testingFileDir, "", "", "english", 0));

		// Build the vocabulary over all registered documents.
		WVTWordList wordList = wvt.createWordList(list, config);

		// Persist the word list; try-with-resources guarantees the writer is
		// closed even if storePlain throws (was leaked before).
		try (FileWriter wordListWriter = new FileWriter(Wordlistfile)) {
			wordList.storePlain(wordListWriter);
		}

		FileWriter outFile = new FileWriter(TfFile);
		WordVectorWriter wvw = null;
		try {
			// Header: vocabulary size plus document counts for the two parts.
			outFile.append("WordCount:" + wordList.getNumWords() + " "
					+ "firstPart:" + countFiles(trainingFileDir) + " "
					+ "secondPart:" + countFiles(testingFileDir) + Constants.lineSeperator);

			wvw = new WordVectorWriter(outFile, true);
			config.setConfigurationRule(WVTConfiguration.STEP_OUTPUT, new WVTConfigurationFact(wvw));
			config.setConfigurationRule(WVTConfiguration.STEP_VECTOR_CREATION,
					new WVTConfigurationFact(new TermFrequency()));

			if (TFtype == TfType.normolizedTF) {
				wvt.createVectors(list, config, wordList);
			} else if (TFtype == TfType.pureTF) {
				createPureTFVector(list, config, wordList);
			} else if (TFtype == TfType.TFIDF) {
				// Swap the vector-creation step to TF-IDF before generating.
				config.setConfigurationRule(WVTConfiguration.STEP_VECTOR_CREATION,
						new WVTConfigurationFact(new TFIDF()));
				wvt.createVectors(list, config, wordList);
			}
		} finally {
			// Close on all paths (previously leaked when createVectors threw).
			if (wvw != null)
				wvw.close();
			outFile.close();
		}
	}

	/**
	 * Counts the entries directly inside {@code dir}.
	 *
	 * @param dir directory path, may be {@code null}
	 * @return number of entries, or 0 when {@code dir} is {@code null}, missing,
	 *         or not a readable directory ({@code listFiles()} returns null then)
	 */
	private static int countFiles(String dir) {
		if (dir == null)
			return 0;
		File[] files = new File(dir).listFiles();
		return files == null ? 0 : files.length;
	}
}
