package nlp;

import java.io.IOException;
import java.util.HashMap;
import java.util.HashSet;

import org.tartarus.snowball.ext.porterStemmer;

import edu.stanford.nlp.tagger.maxent.MaxentTagger;

/**
 * Command-line driver that loads the resources a {@link TextDocument} needs
 * (POS tagger model, mood-word list, stop-word list, TF-IAF weights), then
 * serializes and deserializes the features of one input document, printing
 * each feature name/value pair to stdout.
 *
 * Expected arguments:
 *   argv[0] - path of the text document to analyze
 *   argv[1] - path of the MaxentTagger model file
 *   argv[2] - path of the mood-word list (one word per line)
 *   argv[3] - path of the stop-word list (one word per line)
 *   argv[4] - path of the TF-IAF map (word TAB weight per line)
 */
public class DocumentTester {
	public static void main(String argv[]) throws IOException,
			ClassNotFoundException {
		// BUG FIX: the original guard was `argv.length > 0`, but argv[1]..argv[4]
		// are read unconditionally below, so fewer than 5 arguments crashed with
		// ArrayIndexOutOfBoundsException instead of printing a usage message.
		if (argv.length >= 5) {
			porterStemmer stemmer = new porterStemmer();

			// Mood words and stop words are loaded identically: one word per
			// line, lower-cased and Porter-stemmed.
			HashSet<String> moodWords = buildStemmedSet(argv[2], stemmer);
			HashSet<String> stopWords = buildStemmedSet(argv[3], stemmer);

			// TF-IAF map: tab-separated "word<TAB>weight" lines.
			HashMap<String, Double> tfiafMap = readTfiafMap(argv[4]);

			// Install the shared resources on TextDocument before constructing one.
			TextDocument.tagger = new MaxentTagger(argv[1]);
			TextDocument.moodWordSet = moodWords;
			TextDocument.stopWordSet = stopWords;
			TextDocument.tfiafMap = tfiafMap;

			// Round-trip: extract features, serialize, deserialize, and dump.
			String textString = FileUtil.ReadFileContent(argv[0]);
			TextDocument doc1 = new TextDocument(textString, "Gandhi");
			String serializedStr = doc1.SerializeFeatures();
			System.out.println("Serialized the features!");

			TextDocument doc = new TextDocument();
			doc.Deserialize(serializedStr);
			String[] featureNames = doc.GetAllFeatureNames();
			for (String name : featureNames) {
				Feature feature = doc.GetFeature(name);
				System.out.println(name + ": " + feature.GetValue().toString());
			}

		} else {
			System.err.println("Usage: DocumentTester <document> <tagger-model>"
					+ " <mood-words> <stop-words> <tfiaf-map>");
		}
	}

	/**
	 * Reads a newline-separated word list from {@code path} and returns the
	 * set of lower-cased, Porter-stemmed words.
	 *
	 * @param path    file containing one word per line
	 * @param stemmer shared stemmer instance (stateful; reused across calls)
	 * @return set of stemmed words
	 * @throws IOException if the file cannot be read
	 */
	private static HashSet<String> buildStemmedSet(String path,
			porterStemmer stemmer) throws IOException {
		String[] words = FileUtil.ReadFileContent(path).split("\n");
		HashSet<String> stemmedWords = new HashSet<String>();
		for (String word : words) {
			stemmer.setCurrent(word.toLowerCase());
			stemmer.stem();
			stemmedWords.add(stemmer.getCurrent());
		}
		return stemmedWords;
	}

	/**
	 * Parses a TF-IAF weight file into a word-to-weight map.
	 *
	 * @param path file with one "word&lt;TAB&gt;weight" entry per line
	 * @return map from word to its TF-IAF weight
	 * @throws IOException if the file cannot be read
	 */
	private static HashMap<String, Double> readTfiafMap(String path)
			throws IOException {
		String[] lines = FileUtil.ReadFileContent(path).split("\n");
		HashMap<String, Double> tfiafMap = new HashMap<String, Double>();
		for (String line : lines) {
			String[] tokens = line.split("\t");
			tfiafMap.put(tokens[0], Double.parseDouble(tokens[1]));
		}
		return tfiafMap;
	}
}
