package nlp;

import java.io.StringReader;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;

import weka.core.tokenizers.NGramTokenizer;
import edu.stanford.nlp.ling.HasWord;
import edu.stanford.nlp.ling.TaggedWord;
import edu.stanford.nlp.tagger.maxent.MaxentTagger;

/**
 * Creates part-of-speech n-gram features from a piece of text: the text is
 * POS-tagged with a Stanford {@link MaxentTagger}, the resulting tag sequence
 * is split into 3-grams, and the 100 most frequent 3-grams are emitted as
 * {@code POSGramFeature}s.
 */
public class POSGramFeatureCreator implements FeatureCreator {

	/** Raw input text to be POS-tagged. */
	private String textString;
	/** Pre-loaded Stanford part-of-speech tagger. */
	private MaxentTagger tagger;

	public POSGramFeatureCreator(String textString, MaxentTagger tagger) {
		this.textString = textString;
		this.tagger = tagger;
	}

	/**
	 * Tags {@code textString}, concatenates the POS tags into one
	 * space-separated string, extracts POS 3-grams, and puts the 100 most
	 * frequent ones into the supplied feature map.
	 *
	 * @param obj expected to be a {@code HashMap<String, Feature>} that the
	 *            created features are added to, keyed by feature name
	 */
	@SuppressWarnings("unchecked")
	@Override
	public void CreateFeature(Object obj) {
		List<ArrayList<? extends HasWord>> sentences = MaxentTagger
				.tokenizeText(new StringReader(textString));
		// Build the tag sequence with a StringBuilder; the original += in a
		// nested loop was quadratic in the number of tags.
		StringBuilder posBuilder = new StringBuilder();
		for (ArrayList<? extends HasWord> sentence : sentences) {
			ArrayList<TaggedWord> tSentence = this.tagger.tagSentence(sentence);
			for (TaggedWord tWord : tSentence) {
				if (tWord.tag().length() > 0) {
					posBuilder.append(tWord.tag()).append(' ');
				}
			}
		}
		String posString = posBuilder.toString();

		// Tokenize the pos-gram text. The n-gram sizes must be configured
		// BEFORE tokenize() is called — tokenize() initializes the tokenizer
		// for the given string, so setting the sizes afterwards (as the old
		// code did) leaves the default sizes in effect for this input.
		int ngramSize = 3;
		NGramTokenizer ngramTokenizer = new NGramTokenizer();
		ngramTokenizer.setNGramMaxSize(ngramSize);
		ngramTokenizer.setNGramMinSize(ngramSize);
		ngramTokenizer.tokenize(posString);

		List<String> posGrams = new ArrayList<String>();
		while (ngramTokenizer.hasMoreElements()) {
			posGrams.add((String) ngramTokenizer.nextElement());
		}

		// Keep only the 100 most frequent POS 3-grams as features.
		FreqDistUtil freqDistUtil = new FreqDistUtil(posGrams, posGrams.size());
		List<FreqData> freqDist = freqDistUtil.getTop(100);
		HashMap<String, Feature> featureMap = (HashMap<String, Feature>) obj;
		for (FreqData data : freqDist) {
			Feature feature = new POSGramFeature(data.getData(),
					data.getFrequency());
			featureMap.put(feature.GetFeatureName(), feature);
		}
	}

	@Override
	public String GetFeatureName() {
		return "POSGramTop100";
	}

}
