package umich.max.geolocation.featextract.tf.blog;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;

import umich.max.geolocation.featextract.tf.TF;

import max.nlp.dal.blog.blogger.BlogAuthorProfile;
import max.nlp.dal.blog.blogger.BloggerBlog;
import max.nlp.dal.blog.blogger.BloggerDB;
import max.nlp.dal.blog.blogger.BloggerPost;
import max.nlp.dal.blog.blogger.ParsedLocation;
import max.nlp.util.StopWords;
import max.nlp.wrappers.ml.weka.WekaFeatureExtractor;
import max.nlp.wrappers.stanford.StanfordNLP;

import com.mongodb.DBCursor;

/**
 * Extracts per-blog term-frequency (TF) features. Every post of a blog is
 * lower-cased, stop-word-filtered, and tokenized via Stanford NLP; the tokens
 * of all posts are merged into a single bag of words, and one TF value per
 * distinct token is emitted under a {@code "tf_"}-prefixed feature name.
 */
public class TFPerBlogComputer extends WekaFeatureExtractor<String, Double> {

	/** Feature-name prefix identifying TF features in the resulting vector. */
	private static final String PREFIX = "tf_";

	public TFPerBlogComputer() {
		super(PREFIX);
	}

	private final StanfordNLP annotator = StanfordNLP.getInstance();
	private final StopWords swRemover = StopWords.getInstance();
	// NOTE(review): not referenced anywhere in this class; kept because
	// getInstance() may initialize a shared DB connection — confirm before
	// removing.
	private final BloggerDB db = BloggerDB.getInstance();

	/**
	 * Computes the TF feature vector for a single blog.
	 *
	 * @param item expected to be a {@link BloggerBlog}; any other type causes
	 *             a {@link ClassCastException}
	 * @return map from prefixed token ({@code "tf_" + word}) to its TF value
	 *         computed over the concatenation of all the blog's posts; empty
	 *         if the blog has no posts or no tokens survive stop-word removal
	 */
	@Override
	public HashMap<String, Double> extractFeaturesForObject(Object item) {
		BloggerBlog blog = (BloggerBlog) item;

		List<String> allWords = new ArrayList<String>();
		HashSet<String> uniqueWords = new HashSet<String>();

		// Tokenize each post (lower-cased, stop words removed) and merge all
		// posts into one big document per blog.
		// NOTE(review): toLowerCase() uses the default locale; consider
		// toLowerCase(Locale.ROOT) for locale-independent behavior — confirm
		// no caller relies on the current behavior first.
		for (BloggerPost p : blog.getPosts()) {
			String text = swRemover.removeStopWords(p.getCleanContent()
					.toLowerCase());
			List<String> tokens = annotator.convertTextToTokens(text);
			allWords.addAll(tokens);
			uniqueWords.addAll(tokens);
		}

		// One TF entry per distinct token, keyed by the prefixed feature name.
		// Presized to avoid rehashing; the prefixing no longer mutates the
		// loop variable.
		HashMap<String, Double> tfCounts =
				new HashMap<String, Double>(Math.max(16, uniqueWords.size() * 2));
		for (String word : uniqueWords) {
			tfCounts.put(PREFIX + word, TF.computeTF(allWords, word));
		}
		return tfCounts;
	}

}
