package umich.max.geolocation.featextract.tf.blog;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;

import umich.max.geolocation.featextract.tf.TF;

import max.nlp.dal.blog.blogger.BlogAuthorProfile;
import max.nlp.dal.blog.blogger.BloggerBlog;
import max.nlp.dal.blog.blogger.BloggerDB;
import max.nlp.dal.blog.blogger.BloggerPost;
import max.nlp.dal.blog.blogger.ParsedLocation;
import max.nlp.util.ObjectSerializer;
import max.nlp.util.StopWords;
import max.nlp.wrappers.ml.weka.FeatureSaver;
import max.nlp.wrappers.ml.weka.WekaFeatureExtractor;
import max.nlp.wrappers.stanford.StanfordNLP;

import com.mongodb.DBCursor;

public class TFICFPerBlogComputer extends
		WekaFeatureExtractor<String, Double> {

	/** Feature-name prefix prepended to every emitted attribute key. */
	private static final String prefix = "tficf_";
	private StanfordNLP annotator = StanfordNLP.getInstance();
	private StopWords swRemover = StopWords.getInstance();
	// NOTE(review): db is never read in this class; kept because
	// BloggerDB.getInstance() may have connection side effects — confirm
	// before removing.
	private BloggerDB db = BloggerDB.getInstance();
	// Inverse category frequency per token, deserialized in the constructor
	// from the "icfs-states" file.
	HashMap<String, Double> icf;

	public TFICFPerBlogComputer() {
		super(prefix);
		ObjectSerializer<HashMap<String, Double>> loader = new ObjectSerializer<HashMap<String, Double>>();
		icf = loader.load("icfs-states");
	}



	/**
	 * Computes a TF-ICF feature vector for one blog. All posts are
	 * lower-cased, stop-word filtered, tokenized via Stanford NLP, and
	 * concatenated into a single document; each distinct token's term
	 * frequency is then multiplied by its inverse category frequency when
	 * one is present in the loaded ICF table.
	 *
	 * @param item expected to be a {@link BloggerBlog}; cast unconditionally
	 * @return map from prefixed feature name ("tficf_" + token) to TF-ICF weight
	 */
	@Override
	public HashMap<String, Double> extractFeaturesForObject(Object item) {

		BloggerBlog blog = (BloggerBlog) item;
		ArrayList<String> allWords = new ArrayList<String>();
		HashSet<String> uniqueWords = new HashSet<String>();

		List<BloggerPost> posts = blog.getPosts();
		for (BloggerPost p : posts) {

			// Convert the post text to tokens via Stanford NLP, and then
			// combine them into one big document for the whole blog.

			String text = swRemover.removeStopWords(p.getCleanContent()
					.toLowerCase());
			List<String> tokens = annotator.convertTextToTokens(text);
			allWords.addAll(tokens);
			uniqueWords.addAll(tokens);
		}

		// Compute the TF-ICF vector for the blog.
		HashMap<String, Double> tfCounts = new HashMap<String, Double>();
		for (String word : uniqueWords) {
			// BUG FIX: the original code prefixed the word BEFORE calling
			// TF.computeTF, so "tficf_" + word could never match the raw
			// tokens in allWords and every TF came out 0. Compute TF (and
			// look up ICF) on the raw token; use the prefix only for the
			// emitted feature key.
			double tf = TF.computeTF(allWords, word);
			// NOTE(review): assumes the serialized ICF map is keyed on raw
			// tokens rather than prefixed feature names — confirm against
			// the code that produced "icfs-states".
			Double icfVal = icf.get(word);
			if (icfVal != null) {
				tf = tf * icfVal;
			}
			tfCounts.put(prefix + word, tf);
		}
		return tfCounts;
	}

	// LEGACY

}
