package mpo.core;

import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.ansj.domain.Term;
import org.ansj.splitWord.analysis.NlpAnalysis;

import mpo.model.News;

public class ClusterAnalysis {

	/**
	 * Computes the term frequency (TF) for each article.
	 * <p>
	 * TF of a word is its occurrence count divided by the <em>total</em>
	 * number of words in the article (not the number of distinct words, which
	 * the previous implementation used as the denominator).
	 *
	 * @param list articles to analyse; each article's text is read from its
	 *             {@code "content"} attribute
	 * @return a map from each article to its word-to-TF map; an article with
	 *         empty content maps to an empty TF map
	 */
	public static Map<News, Map<String, Double>> getTf(List<News> list) {
		Map<News, Map<String, Double>> allTf = new HashMap<News, Map<String, Double>>();

		for (News news : list) {
			Map<String, Integer> words = segString(news.getStr("content"));

			// TF denominator: total token count of the document.
			double total = 0;
			for (Integer count : words.values()) {
				total += count;
			}

			Map<String, Double> tf = new HashMap<String, Double>();
			if (total > 0) { // avoid 0/0 for empty content
				for (Map.Entry<String, Integer> entry : words.entrySet()) {
					tf.put(entry.getKey(), entry.getValue() / total);
				}
			}

			allTf.put(news, tf);
		}

		return allTf;
	}

	/**
	 * Computes TF-IDF weights from the per-article TF maps.
	 * <p>
	 * For each word, {@code idf = log(N / df)} where {@code N} is the number
	 * of articles and {@code df} is the number of articles containing the
	 * word. The returned maps hold {@code tf * idf}; the input maps are not
	 * modified. (The previous implementation was a stub returning its input.)
	 *
	 * @param tf per-article term frequencies, as produced by {@link #getTf}
	 * @return a new map from each article to its word-to-TF-IDF map
	 */
	public static Map<News, Map<String, Double>> getIdf(
			Map<News, Map<String, Double>> tf) {

		int docCount = tf.size();

		// Document frequency: in how many articles does each word appear?
		Map<String, Integer> df = new HashMap<String, Integer>();
		for (Map<String, Double> docTf : tf.values()) {
			for (String word : docTf.keySet()) {
				Integer seen = df.get(word);
				df.put(word, seen == null ? 1 : seen + 1);
			}
		}

		Map<News, Map<String, Double>> allTfIdf = new HashMap<News, Map<String, Double>>();
		for (Map.Entry<News, Map<String, Double>> docEntry : tf.entrySet()) {
			Map<String, Double> tfIdf = new HashMap<String, Double>();
			for (Map.Entry<String, Double> wordEntry : docEntry.getValue().entrySet()) {
				// df.get(...) is never null here: every word in a document's
				// TF map was counted in the df pass above.
				double idf = Math.log((double) docCount / df.get(wordEntry.getKey()));
				tfIdf.put(wordEntry.getKey(), wordEntry.getValue() * idf);
			}
			allTfIdf.put(docEntry.getKey(), tfIdf);
		}

		return allTfIdf;
	}

	/**
	 * Segments the given text with ansj's NLP analyser and counts each
	 * word's occurrences.
	 *
	 * @param content raw article text; may be {@code null} or empty
	 * @return a map from word to occurrence count; empty when there is
	 *         nothing to segment
	 */
	private static Map<String, Integer> segString(String content) {

		Map<String, Integer> words = new HashMap<String, Integer>();

		// Guard against null as well as empty input; the original only
		// checked isEmpty() and would throw NPE on null content.
		if (content == null || content.isEmpty()) {
			return words;
		}

		List<Term> terms = NlpAnalysis.parse(content);
		for (Term term : terms) {
			String name = term.getName();
			Integer count = words.get(name); // single lookup instead of containsKey+get
			words.put(name, count == null ? 1 : count + 1);
		}

		return words;
	}
}
