package org.hit.burkun.stat;

import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.ansj.domain.Term;
import org.ansj.splitWord.analysis.NlpAnalysis;
import org.ansj.splitWord.analysis.ToAnalysis;
import org.hit.burkun.model.Content;
import org.hit.burkun.utils.StaticMethods;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.huaban.analysis.jieba.JiebaSegmenter;

/**
 * Analyzes sentences and splits them into words: topic strings are segmented
 * with the dictionary-based {@link ToAnalysis} and clean content with the
 * NLP-model-based {@link NlpAnalysis}. The resulting term maps are serialized
 * to {@code topicSplit.out} / {@code contentSplit.out}.
 * 
 * @author mingchen
 * @date 2015-08-14
 */
public class ContentIndex {
	// Logger
	private final Logger log = LoggerFactory.getLogger(ContentIndex.class);

	// mid -> training content; loaded once in the constructor, never reassigned
	private final Map<String, Content> mid2contentTrain;
	// mid -> segmented topic terms (null when the message has no topic text)
	private final Map<String, List<Term>> topicSplit = new HashMap<String, List<Term>>();
	// mid -> segmented clean-content terms
	private final Map<String, List<Term>> contentSplit = new HashMap<String, List<Term>>();

	/**
	 * Loads the mid-to-content training map via {@link StaticMethods}.
	 */
	public ContentIndex() {
		mid2contentTrain = StaticMethods.getMid2ContentTrain();
	}

	/**
	 * Splits the topic words of every training message and serializes the
	 * result into {@code topicSplit.out}. When the primary topic list is
	 * blank, the "other topic" list is appended as a fallback; when neither
	 * yields text, {@code null} is stored for that mid.
	 */
	public void splitTopicWords() {
		for (Map.Entry<String, Content> entry : mid2contentTrain.entrySet()) {
			String mid = entry.getKey();
			Content content = entry.getValue();
			// Use StringBuilder instead of String += to avoid O(n^2)
			// concatenation inside the loop.
			StringBuilder topicBuilder = new StringBuilder();
			for (String sub : content.getTopics()) {
				topicBuilder.append(sub);
			}
			// Fallback: if the primary topics are whitespace-only, append the
			// secondary topic list (appending, not replacing, matches the
			// original accumulation behavior).
			if (topicBuilder.toString().trim().isEmpty()) {
				for (String sub : content.getOtherTopic()) {
					topicBuilder.append(sub);
				}
			}
			String topic = topicBuilder.toString();
			if (topic.trim().isEmpty()) {
				// No topic text at all: record an explicit null marker.
				topicSplit.put(mid, null);
				log.info("{} splited  and dump []", mid);
			} else {
				log.info(topic);
				List<Term> list = ToAnalysis.parse(topic);
				topicSplit.put(mid, list);
				log.info("{} splited  and dump [{},{}]", mid, list.size(), topicSplit.size());
			}
		}
		Serialization.save(topicSplit, "topicSplit.out");
		log.info("topicSplit has been serialized into topicSplit.out");
	}

	/**
	 * Splits the clean content string of every training message with the NLP
	 * analyzer and serializes the result into {@code contentSplit.out}.
	 */
	public void splitCleanContent() {
		for (Map.Entry<String, Content> entry : mid2contentTrain.entrySet()) {
			String mid = entry.getKey();
			List<Term> list = NlpAnalysis.parse(entry.getValue().getCleanStr());
			contentSplit.put(mid, list);
			log.info("{} splited  and dump [{},{}]", mid, list.size(), contentSplit.size());
		}
		Serialization.save(contentSplit, "contentSplit.out");
		log.info("contentSplit has been serialized into contentSplit.out");
	}

	/**
	 * Removes stop words from the split results.
	 * NOTE(review): intentionally a no-op — not yet implemented.
	 */
	public void removeStops() {
		// TODO implement stop-word removal over topicSplit/contentSplit
	}
}
