package com.sdyc.ndmp.cls.engine;

import com.google.common.collect.ImmutableMap;
import com.huaban.analysis.jieba.JiebaSegmenter;
import com.huaban.analysis.jieba.SegToken;
import com.huaban.analysis.jieba.WordDictionary;
import com.sdyc.ndmp.cls.dtd.DataSentence;
import com.sdyc.ndmp.cls.dtd.KeywordCondition;
import com.sdyc.ndmp.cls.model.MomW;
import com.sdyc.ndmp.cls.model.SonW;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import java.io.IOException;
import java.io.StringReader;
import java.util.*;
import java.util.concurrent.atomic.AtomicLong;

/**
 * Engine that applies keyword-condition filtering to posts: it registers the
 * condition's keywords in the jieba user dictionary and segments post content
 * into an offset-indexed token map.
 */
public class SenteceKeywordEngine {

    /**
     * Logger for this engine.
     */
    private static final Log LOG = LogFactory.getLog(SenteceKeywordEngine.class);

    /** Keyword condition this engine filters with; assigned in {@link #init(KeywordCondition)}. */
    private KeywordCondition condition;

    /** Tokenizer used to split post content into indexed tokens. */
    private JiebaSegmenter jieba = new JiebaSegmenter();

    /** Shared jieba word dictionary (singleton), extended with the condition's keywords. */
    private WordDictionary wd = WordDictionary.getInstance();

    protected Filter filter;

    protected Filter filterSameScope;

    protected Filter singleFilter;

    /**
     * Monotonically increasing counter used to synthesize distinct frequencies
     * for user-dictionary entries; shared across all engine instances.
     */
    protected static final AtomicLong counter = new AtomicLong(999);

    public SenteceKeywordEngine() {
    }

    /**
     * Initializes the filter chain and the user dictionary from the given condition.
     *
     * @param condition the keyword condition to filter with; must expose its
     *                  mom-word group via {@code getMomW()}
     */
    public void init(KeywordCondition condition) {
        this.condition = condition;
        filter = FilterFactory.newInstance(condition);
        initDictWords();
    }

    /**
     * Loads every keyword of the condition (the mom word plus all son words)
     * into the jieba user dictionary so subsequent segmentation keeps the
     * keywords intact instead of splitting them.
     */
    private void initDictWords() {
        MomW momW = condition.getMomW();
        List<SonW> sons = momW.getSons();
        List<String> allWords = new ArrayList<String>();
        try {
            allWords.addAll(momW.getWords());
            for (SonW son : sons) {
                allWords.addAll(son.getWords());
            }
            // Shorter words first, so longer compounds receive the later
            // (higher) synthetic frequency from the shared counter.
            Collections.sort(allWords, new Comparator<String>() {
                @Override
                public int compare(String o1, String o2) {
                    if (o1.length() != o2.length()) {
                        return Integer.compare(o1.length(), o2.length());
                    }
                    return o1.compareTo(o2);
                }
            });
            StringBuilder sb = new StringBuilder();
            for (String word : allWords) {
                // Known words get their existing frequency bumped so our keywords
                // win during segmentation; unknown words get a fresh frequency.
                if (wd.containsWord(word)) {
                    sb.append(word).append('\t')
                            .append(wd.getFreq(word) + counter.addAndGet(1)).append('\n');
                } else {
                    sb.append(word).append('\t').append(counter.addAndGet(1)).append('\n');
                }
            }
            wd.loadUserDict(new StringReader(sb.toString()));
        } catch (IOException e) {
            // Route the failure to the application log instead of printStackTrace,
            // preserving the cause; the engine continues with the base dictionary.
            LOG.error("failed to load user dictionary for condition " + condition, e);
        }
    }

    /**
     * Applies this rule to a single post.
     *
     * @param sentence the post to evaluate
     * @return a result map with keys {@code success} and {@code message}
     */
    public Map<String, Object> rule(DataSentence sentence) {
        // NOTE(review): stub — always reports success and echoes the input.
        // The real keyword evaluation presumably belongs here; confirm before
        // relying on this result.
        return ImmutableMap.<String, Object>of("success", true, "message", "data:" + sentence);
    }



    /**
     * Segments the given text and builds an index from each token to the set of
     * its start offsets, skipping tokens that are blank or pure punctuation.
     *
     * @param titleS text to segment
     * @return map of token word to the set of its start offsets within {@code titleS}
     */
    private Map<String, Set<Integer>> getsplitwordMap(String titleS) {
        Map<String, Set<Integer>> splitwordMap = new HashMap<String, Set<Integer>>();
        List<SegToken> tokens = jieba.process(titleS, JiebaSegmenter.SegMode.INDEX);
        for (SegToken token : tokens) {
            // Skip tokens that are entirely Unicode punctuation ("\pP").
            if (StringUtils.isEmpty(token.word.replaceAll("\\pP", ""))) {
                continue;
            }
            // Skip tokens that are only whitespace.
            if (StringUtils.isEmpty(token.word.trim())) {
                continue;
            }
            // Single get-or-create path replaces the duplicated add() branches.
            Set<Integer> offsets = splitwordMap.get(token.word);
            if (offsets == null) {
                offsets = new HashSet<Integer>();
                splitwordMap.put(token.word, offsets);
            }
            offsets.add(token.startOffset);
        }
        return splitwordMap;
    }
}
