package com.qf.smsplatform.strategy.service.impl;

import com.qf.smsplatform.common.constants.CacheConstants;
import com.qf.smsplatform.common.constants.StrategyConstants;
import com.qf.smsplatform.common.model.StandardSubmit;
import com.qf.smsplatform.strategy.dfa.SensitiveWordFilter;
import com.qf.smsplatform.strategy.service.Strategy;
import com.qf.smsplatform.strategy.service.api.CacheService;
import com.qf.smsplatform.strategy.util.PushMsgUtil;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.util.StringUtils;
import org.wltea.analyzer.core.IKSegmenter;
import org.wltea.analyzer.core.Lexeme;

import java.io.IOException;
import java.io.StringReader;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Set;

/**
 * Sensitive-word ("dirty word") filtering strategy.
 *
 * <p>Registered as the Spring bean {@code "dirtyWord"}. Checks the submitted
 * SMS content against the sensitive-word dictionary using the DFA-based
 * {@link SensitiveWordFilter} singleton. (An earlier Redis-per-token approach
 * using the IK segmenter was removed in favor of the DFA filter.)
 */
@Service(value = "dirtyWord")
@Slf4j
public class DirtyWordStrategy implements Strategy {

    @Autowired
    private CacheService cacheService;

    @Autowired
    private PushMsgUtil pushMsgUtil;

    /**
     * Runs the sensitive-word check on the submitted message.
     *
     * @param submit the standard submit record carrying the SMS content
     * @return {@code true} if no sensitive word was found and processing may
     *         continue; {@code false} if the content matched a sensitive word,
     *         in which case a failure log entry and a status report have
     *         already been pushed via {@link PushMsgUtil}
     */
    @Override
    public boolean strategy(StandardSubmit submit) {
        log.info("【策略模块】 敏感词策略开始执行！！！");

        // DFA-based matching: the singleton filter holds the pre-built
        // sensitive-word trie, so no per-call dictionary lookup is needed.
        SensitiveWordFilter sensitiveWordFilter = SensitiveWordFilter.getInstance();

        // NOTE(review): presumably returns the length of the matched sensitive
        // word (0 when nothing matched); minMatchType stops at the shortest
        // match — confirm against SensitiveWordFilter's contract.
        int matchedLength = sensitiveWordFilter.checkSensitiveWord(
                submit.getMessageContent(), 0, SensitiveWordFilter.minMatchType);

        // Guard clause: no match means the message passes this strategy.
        if (matchedLength == 0) {
            log.info("【策略模块】 敏感词策略执行成功！！！");
            return true;
        }

        // Sensitive content found: record the failure and push a report so the
        // caller/downstream is notified, then veto the submit.
        log.info("【策略模块】 敏感词策略执行失败。。。");
        pushMsgUtil.sendLog(submit, StrategyConstants.STRATEGY_ERROR_DIRTY_WORDS);
        pushMsgUtil.pushReport(submit, StrategyConstants.STRATEGY_ERROR_DIRTY_WORDS);
        return false;
    }
}
