package cn.git.analysis;

import cn.git.init.AnalyzerInit;
import com.alibaba.fastjson.JSONObject;
import com.huaban.analysis.jieba.JiebaSegmenter;
import com.huaban.analysis.jieba.SegToken;
import lombok.extern.slf4j.Slf4j;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

import java.util.ArrayList;
import java.util.List;
import java.util.stream.Collectors;

/**
 * @description: 分词测试controller
 * @program: bank-credit-sy
 * @author: lixuchun
 * @create: 2024-08-13
 */
@Slf4j
@RestController
@RequestMapping("/analyzer")
public class AnalyzerController {

    /**
     * Shared segmenter instance. Constructing a {@link JiebaSegmenter} triggers
     * dictionary loading, which is expensive — creating one per request (as the
     * original code did) wastes time and memory. The jieba-analysis segmenter is
     * commonly treated as thread-safe once its dictionary is loaded — NOTE(review):
     * confirm against the library version in use.
     */
    private static final JiebaSegmenter SEGMENTER = new JiebaSegmenter();

    /**
     * Segmentation test endpoint: runs a fixed set of sample sentences through
     * jieba in INDEX mode, collects every token found in the sensitive-word set
     * {@link AnalyzerInit#sensitiveWordsSet}, and returns the matches as a JSON array.
     *
     * @return JSON string of the sensitive words detected across all sample sentences
     */
    @GetMapping("/test")
    public String test() {
        // Sample input; "傻X" is a custom dictionary word. In real usage the incoming
        // string would be stripped of whitespace before segmentation.
        String[] sentences = new String[] {
                "傻X上海这是一个伸手不见五指的黑夜。我叫孙悟小狗蛋X空咸阳6合彩，我爱北京，我爱Python和C++。h动画", "我不喜欢十八大日本和服。", "雷猴回归人间。"
        };

        // Segment each sentence and collect tokens that match the sensitive-word set.
        List<String> sentenceWordList = new ArrayList<>();
        for (String sentence : sentences) {
            List<SegToken> process = SEGMENTER.process(sentence, JiebaSegmenter.SegMode.INDEX);
            // Parameterized logging: JSON serialization cost is only paid when INFO is enabled.
            log.info("分词结果为 : {}", JSONObject.toJSONString(process.stream().map(word -> word.word).collect(Collectors.toList())));
            process.forEach(segToken -> {
                if (AnalyzerInit.sensitiveWordsSet.contains(segToken.word)) {
                    sentenceWordList.add(segToken.word);
                }
            });
        }

        // Return the detected sensitive words as JSON.
        return JSONObject.toJSONString(sentenceWordList);
    }
}
