package com.xcx.utils;

import com.huaban.analysis.jieba.JiebaSegmenter;
import com.huaban.analysis.jieba.WordDictionary;
import org.springframework.stereotype.Component;

import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

/**
 * Springboot项目整合jieba分词，实现语句最精确的切分
 */

@Component
public class TextUtils {

    // Path to the project's custom user dictionary.
    // NOTE(review): this is relative to the JVM working directory and will not
    // resolve inside a packaged jar — consider loading from the classpath instead.
    private static final Path USER_DICT_PATH =
            Paths.get("xcx-server/src/main/resources/zzj.txt");

    // Load the user dictionary exactly once at class initialization.
    // The original code called loadUserDict on every getSignaleWord invocation,
    // re-reading the dictionary file from disk for each request.
    static {
        WordDictionary.getInstance().loadUserDict(USER_DICT_PATH);
    }

    // Segmenter instance used for precise sentence splitting.
    private final JiebaSegmenter segmenter = new JiebaSegmenter();

    // Stop words excluded from the segmentation result.
    private final Set<String> stopWordsAndPunctuation =
            new HashSet<>(Arrays.asList("并且", "已经", "需要", "怎么办"));

    /**
     * Segments a sentence into distinct keywords.
     *
     * <p>Runs jieba sentence processing on {@code words}, then drops stop words,
     * single-character tokens (which removes most punctuation), and duplicates.
     *
     * @param words the sentence to segment; {@code null} or empty yields an empty list
     * @return the filtered, de-duplicated tokens in first-occurrence order
     */
    public List<String> getSignaleWord(String words) {
        // Guard against null/empty input; sentenceProcess(null) would throw an NPE.
        if (words == null || words.isEmpty()) {
            return Collections.emptyList();
        }
        List<String> tokens = segmenter.sentenceProcess(words);
        // Cheap length check first, then the stop-word set lookup.
        return tokens.stream()
                .filter(word -> word.length() > 1 && !stopWordsAndPunctuation.contains(word))
                .distinct()
                .collect(Collectors.toList());
    }
}


