package org.qengine.utils.Deduplicator;


import com.huaban.analysis.jieba.JiebaSegmenter;
import com.huaban.analysis.jieba.SegToken;

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

public class TextPreprocessor {

    // 停用词表
    private static final Set<String> STOP_WORDS = new HashSet<>(Arrays.asList(
            "的", "是", "在", "和", "了", "有", "我", "你", "他", "这", "那", "就", "也", "不" ,"请"
    ));

    // 分词器
    private static final JiebaSegmenter segmenter = new JiebaSegmenter();

    // 文本预处理
    public static String preprocess(String text) {
        // 分词
        List<SegToken> tokens = segmenter.process(text, JiebaSegmenter.SegMode.SEARCH);
        // 过滤停用词和标点符号
        return tokens.stream()
                .filter(token -> !isPunctuation(token.word)) // 过滤标点符号
                .filter(token -> !STOP_WORDS.contains(token.word)) // 过滤停用词
                .map(token -> token.word)
                .collect(Collectors.joining(" "));
    }

    // 判断是否为标点符号
    private static boolean isPunctuation(String word) {
        return word.matches("[\\pP\\p{Punct}]");
    }

}