package preprocessing;

import com.hankcs.hanlp.HanLP;
import com.hankcs.hanlp.seg.common.Term;
import utils.StopWords;

import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.util.List;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

public class ChinesePreprocessor implements Preprocessor {
    /** Matches every character that is neither a CJK unified ideograph nor whitespace. */
    private static final Pattern NON_CHINESE = Pattern.compile("[^\u4e00-\u9fa5\\s]");
    /** Matches HTML/XML tags; compiled once instead of re-compiling via String.replaceAll per call. */
    private static final Pattern HTML_TAG = Pattern.compile("<[^>]+>");
    /** Default location of the Chinese stop-word list. */
    private static final String DEFAULT_STOPWORDS_PATH =
            "src/main/resources/stopwords/chinese_stopwords.txt";

    private final StopWords stopWords;

    /**
     * Creates a preprocessor backed by the default stop-word list.
     *
     * @throws IOException if the stop-word file cannot be read
     */
    public ChinesePreprocessor() throws IOException {
        this(DEFAULT_STOPWORDS_PATH);
    }

    /**
     * Creates a preprocessor backed by a caller-supplied stop-word list.
     *
     * @param stopWordsPath path to the stop-word file to load
     * @throws IOException if the stop-word file cannot be read
     */
    public ChinesePreprocessor(String stopWordsPath) throws IOException {
        this.stopWords = new StopWords(stopWordsPath);
    }

    /**
     * Normalizes raw text: strips HTML tags, removes non-Chinese characters,
     * segments the remainder with HanLP, and drops stop words.
     *
     * @param text raw input text (may contain HTML markup)
     * @return the surviving tokens joined with single spaces
     * @throws IOException declared by the {@code Preprocessor} interface; not thrown here
     */
    @Override
    public String preprocessText(String text) throws IOException {
        // Strip HTML tags first so tag names do not leak into the token stream.
        String noHtml = HTML_TAG.matcher(text).replaceAll("");

        // Replace every non-Chinese, non-whitespace character with a space.
        String onlyChinese = NON_CHINESE.matcher(noHtml).replaceAll(" ");

        // Segment with HanLP, drop stop words, and join in one pass.
        return tokenize(onlyChinese).stream()
                .filter(token -> !stopWords.isStopWord(token))
                .collect(Collectors.joining(" "));
    }

    /**
     * Reads {@code inputFile}, preprocesses its content, and writes the result
     * to {@code outputFile}. Charset is pinned to UTF-8: the previous version
     * used the platform default, which corrupts Chinese text on non-UTF-8 JVMs.
     *
     * @param inputFile  file to read
     * @param outputFile file to create or overwrite with the processed text
     * @throws IOException on any read or write failure
     */
    @Override
    public void preprocessFile(File inputFile, File outputFile) throws IOException {
        String content = new String(Files.readAllBytes(inputFile.toPath()), StandardCharsets.UTF_8);
        String processedContent = preprocessText(content);
        Files.write(outputFile.toPath(), processedContent.getBytes(StandardCharsets.UTF_8));
    }

    /**
     * Segments {@code text} with HanLP and returns the non-blank surface forms.
     *
     * @param text text to segment
     * @return segmented words with whitespace-only entries removed
     */
    @Override
    public List<String> tokenize(String text) {
        List<Term> termList = HanLP.segment(text);
        return termList.stream()
                .map(term -> term.word)
                .filter(word -> !word.trim().isEmpty())
                .collect(Collectors.toList());
    }
}