package org.example;

import edu.stanford.nlp.ling.CoreLabel;
import edu.stanford.nlp.pipeline.CoreDocument;
import edu.stanford.nlp.pipeline.StanfordCoreNLP;

import java.io.*;
import java.nio.charset.StandardCharsets;
import java.util.HashSet;
import java.util.Locale;
import java.util.Properties;
import java.util.Set;
import java.util.regex.*;

/**
 * Utility for extracting the set of distinct, lemmatized words from a UTF-8 text file
 * using the Stanford CoreNLP pipeline.
 *
 * <p>Thread-safety note: a single shared pipeline instance is used; CoreNLP annotation
 * is generally safe for concurrent use, but verify against the CoreNLP version in use.
 */
public final class TextUtil {

    /** Shared CoreNLP pipeline; construction is expensive, so it is built exactly once. */
    private static final StanfordCoreNLP pipeline;

    /** Accepts lemmas made up solely of lowercase ASCII letters (filters numbers, punctuation, mixed tokens). */
    private static final Pattern LOWERCASE_WORD = Pattern.compile("[a-z]+");

    static {
        // Initialize the CoreNLP pipeline with only the annotators lemmatization requires.
        Properties props = new Properties();
        props.setProperty("annotators", "tokenize,ssplit,pos,lemma");
        pipeline = new StanfordCoreNLP(props);
    }

    private TextUtil() {
        // Utility class: static methods only, no instances.
    }

    /**
     * Reads the given UTF-8 text file, lemmatizes its contents with CoreNLP, and returns
     * the distinct lemmas that consist only of ASCII letters {@code a-z}.
     *
     * @param file the UTF-8 encoded text file to read
     * @return a mutable set of distinct lowercase lemmas; empty if the file contains no matching tokens
     * @throws IOException if the file cannot be opened or read
     */
    public static Set<String> extractWords(File file) throws IOException {
        StringBuilder text = new StringBuilder();
        try (BufferedReader br = new BufferedReader(
                new InputStreamReader(new FileInputStream(file), StandardCharsets.UTF_8))) {
            String line;
            while ((line = br.readLine()) != null) {
                // Join lines with a space so tokens at line breaks do not fuse together.
                text.append(line).append(' ');
            }
        }

        // Run tokenization, sentence splitting, POS tagging and lemmatization.
        CoreDocument document = new CoreDocument(text.toString());
        pipeline.annotate(document);

        Set<String> words = new HashSet<>();
        for (CoreLabel tok : document.tokens()) {
            // Locale.ROOT avoids locale-dependent case mapping (e.g. Turkish dotless i),
            // which would otherwise let valid words fail the [a-z]+ filter.
            String lemma = tok.lemma().toLowerCase(Locale.ROOT);
            if (LOWERCASE_WORD.matcher(lemma).matches()) {
                words.add(lemma);
            }
        }
        return words;
    }
}