package Word_Frequency;

import com.huaban.analysis.jieba.JiebaSegmenter;
import com.huaban.analysis.jieba.SegToken;

import java.io.IOException;
import java.util.*;

/**
 * Segments Chinese text with jieba and reports word occurrences, excluding
 * stop words and whitespace-only tokens.
 */
class WordFrequency {
    private final Set<String> stopWords;
    private final JiebaSegmenter segmenter;

    /**
     * @param stopWordsFilePath path passed to {@link StopWordsLoader#loadStopWords}
     * @throws IOException if the stop-word file cannot be read
     */
    public WordFrequency(String stopWordsFilePath) throws IOException {
        this.stopWords = StopWordsLoader.loadStopWords(stopWordsFilePath);
        this.segmenter = new JiebaSegmenter();
    }

    /**
     * Counts occurrences of each accepted word in {@code text}.
     *
     * @param text input text to segment
     * @return map from word to occurrence count (insertion order unspecified)
     */
    public Map<String, Integer> getWordFrequency(String text) {
        Map<String, Integer> wordFrequency = new HashMap<>();
        for (String word : getWords(text)) {
            wordFrequency.merge(word, 1, Integer::sum);
        }
        return wordFrequency;
    }

    /**
     * Segments {@code text} and returns the accepted words in token order,
     * preserving duplicates.
     *
     * @param text input text to segment
     * @return filtered word list
     */
    public List<String> getWords(String text) {
        // SEARCH mode favors recognition of longer words.
        List<SegToken> tokens = segmenter.process(text, JiebaSegmenter.SegMode.SEARCH);
        List<String> words = new ArrayList<>();
        for (SegToken token : tokens) {
            // strip() is Unicode-aware: unlike trim() it also removes the
            // full-width ideographic space (U+3000) common in Chinese text,
            // so whitespace-only tokens collapse to the empty string.
            String word = token.word.strip();
            if (!word.isEmpty() && !stopWords.contains(word)) {
                words.add(word);
            }
        }
        return words;
    }
}