package com.mxc.website.util;

import com.hankcs.hanlp.HanLP;
import com.hankcs.hanlp.dictionary.CustomDictionary;
import com.hankcs.hanlp.seg.common.Term;

import java.io.*;
import java.nio.charset.StandardCharsets;
import java.util.*;

/**
 * @author wendongdong
 */
public class WordSegmentationUtil {

    /** Utility class — not meant to be instantiated. */
    private WordSegmentationUtil() {
    }

    /**
     * Reads a word list (one word per line) from a classpath resource into a set.
     * Blank lines are skipped and every line is trimmed of leading/trailing
     * whitespace before being stored.
     *
     * @param resourcePath classpath-relative path of the word file
     * @return the set of words; empty if the resource is missing or unreadable
     */
    private static Set<String> readWordsFromResource(String resourcePath) {
        Set<String> words = new HashSet<>();
        // Load via the class loader so the file is found inside the jar as well.
        ClassLoader classLoader = WordSegmentationUtil.class.getClassLoader();
        InputStream inputStream = classLoader.getResourceAsStream(resourcePath);
        if (inputStream == null) {
            // getResourceAsStream returns null for a missing resource; previously
            // this caused an NPE inside try-with-resources instead of a clean error.
            System.err.println("Resource not found on classpath: " + resourcePath);
            return words;
        }
        // Explicit UTF-8: the dictionaries contain Chinese text and the pre-JDK18
        // platform default charset is not guaranteed to be UTF-8.
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(inputStream, StandardCharsets.UTF_8))) {
            String line;
            while ((line = reader.readLine()) != null) {
                // Trim leading/trailing whitespace (the original comment promised
                // this but the code never did it, so padded entries failed to match).
                String word = line.trim();
                if (!word.isEmpty()) {
                    words.add(word);
                }
            }
        } catch (IOException e) {
            System.err.println("读取文件时出现错误: " + e.getMessage());
        }
        return words;
    }

    /**
     * Reads the stop-word ("isolation word") file into a set.
     *
     * @param filePath classpath-relative path of the stop-word file
     * @return the stop words; empty on a missing or unreadable resource
     * @throws IOException retained for source compatibility with existing callers;
     *         read errors are currently reported to stderr instead of thrown
     */
    public static Set<String> readStopWordsFromFile(String filePath) throws IOException {
        return readWordsFromResource(filePath);
    }

    /**
     * Loads the high-frequency word dictionary bundled at
     * {@code dictionary/highDict.txt}.
     *
     * @return the high-frequency words; empty on a missing or unreadable resource
     * @throws IOException retained for source compatibility with existing callers
     */
    public static Set<String> HighFrequencyWords() throws IOException {
        return readWordsFromResource("dictionary/highDict.txt");
    }

    /**
     * Segments {@code text} with HanLP and removes stop words from the result.
     * High-frequency words are registered in HanLP's custom dictionary first so
     * they are kept intact by the segmenter.
     *
     * @param text              the text to segment
     * @param stopWordsFilePath classpath-relative path of the stop-word file
     * @return the segmented words with stop words removed; an empty list if any
     *         error occurs during segmentation
     */
    public static List<String> segmentWithoutStopWords(String text, String stopWordsFilePath) {
        try {
            Set<String> stopWords = readStopWordsFromFile(stopWordsFilePath);
            // Register high-frequency words so HanLP does not split them apart.
            HighFrequencyWords().forEach(CustomDictionary::add);
            List<String> result = new ArrayList<>();
            for (Term term : HanLP.segment(text)) {
                String word = term.word.trim();
                // Keep the word only if it is non-blank and not a stop word.
                if (!word.isEmpty() && !stopWords.contains(word)) {
                    result.add(word);
                }
            }
            return result;
        } catch (Exception e) {
            // Best-effort: callers get an empty list rather than an exception.
            e.printStackTrace();
            return Collections.emptyList();
        }
    }

}
