package com.lucifer.cloud.boot.blog.chat.util;

import com.hankcs.hanlp.HanLP;
import com.hankcs.hanlp.seg.common.Term;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.core.io.Resource;
import org.springframework.stereotype.Service;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * @author lucifer
 * @date 2025/2/28 10:32
 */
@Service
public class KeywordExtractorUtil {

    /** Matches runs of ASCII letters; compiled once and reused across calls. */
    private static final Pattern ENGLISH_WORD_PATTERN = Pattern.compile("[a-zA-Z]+");

    /** Stop-word list resource: one word per line, UTF-8, on the classpath. */
    @Value("classpath:stopWords.txt")
    private Resource stopWordsFile;

    /**
     * Extracts keywords from the given text and returns a lower-cased
     * word -&gt; frequency map. Chinese nouns come from HanLP segmentation,
     * English words from a regex scan; both passes filter stop words.
     *
     * NOTE: the Upper-camel method name is kept as-is for backward
     * compatibility with existing callers.
     *
     * @param text mixed Chinese/English input text
     * @return map from keyword to occurrence count (never null, possibly empty)
     */
    public Map<String, Integer> KeywordExtractor(String text) {
        // Stop-word list
        Set<String> stopWords = loadStopWords();
        // Accumulated word frequencies
        Map<String, Integer> frequencyMap = new HashMap<>();

        // FIX: English tokens were previously discarded whenever Chinese
        // characters outnumbered English ones (only the else-branch ran the
        // English pass). Both extractors now always run; purely-Chinese text
        // is unaffected because the regex pass simply matches nothing.
        chineseKeywordExtractor(text, stopWords, frequencyMap);
        englishKeywordExtractor(text, stopWords, frequencyMap);
        return frequencyMap;
    }


    /**
     * Returns the top-N keywords of the text, most frequent first.
     *
     * @param text input text
     * @param topN maximum number of keywords to return
     * @return at most {@code topN} keywords ordered by descending frequency
     */
    public List<String> loadTopKeywords(String text, int topN) {
        Map<String, Integer> keywordMap = KeywordExtractor(text);
        List<Map.Entry<String, Integer>> entryList = new ArrayList<>(keywordMap.entrySet());
        // Sort by frequency, highest first.
        entryList.sort(Map.Entry.<String, Integer>comparingByValue().reversed());
        List<String> topKeywords = new ArrayList<>();
        for (int i = 0; i < Math.min(topN, entryList.size()); i++) {
            topKeywords.add(entryList.get(i).getKey());
        }
        return topKeywords;
    }


    /**
     * Segments the text with HanLP and counts noun frequencies into
     * {@code frequencyMap}. Only terms tagged "n"/"noun" are kept, to avoid
     * particles, verbs etc. polluting the result.
     *
     * @param text         input text
     * @param stopWords    words to exclude
     * @param frequencyMap accumulator, mutated in place and also returned
     * @return {@code frequencyMap}, for call-chaining
     */
    private Map<String, Integer> chineseKeywordExtractor(String text, Set<String> stopWords, Map<String, Integer> frequencyMap) {
        // 1. Segment the text (HanLP handles mixed Chinese/English input).
        List<Term> termList = HanLP.segment(text);

        // 2. Count noun frequencies.
        for (Term term : termList) {
            // FIX: lower-case BEFORE the stop-word check so e.g. "The" is
            // filtered when the stop list contains "the". Locale.ROOT keeps
            // the casing locale-independent (avoids the Turkish-I problem).
            String word = term.word.toLowerCase(Locale.ROOT);
            if (stopWords.contains(word)) {
                continue;
            }
            // NOTE(review): HanLP natures are "n", "nr", "ns", ... — "noun"
            // should never occur; the extra check is kept for compatibility.
            String nature = term.nature.toString();
            if (nature.equals("n") || nature.equals("noun")) {
                frequencyMap.merge(word, 1, Integer::sum);
            }
        }
        return frequencyMap;
    }

    /**
     * Scans the text for runs of ASCII letters and counts their lower-cased
     * frequencies into {@code frequencyMap}, skipping stop words.
     *
     * @param text         input text
     * @param stopWords    words to exclude
     * @param frequencyMap accumulator, mutated in place and also returned
     * @return {@code frequencyMap}, for call-chaining
     */
    private Map<String, Integer> englishKeywordExtractor(String text, Set<String> stopWords, Map<String, Integer> frequencyMap) {
        // 3. Extract English words via regex (pattern is a shared constant).
        Matcher matcher = ENGLISH_WORD_PATTERN.matcher(text);

        while (matcher.find()) {
            String word = matcher.group().toLowerCase(Locale.ROOT);
            // FIX: English stop words (e.g. "the", "and") were previously
            // counted; filter them just like the Chinese pass does.
            if (stopWords.contains(word)) {
                continue;
            }
            frequencyMap.merge(word, 1, Integer::sum);
        }
        return frequencyMap;
    }




    /**
     * Loads the stop-word list (one word per line) from the classpath
     * resource. Best effort: on I/O failure the error is reported on stderr
     * and a (possibly partial) set is returned, so keyword extraction
     * degrades gracefully instead of failing.
     *
     * @return set of stop words, possibly empty
     */
    public Set<String> loadStopWords() {
        Set<String> stopWords = new HashSet<>();
        // FIX: read with an explicit UTF-8 charset — relying on the platform
        // default silently corrupts the Chinese stop-word file on non-UTF-8 JVMs.
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(stopWordsFile.getInputStream(), StandardCharsets.UTF_8))) {
            String line;
            while ((line = reader.readLine()) != null) {
                String word = line.trim();
                // FIX: skip blank lines instead of adding "" to the set.
                if (!word.isEmpty()) {
                    stopWords.add(word);
                }
            }
        } catch (IOException e) {
            System.err.println("Error loading stop words: " + e.getMessage());
        }
        return stopWords;
    }

    /**
     * @param c character to test
     * @return true if {@code c} is a CJK unified ideograph in the basic BMP
     *         range U+4E00..U+9FA5 (common Chinese characters)
     */
    private boolean isChinese(char c) {
        return c >= 0x4e00 && c <= 0x9fa5;
    }

}
