package org.example.comp_algorithm_back.service.impl;

import com.baomidou.mybatisplus.extension.service.impl.ServiceImpl;
import com.hankcs.hanlp.HanLP;
import com.hankcs.hanlp.seg.common.Term;

import org.example.comp_algorithm_back.common.CommonResponse;
import org.example.comp_algorithm_back.domain.WordFrequency;
import org.example.comp_algorithm_back.mapper.WordFrequencyMapper;
import org.example.comp_algorithm_back.service.WordFrequencyService;
import org.example.comp_algorithm_back.util.DataCleanUtil;
import org.example.comp_algorithm_back.util.PathClass;
import org.example.comp_algorithm_back.vo.WordNumVo;
import org.springframework.stereotype.Service;

import java.io.BufferedReader;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;
import java.util.*;
import java.util.regex.Pattern;


@Service
public class WordFrequencyServiceImpl extends ServiceImpl<WordFrequencyMapper, WordFrequency> implements WordFrequencyService {

    // Compiled once: the original called String.matches() per token, which
    // recompiles the regex on every invocation inside the filtering loop.
    private static final Pattern PUNCT_PATTERN = Pattern.compile("[\\p{Punct}]+");
    private static final Pattern URL_PATTERN =
            Pattern.compile("https?://[a-zA-Z0-9.-]+(?:/[a-zA-Z0-9%_./-]*)?");

    // Stop words (Chinese question/function words) excluded from counting.
    // Built once; the original allocated a fresh HashSet for every token.
    private static final Set<String> STOP_WORDS = new HashSet<>(Arrays.asList(
            "什么", "怎么", "多少", "如何", "哪些", "怎样", "可以", "怎么办",
            "哪个", "为什么", "一个", "哪里", "多久", "关于"
    ));

    /**
     * Segments the cleaned text file with HanLP, counts word frequencies,
     * persists the top {@code num} words and returns them.
     *
     * @param num maximum number of highest-frequency words to return
     * @return success response carrying at most {@code num} (word, count)
     *         pairs in descending order of count
     */
    @Override
    public CommonResponse<ArrayList<WordNumVo>> getAndSaveWordFrequency(int num) {
        DataCleanUtil.Check();

        // Tokenize the cleaned file contents with HanLP's default segmenter.
        List<Term> termList = HanLP.segment(readWordFile());

        // Count occurrences of each valid token. The very first token
        // emitted for this file is a "\t\n" artifact, so it is skipped.
        Map<String, Integer> wordCountMap = new HashMap<>();
        boolean skipFirstWord = true;
        for (Term term : termList) {
            String word = term.word;
            if (skipFirstWord || "\t\n".equals(word)) {
                skipFirstWord = false;
                continue;
            }
            if (isValidWord(word)) {
                wordCountMap.merge(word, 1, Integer::sum);
            }
        }

        // Sort entries by frequency, descending.
        List<Map.Entry<String, Integer>> sortedEntries = new ArrayList<>(wordCountMap.entrySet());
        sortedEntries.sort(Map.Entry.<String, Integer>comparingByValue().reversed());

        // Keep only the top num entries.
        ArrayList<WordNumVo> result = new ArrayList<>();
        for (Map.Entry<String, Integer> entry : sortedEntries) {
            if (result.size() >= num) {
                break;
            }
            result.add(new WordNumVo(entry.getKey(), entry.getValue()));
        }

        saveWordFrequencies(result);

        return CommonResponse.createForSuccess("词频查找完毕", result);
    }

    /**
     * Reads the whole cleaned-word file into one newline-joined string.
     *
     * Fix: reads as UTF-8 explicitly — the original {@code FileReader}
     * used the platform default charset, which corrupts Chinese text on
     * systems whose default is not UTF-8. (Assumes DataCleanUtil writes
     * the file as UTF-8 — TODO confirm against its output encoding.)
     * On I/O failure the stack trace is printed and an empty string is
     * returned, preserving the original best-effort behavior.
     */
    private String readWordFile() {
        StringBuilder contentBuilder = new StringBuilder();
        try (BufferedReader br = new BufferedReader(new InputStreamReader(
                new FileInputStream(PathClass.wordOut), StandardCharsets.UTF_8))) {
            String currentLine;
            while ((currentLine = br.readLine()) != null) {
                contentBuilder.append(currentLine).append('\n');
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
        return contentBuilder.toString();
    }

    /**
     * Persists the given (word, count) pairs with a single batch insert
     * instead of issuing one INSERT per row as the original did.
     */
    private void saveWordFrequencies(List<WordNumVo> wordNumVos) {
        List<WordFrequency> entities = new ArrayList<>(wordNumVos.size());
        for (WordNumVo vo : wordNumVos) {
            WordFrequency wf = new WordFrequency();
            wf.setWord(vo.getWord());
            wf.setNum(vo.getNum());
            entities.add(wf);
        }
        // saveBatch throws on an empty list in some MyBatis-Plus versions;
        // guard so an empty result set is a no-op.
        if (!entities.isEmpty()) {
            saveBatch(entities);
        }
    }

    /**
     * A token counts as a valid word when it is at least two characters
     * long and is not pure punctuation, a URL, or a stop word.
     */
    private boolean isValidWord(String word) {
        if (word.length() < 2) {
            return false;
        }
        if (PUNCT_PATTERN.matcher(word).matches()) {
            return false;
        }
        if (URL_PATTERN.matcher(word.toLowerCase()).matches()) {
            return false;
        }
        return !STOP_WORDS.contains(word);
    }
}
