package org.example.comp_algorithm_back.service.impl;

import com.hankcs.hanlp.HanLP;
import com.hankcs.hanlp.seg.common.Term;
import java.io.*;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.regex.Pattern;
import org.ansj.splitWord.analysis.ToAnalysis;
import org.apdplat.word.WordFrequencyStatistics;
import org.apdplat.word.segmentation.SegmentationAlgorithm;
import org.example.comp_algorithm_back.common.CommonResponse;
import org.example.comp_algorithm_back.service.TestService;
import org.example.comp_algorithm_back.util.DataCleanUtil;
import org.example.comp_algorithm_back.util.PathClass;
import org.example.comp_algorithm_back.vo.DataVo;
import org.example.comp_algorithm_back.vo.WordNumVo;
import org.springframework.stereotype.Service;


@Service
public class TestServiceImpl implements TestService {

    /** Matches tokens that consist solely of punctuation; compiled once, not per token. */
    private static final Pattern PUNCTUATION_PATTERN = Pattern.compile("[\\p{Punct}]+");

    /** Matches URL tokens; compiled once, not per token. */
    private static final Pattern URL_PATTERN =
            Pattern.compile("https?://[a-zA-Z0-9.-]+(?:/[a-zA-Z0-9%_./-]*)?");

    /** Interrogative / filler stop words excluded from the frequency count. Built once. */
    private static final Set<String> STOP_WORDS = new HashSet<>(Arrays.asList(
            "什么", "怎么", "多少", "如何", "哪些", "怎样", "可以", "怎么办",
            "哪个", "为什么", "一个", "哪里", "多久", "关于"));

    /**
     * Segments the cleaned corpus file with HanLP and returns the {@code num}
     * highest-frequency valid words.
     *
     * <p>NOTE(review): the original loop skipped the single most frequent entry
     * (index 0) — presumably a dominant noise word. That behaviour is preserved
     * here; confirm it is intentional.
     *
     * @param num number of word/count pairs to return
     * @return success response carrying the pairs in descending frequency order
     */
    @Override
    public CommonResponse<ArrayList<WordNumVo>> getWordFrequency(int num) {
        ArrayList<WordNumVo> result = new ArrayList<>();

        DataCleanUtil.Check();

        // Read the whole cleaned-data file. Files.newBufferedReader reads UTF-8,
        // unlike the platform-default FileReader previously used here.
        StringBuilder contentBuilder = new StringBuilder();
        try (BufferedReader br = Files.newBufferedReader(Paths.get(PathClass.wordOut))) {
            String currentLine;
            while ((currentLine = br.readLine()) != null) {
                contentBuilder.append(currentLine).append('\n');
            }
        } catch (IOException e) {
            // Best effort: an unreadable file yields an empty result rather than
            // a failed response (matches previous behaviour).
            e.printStackTrace();
        }

        // Segment with HanLP and count every valid (non-punctuation, non-URL,
        // non-stop-word) token.
        Map<String, Integer> wordCountMap = new HashMap<>();
        for (Term term : HanLP.segment(contentBuilder.toString())) {
            String word = term.word;
            if (isValidWord(word)) {
                wordCountMap.merge(word, 1, Integer::sum);
            }
        }

        // Sort entries by descending frequency.
        List<Map.Entry<String, Integer>> sortedEntries = new ArrayList<>(wordCountMap.entrySet());
        sortedEntries.sort(Map.Entry.<String, Integer>comparingByValue().reversed());

        // Collect entries at indices 1..num — index 0 (the top word) is skipped,
        // preserving the original behaviour documented above.
        int end = Math.min(num + 1, sortedEntries.size());
        for (int i = 1; i < end; i++) {
            Map.Entry<String, Integer> entry = sortedEntries.get(i);
            result.add(new WordNumVo(entry.getKey(), entry.getValue()));
        }

        return CommonResponse.createForSuccess("词频查找完毕", result);
    }


    /**
     * A word is "valid" when it is at least two characters long and is not
     * punctuation, a URL, or a known stop word.
     */
    private static boolean isValidWord(String word) {
        // Single characters carry little meaning for frequency analysis.
        if (word.length() < 2) {
            return false;
        }
        // Pure punctuation tokens.
        if (PUNCTUATION_PATTERN.matcher(word).matches()) {
            return false;
        }
        // URLs (case-insensitive via lower-casing, as before).
        if (URL_PATTERN.matcher(word.toLowerCase()).matches()) {
            return false;
        }
        // Stop words.
        return !STOP_WORDS.contains(word);
    }



    /**
     * Benchmarks each segmentation library over growing input sizes
     * (10 points, 40 000-line increments), both with a pool sized to the CPU
     * count and with a single worker thread.
     *
     * @return one DataVo per series, each holding 10 timings in milliseconds
     * @throws Exception if the Word library fails during segmentation
     */
    @Override
    public CommonResponse<ArrayList<DataVo>> getNumAnalysis() throws Exception {
        ArrayList<DataVo> result = new ArrayList<>();

        // Ansj, pool sized to available processors.
        DataVo dataVo = new DataVo();
        dataVo.setName("Ansj_num");
        ArrayList<Long> data_list = new ArrayList<>();
        AnsjTime(-1, -1); // warm-up so class loading / JIT does not skew point 1
        for (int i = 1; i < 11; i++) {
            data_list.add(AnsjTime(i * 40000, -1));
        }
        dataVo.setDataList(data_list);
        result.add(dataVo);

        // Ansj, single worker thread.
        dataVo = new DataVo();
        dataVo.setName("Ansj_nothread_num");
        data_list = new ArrayList<>();
        AnsjTime(-1, -1); // warm-up
        for (int i = 1; i < 11; i++) {
            data_list.add(AnsjTime(i * 40000, 1));
        }
        dataVo.setDataList(data_list);
        result.add(dataVo);

        // HanLP, pool sized to available processors.
        dataVo = new DataVo();
        dataVo.setName("HanLP_num");
        data_list = new ArrayList<>();
        HanLPTime(-1, -1); // warm-up
        for (int i = 1; i < 11; i++) {
            data_list.add(HanLPTime(i * 40000, -1));
        }
        dataVo.setDataList(data_list);
        result.add(dataVo);

        // HanLP, single worker thread.
        dataVo = new DataVo();
        dataVo.setName("HanLP_nothread_num");
        data_list = new ArrayList<>();
        HanLPTime(-1, -1); // warm-up
        for (int i = 1; i < 11; i++) {
            data_list.add(HanLPTime(i * 40000, 1));
        }
        dataVo.setDataList(data_list);
        result.add(dataVo);

        // Word library (single-threaded API).
        dataVo = new DataVo();
        dataVo.setName("Word_num");
        data_list = new ArrayList<>();
        WordTime(-1); // warm-up
        for (int i = 1; i < 11; i++) {
            data_list.add(WordTime(i * 40000));
        }
        dataVo.setDataList(data_list);
        result.add(dataVo);

        return CommonResponse.createForSuccess(result);
    }

    /**
     * Benchmarks Ansj and HanLP while varying the thread-pool size from 1 to 8,
     * on the full corpus and on a 20 000-line slice.
     *
     * @return one DataVo per series, each holding 8 timings in milliseconds
     */
    public CommonResponse<ArrayList<DataVo>> getThreadAnalysis() {
        ArrayList<DataVo> result = new ArrayList<>();

        // Ansj, full corpus, 1..8 threads.
        DataVo dataVo = new DataVo();
        dataVo.setName("Ansj_thread");
        ArrayList<Long> data_list = new ArrayList<>();
        AnsjTime(-1, -1); // warm-up
        for (int i = 1; i < 9; i++) {
            data_list.add(AnsjTime(-1, i));
        }
        dataVo.setDataList(data_list);
        result.add(dataVo);

        // Ansj, 20 000 lines, 1..8 threads.
        dataVo = new DataVo();
        dataVo.setName("Ansj_half_thread");
        data_list = new ArrayList<>();
        AnsjTime(-1, -1); // warm-up
        for (int i = 1; i < 9; i++) {
            data_list.add(AnsjTime(20000, i));
        }
        dataVo.setDataList(data_list);
        result.add(dataVo);

        // HanLP, full corpus, 1..8 threads.
        dataVo = new DataVo();
        dataVo.setName("HanLP_thread");
        data_list = new ArrayList<>();
        HanLPTime(-1, -1); // warm-up
        for (int i = 1; i < 9; i++) {
            data_list.add(HanLPTime(-1, i));
        }
        dataVo.setDataList(data_list);
        result.add(dataVo);

        // HanLP, 20 000 lines, 1..8 threads.
        dataVo = new DataVo();
        dataVo.setName("HanLP_half_thread");
        data_list = new ArrayList<>();
        HanLPTime(-1, -1); // warm-up
        for (int i = 1; i < 9; i++) {
            data_list.add(HanLPTime(20000, i));
        }
        dataVo.setDataList(data_list);
        result.add(dataVo);

        return CommonResponse.createForSuccess(result);
    }

    /**
     * Times one Ansj statistics run.
     *
     * @param data_num   number of input lines to process, -1 for all
     * @param Thread_num pool size, -1 for the number of available processors
     * @return wall-clock duration in milliseconds
     */
    public Long AnsjTime(int data_num, int Thread_num)
    {
        long startTime = System.currentTimeMillis();
        AnsjStatistic(PathClass.wordOut, PathClass.wordApart, PathClass.wordStatistics, data_num, Thread_num);
        long endTime = System.currentTimeMillis();
        long duration = endTime - startTime; // milliseconds
        System.out.println("代码块执行时间: " + duration + " 毫秒");
        return duration;
    }

    /**
     * Times one HanLP statistics run.
     *
     * @param data_num   number of input lines to process, -1 for all
     * @param Thread_num pool size, -1 for the number of available processors
     * @return wall-clock duration in milliseconds
     */
    public Long HanLPTime(int data_num, int Thread_num)
    {
        long startTime = System.currentTimeMillis();
        HanLPStatistic(PathClass.wordOut, PathClass.wordApart, PathClass.wordStatistics, data_num, Thread_num);
        long endTime = System.currentTimeMillis();
        long duration = endTime - startTime; // milliseconds
        System.out.println("代码块执行时间: " + duration + " 毫秒");
        return duration;
    }

    /**
     * Times one Word-library statistics run.
     *
     * @param data_num number of input lines to process, negative for all
     * @return wall-clock duration in milliseconds
     * @throws Exception propagated from the Word library
     */
    public Long WordTime(int data_num) throws Exception {
        long startTime = System.currentTimeMillis();
        WordStatistic(PathClass.wordOut, PathClass.wordApart, PathClass.wordStatistics, data_num);
        long endTime = System.currentTimeMillis();
        long duration = endTime - startTime; // milliseconds
        System.out.println("代码块执行时间: " + duration + " 毫秒");
        return duration;
    }

    /**
     * Segments up to {@code data_num} lines of {@code wordOut} with Ansj on a
     * fixed-size thread pool and tallies word frequencies.
     *
     * <p>Used purely as a timed workload: the frequency map and sorted result
     * are computed and then discarded.
     *
     * @param wordOut        input file path
     * @param wordApart      unused here; kept for signature parity with
     *                       {@link #WordStatistic}
     * @param wordStatistics unused here; kept for signature parity
     * @param data_num       line limit, -1 for all lines
     * @param Thread_num     pool size, -1 for the number of available processors
     */
    public static void AnsjStatistic(String wordOut, String wordApart, String wordStatistics, int data_num, int Thread_num){
        // Thread-safe frequency map shared by all workers.
        ConcurrentHashMap<String, Integer> wordFrequencyMap = new ConcurrentHashMap<>();
        // Per-line segmentation results (synchronized: appended from worker threads).
        List<List<org.ansj.domain.Term>> segmentedLines = Collections.synchronizedList(new ArrayList<>());

        int numThreads = (Thread_num == -1)
                ? Runtime.getRuntime().availableProcessors()
                : Thread_num;
        ExecutorService executorService = Executors.newFixedThreadPool(numThreads);

        try (BufferedReader reader = Files.newBufferedReader(Paths.get(wordOut))) {
            String line;
            int submitted = 0;
            while ((line = reader.readLine()) != null && (data_num > submitted || data_num == -1)) {
                String finalLine = line;
                executorService.submit(() -> AnsjProcessLine(finalLine, wordFrequencyMap, segmentedLines));
                submitted++;
            }
        } catch (IOException e) {
            e.printStackTrace();
        }

        // Stop accepting tasks and wait (bounded) for in-flight work to finish.
        executorService.shutdown();
        try {
            if (!executorService.awaitTermination(60, TimeUnit.SECONDS)) {
                executorService.shutdownNow();
            }
        } catch (InterruptedException e) {
            executorService.shutdownNow();
            Thread.currentThread().interrupt();
        }

        // Sorting is part of the timed workload; its result is intentionally unused.
        sortWordFrequency(wordFrequencyMap);
    }

    /**
     * Segments one line with Ansj, keeps terms longer than one character whose
     * nature is not punctuation (w), pronoun (r) or numeral (m), and updates the
     * shared frequency map.
     */
    private static void AnsjProcessLine(String line, Map<String, Integer> wordFrequencyMap, List<List<org.ansj.domain.Term>> segmentedLines) {
        List<org.ansj.domain.Term> kept = new ArrayList<>();
        for (org.ansj.domain.Term term : ToAnalysis.parse(line).getTerms()) {
            if (term.getName().length() > 1) {
                char firstChar = term.getNatureStr().charAt(0);
                if (firstChar != 'w' && firstChar != 'r' && firstChar != 'm') {
                    wordFrequencyMap.merge(term.getName(), 1, Integer::sum);
                    kept.add(term);
                }
            }
        }
        segmentedLines.add(kept);
    }

    /** Returns the map's entries sorted by descending frequency. */
    private static List<Map.Entry<String, Integer>> sortWordFrequency(Map<String, Integer> wordFrequencyMap) {
        List<Map.Entry<String, Integer>> sortedEntries = new ArrayList<>(wordFrequencyMap.entrySet());
        sortedEntries.sort((e1, e2) -> e2.getValue().compareTo(e1.getValue()));
        return sortedEntries;
    }

    /**
     * Segments up to {@code data_num} lines of {@code wordOut} with HanLP on a
     * fixed-size thread pool and tallies word frequencies.
     *
     * <p>Used purely as a timed workload: the frequency map and sorted result
     * are computed and then discarded.
     *
     * @param wordOut        input file path
     * @param wordApart      unused here; kept for signature parity with
     *                       {@link #WordStatistic}
     * @param wordStatistics unused here; kept for signature parity
     * @param data_num       line limit, -1 for all lines
     * @param Thread_num     pool size, -1 for the number of available processors
     */
    public static void HanLPStatistic(String wordOut, String wordApart, String wordStatistics, int data_num, int Thread_num){
        // Thread-safe frequency map shared by all workers.
        ConcurrentHashMap<String, Integer> wordFrequencyMap = new ConcurrentHashMap<>();
        // Per-line segmentation results (synchronized: appended from worker threads).
        List<List<com.hankcs.hanlp.seg.common.Term>> segmentedLines = Collections.synchronizedList(new ArrayList<>());

        int numThreads = (Thread_num == -1)
                ? Runtime.getRuntime().availableProcessors()
                : Thread_num;
        ExecutorService executorService = Executors.newFixedThreadPool(numThreads);

        try (BufferedReader reader = Files.newBufferedReader(Paths.get(wordOut))) {
            String line;
            int submitted = 0;
            while ((line = reader.readLine()) != null && (data_num > submitted || data_num == -1)) {
                String finalLine = line;
                executorService.submit(() -> HanLPProcessLine(finalLine, wordFrequencyMap, segmentedLines));
                submitted++;
            }
        } catch (IOException e) {
            e.printStackTrace();
        }

        // Stop accepting tasks and wait (bounded) for in-flight work to finish.
        executorService.shutdown();
        try {
            if (!executorService.awaitTermination(60, TimeUnit.SECONDS)) {
                executorService.shutdownNow();
            }
        } catch (InterruptedException e) {
            executorService.shutdownNow();
            Thread.currentThread().interrupt();
        }

        // Sorting is part of the timed workload; its result is intentionally unused.
        sortWordFrequency(wordFrequencyMap);
    }

    /**
     * Segments one line with HanLP, keeps terms longer than one character whose
     * nature is not punctuation (w), pronoun (r) or numeral (m), and updates the
     * shared frequency map.
     */
    private static void HanLPProcessLine(String line, Map<String, Integer> wordFrequencyMap, List<List<com.hankcs.hanlp.seg.common.Term>> segmentedLines) {
        List<com.hankcs.hanlp.seg.common.Term> kept = new ArrayList<>();
        for (com.hankcs.hanlp.seg.common.Term term : HanLP.segment(line)) {
            if (term.word.length() > 1) {
                char firstChar = String.valueOf(term.nature).charAt(0);
                if (firstChar != 'w' && firstChar != 'r' && firstChar != 'm') {
                    wordFrequencyMap.merge(term.word, 1, Integer::sum);
                    kept.add(term);
                }
            }
        }
        segmentedLines.add(kept);
    }

    /**
     * Runs the Word library's frequency statistics over the first
     * {@code max_num} lines of {@code wordOut} (all lines when negative),
     * writing segmentation output to {@code wordApart} and statistics to
     * {@code wordStatistics}.
     *
     * @throws Exception declared for interface parity; I/O and library errors
     *                   are caught and logged (best effort, as before)
     */
    public static void WordStatistic(String wordOut, String wordApart, String wordStatistics, int max_num) throws Exception {
        // Configure the statistics run: drop stop words, write results to
        // wordStatistics, use the MaxNgramScore segmentation algorithm.
        WordFrequencyStatistics wordFrequencyStatistics = new WordFrequencyStatistics();
        wordFrequencyStatistics.setRemoveStopWord(true);
        wordFrequencyStatistics.setResultPath(wordStatistics);
        wordFrequencyStatistics.setSegmentationAlgorithm(SegmentationAlgorithm.MaxNgramScore);
        try {
            // Stage the requested number of lines into a temp file, since the
            // library API only accepts whole files.
            Path tempFilePath = Files.createTempFile("temp", ".txt");
            File tempFile = tempFilePath.toFile();
            tempFile.deleteOnExit(); // clean up on JVM exit

            // Copy up to max_num lines (all when max_num < 0) using UTF-8,
            // matching how the rest of this class reads the corpus.
            try (BufferedReader br = Files.newBufferedReader(Paths.get(wordOut));
                 BufferedWriter bw = Files.newBufferedWriter(tempFilePath)) {
                String line;
                int lineCount = 0;

                while ((line = br.readLine()) != null && (lineCount < max_num || max_num < 0)) {
                    bw.write(line);
                    bw.newLine();
                    lineCount++;
                }
            }
            wordFrequencyStatistics.seg(tempFile, new File(wordApart));
        } catch (Exception e) {
            // Best effort: log with the full stack trace (consistent with the
            // other statistics methods) instead of the message alone.
            e.printStackTrace();
        }

    }
}
