package org.example.comp_algorithm_back.algorithm;

import com.hankcs.hanlp.HanLP;
import com.hankcs.hanlp.dictionary.CustomDictionary;
import com.hankcs.hanlp.seg.Segment;
import com.hankcs.hanlp.seg.common.Term;
import org.example.comp_algorithm_back.util.PathClass;

import java.io.*;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class HanlpWord {

    /** Utility class; not instantiable. */
    private HanlpWord() {
    }

    /**
     * Segments the text in {@code wordOut} with HanLP, counts word frequencies,
     * and writes two output files: the segmented lines to {@code wordApart} and
     * the "word count" pairs (sorted by count, descending) to {@code wordStatistics}.
     *
     * <p>Lines are segmented concurrently, one task per input line, on a pool sized
     * to the number of available processors.
     *
     * @param wordOut        path of the input text file, one sentence per line
     * @param wordApart      output path for segmented lines (words separated by spaces)
     * @param wordStatistics output path for "word count" pairs, highest count first
     * @throws IOException if any of the files cannot be read or written
     */
    public static void statistic(String wordOut, String wordApart, String wordStatistics) throws IOException {
        // Load the custom dictionary (one entry per line, UTF-8).
        // try-with-resources closes the reader — the previous version leaked it.
        try (BufferedReader dictReader = new BufferedReader(
                new InputStreamReader(new FileInputStream(PathClass.word1), StandardCharsets.UTF_8))) {
            String entry;
            while ((entry = dictReader.readLine()) != null) {
                if (!entry.isEmpty()) { // skip blank lines instead of registering empty entries
                    CustomDictionary.add(entry);
                }
            }
        }

        // Word -> occurrence count, shared across worker threads; merge() is atomic here.
        ConcurrentHashMap<String, Integer> wordFrequencyMap = new ConcurrentHashMap<>();
        // Per-line segmentation results. NOTE(review): entries are appended in task
        // completion order, so output line order may not match input line order.
        List<List<Term>> segmentedLines = Collections.synchronizedList(new ArrayList<>());

        // One worker per available core.
        int numThreads = Runtime.getRuntime().availableProcessors();
        ExecutorService executorService = Executors.newFixedThreadPool(numThreads);

        // Read errors now propagate (the method declares throws IOException) instead of
        // being swallowed and producing silently truncated output files.
        try (BufferedReader reader = Files.newBufferedReader(Paths.get(wordOut))) {
            String line;
            while ((line = reader.readLine()) != null) {
                String finalLine = line; // effectively-final copy for the lambda
                executorService.submit(() -> processLine(finalLine, wordFrequencyMap, segmentedLines));
            }
        } finally {
            // Stop accepting tasks even if reading failed, so the JVM can exit.
            executorService.shutdown();
        }

        try {
            // Give outstanding segmentation tasks time to finish; abandon them on timeout.
            if (!executorService.awaitTermination(60, TimeUnit.SECONDS)) {
                executorService.shutdownNow();
            }
        } catch (InterruptedException e) {
            executorService.shutdownNow();
            Thread.currentThread().interrupt(); // restore the interrupt status
        }

        // Write "word count" pairs sorted by frequency, highest first.
        List<Map.Entry<String, Integer>> sortedEntries = sortWordFrequency(wordFrequencyMap);
        try (BufferedWriter writer = Files.newBufferedWriter(Paths.get(wordStatistics))) {
            for (Map.Entry<String, Integer> entry : sortedEntries) {
                writer.write(entry.getKey() + " " + entry.getValue());
                writer.newLine();
            }
        }

        // Write one segmented line per processed input line, words separated by spaces.
        try (BufferedWriter writer = Files.newBufferedWriter(Paths.get(wordApart))) {
            for (List<Term> terms : segmentedLines) {
                for (Term term : terms) {
                    writer.write(term.word + " ");
                }
                writer.newLine();
            }
        }
    }

    /**
     * Segments one line with HanLP, keeps only words longer than one character whose
     * part-of-speech tag does not start with 'w' (punctuation), 'r' (pronoun),
     * 'v' (verb) or 'm' (numeral), updates the shared frequency map, and appends
     * the filtered terms to {@code segmentedLines}.
     *
     * @param line             the raw input line to segment
     * @param wordFrequencyMap shared word -> count map (must be thread-safe)
     * @param segmentedLines   shared list collecting each line's kept terms
     */
    private static void processLine(String line, Map<String, Integer> wordFrequencyMap,
                                    List<List<Term>> segmentedLines) {
        List<Term> kept = new ArrayList<>();
        for (Term term : HanLP.segment(line)) {
            if (term.word.length() > 1) {
                char tag = String.valueOf(term.nature).charAt(0);
                if (tag != 'w' && tag != 'r' && tag != 'v' && tag != 'm') {
                    wordFrequencyMap.merge(term.word, 1, Integer::sum);
                    kept.add(term);
                }
            }
        }
        segmentedLines.add(kept);
    }

    /**
     * Returns the map's entries as a list sorted by count, descending.
     *
     * @param wordFrequencyMap word -> count map to sort
     * @return entries ordered from most to least frequent
     */
    private static List<Map.Entry<String, Integer>> sortWordFrequency(Map<String, Integer> wordFrequencyMap) {
        List<Map.Entry<String, Integer>> sortedEntries = new ArrayList<>(wordFrequencyMap.entrySet());
        // Integer.compare avoids boxing and makes the descending intent explicit.
        sortedEntries.sort((e1, e2) -> Integer.compare(e2.getValue(), e1.getValue()));
        return sortedEntries;
    }
}
