package org.example.controller;

/**
 * @author lijun
 * @date 2025-11-07 17:38
 */
import java.io.*;
import java.nio.charset.StandardCharsets;
import java.util.*;
/**
 * Finds the Top-100 most frequent words of a ~1 GB file under a ~1 MB memory
 * budget: the file is hash-partitioned into small chunks (divide and conquer),
 * each chunk's Top-100 is selected with a min-heap, and the per-chunk results
 * are merged into the global Top-100.
 *
 * @author lijun
 * @date 2025/11/7 17:57
 */
public class Top100Words {

    /** Number of top-frequency words to report. */
    private static final int TOP_N = 100;

    /**
     * Maximum number of re-split rounds. A chunk that is still oversized after
     * this many salted re-splits is dominated by duplicate lines; its distinct
     * word count (and therefore its count map) is small, so it is safe to
     * process as-is instead of recursing forever.
     */
    private static final int MAX_SPLIT_DEPTH = 8;

    /** Pairs a word with its occurrence count. */
    static class WordCount {
        String word;
        int count;

        public WordCount(String word, int count) {
            this.word = word;
            this.count = count;
        }
    }

    public static void main(String[] args) {
        // Configuration (adjust the paths for your environment).
        String largeFilePath = "D:/big_file.txt"; // ~1 GB input file, one word per line
        String splitDir = "D:/file";              // scratch directory for chunk files
        int initialSplits = 1000;                 // first-level hash bucket count
        // 1 MB per-chunk memory limit. BUG FIX: the original value was
        // 1024 * 1024 * 50 (50 MB) while the comment and design said 1 MB.
        int maxMemoryBytes = 1024 * 1024;

        try {
            // 1. Hash-partition the big file into small chunk files (seed 0).
            splitLargeFile(largeFilePath, splitDir, initialSplits, 0);

            // 2. Recursively re-split any chunk still larger than the limit.
            checkAndSplitOversizedFiles(splitDir, maxMemoryBytes, initialSplits, 1);

            // 3. Collect the paths of every resulting chunk file.
            List<String> allSplitFiles = collectAllSplitFiles(splitDir);

            // 4. Merge the per-chunk Top-100 lists into the global Top-100.
            List<WordCount> finalTop100 = mergeTopResults(allSplitFiles);

            printTopResults(finalTop100);

        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            // Best-effort cleanup of the scratch directory.
            deleteTempDir(new File(splitDir));
        }
    }

    /**
     * Step 1: hash-partitions a file into {@code numSplits} chunk files named
     * {@code split_<i>.txt} under {@code outputDir}. Every occurrence of a given
     * word lands in the same chunk, which is what makes per-chunk counting exact.
     * Package-private for testability.
     *
     * @param largeFilePath file to partition, one word per line, UTF-8
     * @param outputDir     directory for the chunk files (created if absent)
     * @param numSplits     number of hash buckets; up to this many file handles
     *                      are held open simultaneously, so keep it below the
     *                      OS open-file limit
     * @param seed          salt mixed into the hash. BUG FIX: without a salt,
     *                      re-splitting an already-hashed chunk maps every line
     *                      back into a single bucket (all lines of bucket i share
     *                      hash mod numSplits == i), so the split never shrinks.
     * @throws IOException if the directory cannot be created or I/O fails
     */
    static void splitLargeFile(String largeFilePath, String outputDir, int numSplits, int seed) throws IOException {
        File dir = new File(outputDir);
        if (!dir.exists() && !dir.mkdirs()) {
            throw new IOException("Cannot create directory: " + outputDir);
        }

        // Cache open writers so each bucket file is opened once, not per line.
        Map<Integer, BufferedWriter> writerMap = new HashMap<>();
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(new FileInputStream(largeFilePath), StandardCharsets.UTF_8))) {
            String word;
            while ((word = reader.readLine()) != null) {
                if (word.isEmpty()) continue;

                // Salted hash so each split level redistributes independently;
                // floorMod avoids the negative bucket that Math.abs(hash) yields
                // for Integer.MIN_VALUE.
                int hash = (seed + "#" + word).hashCode();
                int splitIndex = Math.floorMod(hash, numSplits);

                BufferedWriter writer = writerMap.get(splitIndex);
                if (writer == null) {
                    String fileName = outputDir + File.separator + "split_" + splitIndex + ".txt";
                    writer = new BufferedWriter(new OutputStreamWriter(
                            new FileOutputStream(fileName, true), StandardCharsets.UTF_8));
                    writerMap.put(splitIndex, writer);
                }

                writer.write(word);
                writer.newLine();
            }
        } finally {
            // Close writers even when the read loop throws (the original leaked
            // them on any exception).
            for (BufferedWriter writer : writerMap.values()) {
                try {
                    writer.close();
                } catch (IOException ignored) {
                    // best-effort close; a primary exception is already propagating
                }
            }
        }
    }

    /**
     * Step 2: walks {@code dirPath} and recursively re-splits any chunk larger
     * than {@code maxSize}, salting each round with {@code depth} so the lines
     * actually redistribute. Package-private for testability.
     *
     * @param dirPath   directory to scan
     * @param maxSize   per-chunk size limit in bytes
     * @param numSplits buckets per re-split round
     * @param depth     current split round, used as the hash salt (starts at 1)
     * @throws IOException if a re-split fails or an oversized chunk cannot be deleted
     */
    static void checkAndSplitOversizedFiles(String dirPath, int maxSize, int numSplits, int depth) throws IOException {
        if (depth > MAX_SPLIT_DEPTH) {
            // See MAX_SPLIT_DEPTH: duplicate-heavy chunks cannot shrink further
            // but are cheap to count, so stop recursing.
            return;
        }
        File[] files = new File(dirPath).listFiles();
        if (files == null) return;

        for (File file : files) {
            if (file.isDirectory()) {
                checkAndSplitOversizedFiles(file.getPath(), maxSize, numSplits, depth);
            } else if (file.length() > maxSize) {
                // Oversized chunk: re-split it into a sub-directory.
                String subDir = dirPath + File.separator + "sub_" + file.getName().replace(".txt", "");
                splitLargeFile(file.getPath(), subDir, numSplits, depth);

                // BUG FIX: a silently failed delete() left both the original and
                // its split copies on disk, double-counting every word.
                if (!file.delete()) {
                    throw new IOException("Could not delete oversized chunk: " + file);
                }

                // BUG FIX: the `files` snapshot was taken before subDir existed,
                // so this loop never visits it — descend into it explicitly.
                checkAndSplitOversizedFiles(subDir, maxSize, numSplits, depth + 1);
            }
        }
    }

    /**
     * Recursively collects the paths of all chunk files ({@code split_*}) under
     * {@code dirPath}, including sub-directories. Package-private for testability.
     *
     * @param dirPath root directory to scan
     * @return paths of all chunk files; empty list if the directory is missing
     */
    static List<String> collectAllSplitFiles(String dirPath) {
        List<String> filePaths = new ArrayList<>();
        File[] files = new File(dirPath).listFiles();
        if (files == null) return filePaths;

        for (File file : files) {
            if (file.isDirectory()) {
                filePaths.addAll(collectAllSplitFiles(file.getPath()));
            } else if (file.getName().startsWith("split_")) {
                filePaths.add(file.getPath());
            }
        }
        return filePaths;
    }

    /**
     * Counts word frequencies in a single chunk file and returns its Top-100,
     * ordered by descending count. Package-private for testability.
     *
     * @param filePath UTF-8 chunk file, one word per line; empty lines are skipped
     * @return up to {@link #TOP_N} entries, highest count first
     * @throws IOException if the file cannot be read
     */
    static List<WordCount> getFileTop100(String filePath) throws IOException {
        Map<String, Integer> countMap = new HashMap<>();
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(new FileInputStream(filePath), StandardCharsets.UTF_8))) {
            String word;
            while ((word = reader.readLine()) != null) {
                if (!word.isEmpty()) {
                    countMap.merge(word, 1, Integer::sum);
                }
            }
        }

        List<WordCount> candidates = new ArrayList<>(countMap.size());
        for (Map.Entry<String, Integer> entry : countMap.entrySet()) {
            candidates.add(new WordCount(entry.getKey(), entry.getValue()));
        }
        return topK(candidates, TOP_N);
    }

    /**
     * Merges the per-chunk Top-100 lists into the global Top-100. Because the
     * hash partitioning sends every occurrence of a word to exactly one chunk,
     * this merge is exact, not approximate. Package-private for testability.
     *
     * @param allFiles paths of all chunk files
     * @return the global Top-100, highest count first
     * @throws IOException if any chunk file cannot be read
     */
    static List<WordCount> mergeTopResults(List<String> allFiles) throws IOException {
        List<WordCount> allCandidates = new ArrayList<>();
        for (String file : allFiles) {
            allCandidates.addAll(getFileTop100(file));
        }
        return topK(allCandidates, TOP_N);
    }

    /**
     * Selects the {@code k} highest-count entries, descending by count.
     * Shared by {@link #getFileTop100} and {@link #mergeTopResults}, which
     * previously duplicated this heap logic.
     */
    private static List<WordCount> topK(List<WordCount> candidates, int k) {
        // Min-heap of size k: the root is the smallest of the current top-k,
        // so a better candidate displaces it in O(log k).
        PriorityQueue<WordCount> minHeap = new PriorityQueue<>(Comparator.comparingInt(wc -> wc.count));
        for (WordCount wc : candidates) {
            if (minHeap.size() < k) {
                minHeap.add(wc);
            } else if (wc.count > minHeap.peek().count) {
                minHeap.poll();
                minHeap.add(wc);
            }
        }

        List<WordCount> result = new ArrayList<>(minHeap.size());
        while (!minHeap.isEmpty()) {
            result.add(minHeap.poll());
        }
        Collections.reverse(result); // ascending poll order -> descending
        return result;
    }

    /**
     * Prints the Top-100 list, one ranked line per word.
     */
    private static void printTopResults(List<WordCount> top100) {
        System.out.println("Top 100 高频词：");
        for (int i = 0; i < top100.size(); i++) {
            WordCount wc = top100.get(i);
            System.out.printf("%d. %s 出现次数：%d%n", i + 1, wc.word, wc.count);
        }
    }

    /**
     * Recursively deletes the scratch directory. Cleanup is best-effort, so
     * failed deletions are deliberately ignored.
     */
    private static void deleteTempDir(File dir) {
        if (dir.isDirectory()) {
            File[] files = dir.listFiles();
            if (files != null) {
                for (File f : files) {
                    deleteTempDir(f);
                }
            }
        }
        dir.delete();
    }
}
