package com.bigdata.assignment.problem2;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.*;
import java.nio.charset.StandardCharsets;
import java.util.*;

/**
 * Problem2 Driver
 * - Enables/disables the Combiner via a command-line flag
 * - Selects the Partitioner via a command-line argument
 *
 * Command-line arguments:
 * args[0] = input directory
 * args[1] = output directory
 * args[2] = partitioner name (optional)
 * args[3] = USE_COMBINER / NO_COMBINER (optional)
 *
 * Examples:
 * hadoop jar xxx.jar input output AlphabetPartitioner USE_COMBINER
 * hadoop jar xxx.jar input output RandomPartitioner NO_COMBINER
 * hadoop jar xxx.jar input output   // defaults: AlphabetPartitioner + USE_COMBINER
 */
public class WordCountDriver {

    public static void main(String[] args) throws Exception {

        if (args.length < 2) {
            System.err.println("Usage: WordCountDriver <input> <output> [PartitionerName] [USE_COMBINER|NO_COMBINER]");
            System.exit(-1);
        }

        String inputDir = args[0];
        String outputDir = args[1];

        // 解析可选参数
        String partitionerName = (args.length >= 3) ? args[2] : "AlphabetPartitioner";
        String combinerFlag = (args.length >= 4) ? args[3] : "USE_COMBINER";

        // 输出参数信息
        System.out.println("=== Job Configuration ===");
        System.out.println("Input Path        : " + inputDir);
        System.out.println("Output Path       : " + outputDir);
        System.out.println("Partitioner       : " + partitionerName);
        System.out.println("Combiner Enabled? : " + combinerFlag);
        System.out.println("=========================");

        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        Path inputPath = new Path(inputDir);
        Path outputPath = new Path(outputDir);

        if (!fs.exists(inputPath)) {
            System.err.println("❌ 输入路径不存在: " + inputPath);
            System.exit(1);
        }

        // 清理旧输出
        if (fs.exists(outputPath)) {
            fs.delete(outputPath, true);
        }

        long startTime = System.currentTimeMillis();

        Job job = Job.getInstance(conf, "Problem2 - WordCount-" + combinerFlag + "-" + partitionerName);

        job.setJarByClass(WordCountDriver.class);
        job.setMapperClass(WordCountMapper.class);

        // ✔ 是否启用 Combiner
        if (combinerFlag.equalsIgnoreCase("true")) {
            System.out.println("✅ 启动 Combiner");
            job.setCombinerClass(WordCountCombiner.class);
        }

        job.setReducerClass(WordCountReducer.class);

        // ✔ 根据参数选择 Partitioner
        job.setPartitionerClass(resolvePartitioner(partitionerName));

        job.setNumReduceTasks(4);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        FileInputFormat.addInputPath(job, inputPath);
        FileOutputFormat.setOutputPath(job, outputPath);

        boolean success = job.waitForCompletion(true);
        if (!success) {
            System.err.println("❌ Job 执行失败");
            System.exit(1);
        }

        long totalTime = System.currentTimeMillis() - startTime;
        System.out.println("✅ Job 完成，总耗时：" + totalTime + " ms");

        // === 统计与写出文件（与你原来的相同） ===
        writeStatistics(fs, job, outputPath, totalTime);

        System.out.println("📄 已生成 words.txt 和 statistics.txt");
    }

    /**
     * 根据字符串返回对应的 Partitioner class
     */
    private static Class<? extends Partitioner> resolvePartitioner(String name) {
        switch (name.toLowerCase()) {
            case "alphabetpartitioner":
                System.out.println("✅ 使用 AlphabetPartitioner");
                return AlphabetPartitioner.class;
            case "randompartitioner":
                System.out.println("✅ 使用 RandomPartitioner");
                return RandomPartitioner.class;
            case "hashpartitioner":
                System.out.println("✅ 使用 HashPartitioner");
                return HashPartitioner.class;
            case "lengthpartitioner":
                System.out.println("✅ 使用 LengthPartitioner");
                return LengthPartitioner.class;
            default:
                System.out.println("⚠ 未知 Partitioner: " + name + "，使用 AlphabetPartitioner");
                return AlphabetPartitioner.class;
        }
    }

    /**
     * 写出统计文件（增强版）
     * - 收集 Combiner 前后 Map 输出记录对比
     * - 统计不同 Partitioner 下各 Reducer 的记录数（partition_0..partition_n）
     * - 收集运行时间、map/reduce 任务数、输入/输出记录数
     * - 尝试抓取与内存/CPU/堆相关的 Counters（若存在）
     */
    private static void writeStatistics(FileSystem fs, Job job, Path outputPath, long processingTime) throws Exception {

        Counters counters = job.getCounters();

        // 标准 Task / Job counters
        long mapTasksLaunched = counters.findCounter(JobCounter.TOTAL_LAUNCHED_MAPS).getValue();
        long reduceTasksLaunched = counters.findCounter(JobCounter.TOTAL_LAUNCHED_REDUCES).getValue();

        long mapInputRecords = counters.findCounter(TaskCounter.MAP_INPUT_RECORDS).getValue();
        long mapOutputRecords = counters.findCounter(TaskCounter.MAP_OUTPUT_RECORDS).getValue();
        long combineInputRecords = 0;
        long combineOutputRecords = 0;
        // Some Hadoop versions provide these TaskCounter enums:
        try {
            combineInputRecords = counters.findCounter(TaskCounter.COMBINE_INPUT_RECORDS).getValue();
            combineOutputRecords = counters.findCounter(TaskCounter.COMBINE_OUTPUT_RECORDS).getValue();
        } catch (Exception ignore) {
            // ignore if not present
        }
        long reduceInputRecords = counters.findCounter(TaskCounter.REDUCE_INPUT_RECORDS).getValue();
        long reduceOutputRecords = counters.findCounter(TaskCounter.REDUCE_OUTPUT_RECORDS).getValue();

        // 通过判断 combiner counters 是否为 0 来判定 combiner 是否启用（也可以通过传参）
        boolean combinerEnabled = (combineInputRecords > 0 || combineOutputRecords > 0);

        // 读取所有 part 文件，并按 part 文件索引统计每个 reducer 的记录数
        // 也把所有行收集起来用于 words.txt（按字典序）
        List<String> allLines = new ArrayList<>();
        // 使用 Map<partitionIndex, count>
        Map<Integer, Long> partitionCounts = new TreeMap<>();

        FileStatus[] stats = fs.listStatus(outputPath);
        for (FileStatus st : stats) {
            String name = st.getPath().getName();
            if (!name.startsWith("part-") || name.endsWith(".crc"))
                continue;

            // 尝试解析 part 文件名中的索引，如 part-r-00000 或 part-00000
            int partitionIdx = -1;
            try {
                // 支持多种命名：part-r-00000 或 part-00000 或 part-m-00000
                String digits = name.replaceAll(".*?(\\d{1,5})$", "$1");
                partitionIdx = Integer.parseInt(digits);
            } catch (Exception e) {
                // 如果解析失败，保持 partitionIdx = -1（将作为 fallback）
                partitionIdx = -1;
            }

            long localCount = 0;
            BufferedReader br = new BufferedReader(new InputStreamReader(fs.open(st.getPath())));
            String line;
            while ((line = br.readLine()) != null) {
                allLines.add(line);
                localCount++;
            }
            br.close();

            // 记录到 map（若无法解析索引，则将其合并到 key -1）
            long prev = partitionCounts.getOrDefault(partitionIdx, 0L);
            partitionCounts.put(partitionIdx, prev + localCount);
        }

        // 将所有行按字典序排序并写入 words.txt
        Collections.sort(allLines);
        try (BufferedWriter bw = new BufferedWriter(
                new OutputStreamWriter(fs.create(new Path(outputPath, "words.txt"))))) {
            for (String s : allLines) {
                bw.write(s);
                bw.newLine();
            }
        }

        // 收集额外的资源相关 counters（尽可能提取含 "memory" / "cpu" / "heap" 的项）
        Map<String, Long> resourceCounters = new LinkedHashMap<>();
        for (CounterGroup g : counters) {
            for (Counter c : g) {
                String cname = c.getDisplayName().toLowerCase();
                if (cname.contains("memory") || cname.contains("cpu") || cname.contains("heap")
                        || cname.contains("vcore") || cname.contains("megabyte")) {
                    resourceCounters.put(g.getDisplayName() + ":" + c.getDisplayName(), c.getValue());
                }
            }
        }

        // 写 statistics.txt，包含所有必需统计项以及补充的资源指标
        try (BufferedWriter bw = new BufferedWriter(
                new OutputStreamWriter(fs.create(new Path(outputPath, "statistics.txt"))))) {

            bw.write("processing_time\t" + processingTime + "\n");

            bw.write("map_tasks_launched\t" + mapTasksLaunched + "\n");
            bw.write("reduce_tasks_launched\t" + reduceTasksLaunched + "\n");

            bw.write("map_input_records\t" + mapInputRecords + "\n");
            bw.write("map_output_records\t" + mapOutputRecords + "\n");

            bw.write("combine_input_records\t" + combineInputRecords + "\n");
            bw.write("combine_output_records\t" + combineOutputRecords + "\n");

            bw.write("reduce_input_records\t" + reduceInputRecords + "\n");
            bw.write("reduce_output_records\t" + reduceOutputRecords + "\n");

            bw.write("combiner_enabled\t" + combinerEnabled + "\n");

            // 按分区写 partition counts（如果存在解析到的索引）
            // 标准情形下会有 0..(numReduce-1)
            for (Map.Entry<Integer, Long> e : partitionCounts.entrySet()) {
                int idx = e.getKey();
                long cnt = e.getValue();
                if (idx >= 0) {
                    bw.write(String.format("partition_%d_records\t%d\n", idx, cnt));
                } else {
                    // 未能解析索引的文件，写作 partition_unknown
                    bw.write(String.format("partition_unknown_records\t%d\n", cnt));
                }
            }

            // 聚合统计： total_words 与 unique_words（如有需要可从 lines 中提取）
            long totalWords = 0;
            long uniqueWords = 0;
            for (String l : allLines) {
                String[] parts = l.split("\t");
                if (parts.length >= 2) {
                    try {
                        totalWords += Long.parseLong(parts[1]);
                        uniqueWords++;
                    } catch (NumberFormatException ignore) {
                    }
                }
            }
            bw.write("total_words\t" + totalWords + "\n");
            bw.write("unique_words\t" + uniqueWords + "\n");

            // 资源 counters 写出
            for (Map.Entry<String, Long> entry : resourceCounters.entrySet()) {
                bw.write(entry.getKey().replaceAll("\\s+", "_") + "\t" + entry.getValue() + "\n");
            }
        }
    }

}
