package com.bigdata.assignment.problem1;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

import java.io.*;
import java.net.URI;
import java.nio.charset.StandardCharsets;
import java.util.*;

/**
 * WordCountDriver
 * ----------------
 * Configures and runs the WordCount job, including HDFS housekeeping,
 * statistics generation and result output.
 *
 * Flow:
 * 1. Verify the input path exists
 * 2. Delete a pre-existing output path
 * 3. Configure the job (Mapper, Reducer, input/output paths)
 * 4. Submit the job and time it
 * 5. Collect statistics after completion (total words, unique words,
 *    input file count, processing time)
 * 6. Write words.txt and statistics.txt into the output directory
 */
public class WordCountDriver {

    /**
     * Entry point: runs the WordCount job over args[0] (input dir) and
     * args[1] (output dir), then writes words.txt / statistics.txt into the
     * output directory.
     *
     * @param args [0] = HDFS input path, [1] = HDFS output path
     * @throws Exception on HDFS or MapReduce failures not handled below
     */
    public static void main(String[] args) throws Exception {
        System.err.println("✅ 成功连接开始运行 WordCountDriver args.length：" + args.length);
        if (args.length != 2) {
            System.err.println("Usage: WordCountDriver <input path> <output path>");
            System.exit(-1);
        }

        // 1. Configuration and timing. If fs.defaultFS is missing or points at
        // localhost, override it with the cluster master node.
        Configuration conf = new Configuration();
        String fsDefault = conf.get("fs.defaultFS");
        if (fsDefault == null || fsDefault.contains("localhost")) {
            System.out.println("⚠️ 检测到 Hadoop 配置异常，自动修正为 hdfs://hadoop-master:9000");
            conf.set("fs.defaultFS", "hdfs://hadoop-master:9000");
        } else {
            System.out.println("✅ 当前 Hadoop 文件系统: " + fsDefault);
        }

        FileSystem fs = FileSystem.get(conf);
        Path inputPath = new Path(args[0]);
        Path outputPath = new Path(args[1]);

        long startTime = System.currentTimeMillis();

        // 2. The input directory must exist before submitting anything.
        if (!fs.exists(inputPath)) {
            System.err.println("输入目录不存在: " + inputPath);
            System.exit(-1);
        } else {
            System.out.println("✅ 输入目录存在: " + inputPath);
        }

        // 3. MapReduce refuses to start if the output directory already exists.
        if (fs.exists(outputPath)) {
            fs.delete(outputPath, true);
            System.out.println("🧹 已清理旧的输出目录: " + outputPath);
        }

        // 4. Configure the job, 5. submit and wait, timing the round trip.
        Job job = buildJob(conf, inputPath, outputPath);
        boolean success = job.waitForCompletion(true);
        long endTime = System.currentTimeMillis();

        if (!success) {
            System.err.println("❌ 作业执行失败");
            System.exit(1);
        }

        long processingTime = endTime - startTime;
        System.out.println("✅ 作业执行成功，耗时：" + processingTime + " ms");

        // Count only regular files: listStatus() also returns subdirectories,
        // which previously inflated the input-file count.
        int inputFiles = 0;
        for (FileStatus status : fs.listStatus(inputPath)) {
            if (status.isFile()) {
                inputFiles++;
            }
        }

        // 6. Read the reducer output and aggregate word statistics.
        // NOTE(review): assumes a single reducer — only part-r-00000 is read.
        int totalWords = 0;
        int uniqueWords = 0;
        List<String> wordLines = new ArrayList<>();

        Path resultFile = new Path(outputPath, "part-r-00000");
        // try-with-resources: the original leaked the HDFS stream when an
        // exception escaped mid-read. UTF-8 is explicit so parsing does not
        // depend on the platform default charset.
        try (BufferedReader br = new BufferedReader(
                new InputStreamReader(fs.open(resultFile), StandardCharsets.UTF_8))) {
            String line;
            while ((line = br.readLine()) != null) {
                wordLines.add(line);
                String[] parts = line.split("\t");
                if (parts.length == 2) {
                    try {
                        totalWords += Integer.parseInt(parts[1]);
                        uniqueWords++;
                    } catch (NumberFormatException e) {
                        // A malformed count should skip one line, not abort
                        // the whole statistics pass.
                        System.err.println("跳过无法解析的行: " + line);
                    }
                }
            }
        }

        // 7. Persist results: words.txt (lexicographically sorted) and
        // statistics.txt (tab-separated key/value pairs).
        Collections.sort(wordLines);
        writeLines(fs, new Path(outputPath, "words.txt"), wordLines);

        List<String> stats = Arrays.asList(
                "input_files\t" + inputFiles,
                "processing_time\t" + processingTime,
                "total_words\t" + totalWords,
                "unique_words\t" + uniqueWords);
        writeLines(fs, new Path(outputPath, "statistics.txt"), stats);

        System.out.println("📄 已生成结果文件：");
        System.out.println("  ├── words.txt");
        System.out.println("  └── statistics.txt");
    }

    /** Builds the WordCount job: mapper/reducer classes, output types, paths. */
    private static Job buildJob(Configuration conf, Path inputPath, Path outputPath)
            throws IOException {
        Job job = Job.getInstance(conf, "WordCount - Problem1");
        job.setJarByClass(WordCountDriver.class);

        job.setMapperClass(WordCountMapper.class);
        job.setReducerClass(WordCountReducer.class);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        FileInputFormat.addInputPath(job, inputPath);
        FileOutputFormat.setOutputPath(job, outputPath);
        return job;
    }

    /** Writes each line (newline-terminated) to the given HDFS file as UTF-8. */
    private static void writeLines(FileSystem fs, Path file, List<String> lines)
            throws IOException {
        try (BufferedWriter bw = new BufferedWriter(
                new OutputStreamWriter(fs.create(file), StandardCharsets.UTF_8))) {
            for (String line : lines) {
                bw.write(line + "\n");
            }
        }
    }
}
