package cn.com.daimajiangxin.flink.wordcount;

import org.apache.flink.api.common.RuntimeExecutionMode;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.ReduceFunction;
import org.apache.flink.api.common.typeinfo.TypeHint;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.connector.file.src.FileSource;
import org.apache.flink.connector.file.src.impl.StreamFormatAdapter;
import org.apache.flink.connector.file.src.reader.StreamFormat;
import org.apache.flink.connector.file.src.reader.TextLineFormat;
import org.apache.flink.connector.file.src.reader.TextLineInputFormat;
import org.apache.flink.core.fs.Path;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.util.Collector;

import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.util.Arrays;
import java.util.Locale;

/**
 * Batch WordCount built on the Flink 1.20 file-reading API.
 *
 * <p>Highlights:
 * <ol>
 *   <li>Uses the {@code FileSource} API instead of the deprecated {@code readTextFile}</li>
 *   <li>Explicit BATCH runtime mode for bounded input</li>
 *   <li>Honors the optional {@code --parallelism=N} CLI flag</li>
 *   <li>Prints diagnostics and exits non-zero when the job fails</li>
 * </ol>
 */
public class BatchWordCount {

    /**
     * Entry point.
     *
     * @param args {@code <input> <output> [--parallelism=N]}
     * @throws Exception if job submission or execution fails (rethrown after
     *                   printing diagnostics so the process exits non-zero)
     */
    public static void main(String[] args) throws Exception {
        // Validate CLI arguments before touching the execution environment.
        if (args.length < 2) {
            System.err.println("Usage: BatchWordCount <input> <output> [--parallelism=N]");
            System.err.println("Example: BatchWordCount input.txt output.txt --parallelism=4");
            System.exit(1);
        }

        final String inputPath = args[0];
        final String outputPath = args[1];

        // FIX: the usage text advertises --parallelism=N, but the original code
        // never parsed it and always ran with parallelism 1.
        final int parallelism = parseParallelism(args, 1);

        // 1. Create the unified stream/batch execution environment.
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // The input is bounded, so run in BATCH mode: reduce() then emits only
        // the final count per key instead of incremental updates.
        env.setRuntimeMode(RuntimeExecutionMode.BATCH);
        env.setParallelism(parallelism);

        // 2. Object reuse skips defensive copies between chained operators
        // (safe here: Tokenizer/SumReducer never cache input records).
        // NOTE: the original also called env.isChainingEnabled(), a
        // side-effect-free getter mislabeled as "monitoring setup"; removed.
        env.getConfig().enableObjectReuse();

        // 3. Read the input file through the FileSource API.
        DataStream<String> text = createFileSource(env, inputPath, parallelism);

        // 4. Processing pipeline: tokenize -> group by word -> sum counts.
        SingleOutputStreamOperator<Tuple2<String, Integer>> counts = text
                .flatMap(new Tokenizer())
                .name("Tokenizer")
                .setParallelism(parallelism)

                // Group by the word (field f0 of the tuple).
                .keyBy(value -> value.f0)

                // In BATCH mode reduce emits one final result per key.
                .reduce(new SumReducer())
                .name("SumReducer")
                .setParallelism(parallelism)
                .returns(TypeInformation.of(new TypeHint<Tuple2<String, Integer>>() {}));

        // 5. Write results. NOTE(review): writeAsText is deprecated in recent
        // Flink releases; migrating to the FileSink connector needs additional
        // imports, so the call is kept unchanged here.
        counts.writeAsText(outputPath)
                .name("FileSink")
                .setParallelism(1); // single writer -> single output file

        // 6. Execute the job, reporting the wall-clock duration on success.
        try {
            System.out.println("Starting Flink WordCount job...");
            System.out.println("Input path: " + inputPath);
            System.out.println("Output path: " + outputPath);
            System.out.println("Parallelism: " + parallelism);

            long startTime = System.currentTimeMillis();
            env.execute("Modern WordCount with FileSource API");
            long duration = System.currentTimeMillis() - startTime;

            System.out.printf("Job completed in %.2f seconds%n", duration / 1000.0);
            System.out.println("Output saved to: " + outputPath);
        } catch (Exception e) {
            // FIX: the original swallowed the exception and returned normally,
            // so the JVM exited with status 0 on failure. Print diagnostics,
            // then rethrow so schedulers/CI see a non-zero exit.
            System.err.println("Job execution failed: " + e.getMessage());
            System.err.println("Input paths: " + Arrays.asList(args));
            System.err.println("Environment configuration: " + env.getConfiguration());
            throw e;
        }
    }

    /**
     * Extracts an optional {@code --parallelism=N} flag from the CLI arguments.
     *
     * @param args         the full CLI argument array
     * @param defaultValue value returned when the flag is absent or malformed
     * @return the requested parallelism clamped to at least 1, or
     *         {@code defaultValue} if no valid flag is present
     */
    private static int parseParallelism(String[] args, int defaultValue) {
        for (String arg : args) {
            if (arg != null && arg.startsWith("--parallelism=")) {
                String value = arg.substring("--parallelism=".length());
                try {
                    return Math.max(1, Integer.parseInt(value));
                } catch (NumberFormatException ignored) {
                    // Malformed value: warn and keep the default rather than abort.
                    System.err.println("Ignoring malformed flag: " + arg);
                }
            }
        }
        return defaultValue;
    }

    /**
     * Creates a bounded text-line source using the FileSource API.
     *
     * @param env         the execution environment
     * @param path        input file (or directory) path
     * @param parallelism requested source parallelism (capped at 4)
     * @return a DataStream of raw text lines decoded as UTF-8
     */
    private static DataStream<String> createFileSource(StreamExecutionEnvironment env, String path, int parallelism) {
        Path filePath = new Path(path);

        // Decode the file as UTF-8 text, one record per line.
        StreamFormat<String> format = new TextLineInputFormat(StandardCharsets.UTF_8.name());

        FileSource<String> fileSource = FileSource
                .forRecordStreamFormat(format, filePath)
                .build();

        // FIX: the original used forMonotonousTimestamps(), but text-line
        // records carry no event timestamps, and watermarks are irrelevant in
        // BATCH mode — declare that explicitly with noWatermarks().
        return env.fromSource(
                        fileSource,
                        WatermarkStrategy.noWatermarks(),
                        "FileSource(" + filePath + ")"
                )
                .name("FileSource")
                .setParallelism(Math.min(parallelism, 4)); // cap file-source parallelism
    }

    /**
     * Splits each input line into lowercase words and emits (word, 1) pairs.
     * Blank lines and empty tokens are skipped.
     */
    public static final class Tokenizer implements FlatMapFunction<String, Tuple2<String, Integer>> {
        @Override
        public void flatMap(String value, Collector<Tuple2<String, Integer>> out) {
            // Skip null and blank lines.
            if (value == null || value.trim().isEmpty()) {
                return;
            }

            // FIX: Locale.ROOT keeps lowercasing deterministic across JVM
            // default locales (avoids the Turkish dotless-i surprise).
            String[] words = value.toLowerCase(Locale.ROOT).split("\\W+");

            for (String word : words) {
                if (!word.isEmpty()) {
                    out.collect(Tuple2.of(word, 1));
                }
            }
        }
    }

    /**
     * Sums per-word counts; keeps the key from the first tuple.
     */
    public static final class SumReducer implements ReduceFunction<Tuple2<String, Integer>> {
        @Override
        public Tuple2<String, Integer> reduce(Tuple2<String, Integer> value1, Tuple2<String, Integer> value2) {
            return Tuple2.of(value1.f0, value1.f1 + value2.f1);
        }
    }
}