package org.example;

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.state.ValueState;
import org.apache.flink.api.common.state.ValueStateDescriptor;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.KeyedProcessFunction;
import org.apache.flink.util.Collector;

/**
 * Flink state snapshot and fault-tolerance (State Backend &amp; Checkpoint) example.
 *
 * <p>Demonstrates fault tolerance via state snapshots:
 * <ol>
 *   <li>Configure a state backend (here a filesystem-based one) and checkpointing.</li>
 *   <li>Use a {@link KeyedProcessFunction} with keyed {@link ValueState} to maintain
 *       a per-user running total.</li>
 *   <li>On job failure, Flink restores the keyed state from the latest checkpoint,
 *       providing exactly-once state semantics.</li>
 * </ol>
 *
 * <p>Intended as a beginner-friendly introduction to Flink's fault-tolerance
 * mechanism and state management.
 */
public class StateBackendCheckpointExample {

    public static void main(String[] args) throws Exception {
        // Create the stream execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Parallelism 1 keeps the printed output in a single, easy-to-follow sequence.
        env.setParallelism(1);

        // Configure the state backend (filesystem) and enable checkpointing.
        // NOTE(review): FsStateBackend is deprecated since Flink 1.13 in favor of
        // HashMapStateBackend + env.getCheckpointConfig().setCheckpointStorage(...).
        // Kept as-is because the replacement classes' packages vary by Flink version;
        // migrate when the project's Flink dependency is confirmed.
        env.setStateBackend(new FsStateBackend("file:///tmp/flink-checkpoints"));
        env.enableCheckpointing(10000, CheckpointingMode.EXACTLY_ONCE); // snapshot every 10 s

        // Simulated bounded input stream of "user,value" records.
        DataStream<String> textStream = env.fromElements(
            "user1,10",
            "user2,20",
            "user1,5",
            "user2,15"
        );

        // Parse each input line into a (user, value) tuple.
        DataStream<Tuple2<String, Integer>> userValueStream = textStream
            .map(new MapFunction<String, Tuple2<String, Integer>>() {
                @Override
                public Tuple2<String, Integer> map(String value) {
                    // Validate the record shape instead of letting a malformed line
                    // surface as an opaque ArrayIndexOutOfBoundsException.
                    String[] parts = value.split(",", 2);
                    if (parts.length != 2) {
                        throw new IllegalArgumentException(
                            "Malformed record, expected \"user,value\": " + value);
                    }
                    return new Tuple2<>(parts[0], Integer.parseInt(parts[1].trim()));
                }
            });

        // Keyed, stateful processing: accumulate a running total per user.
        DataStream<Tuple2<String, Integer>> resultStream = userValueStream
            .keyBy(t -> t.f0)
            .process(new KeyedProcessFunction<String, Tuple2<String, Integer>, Tuple2<String, Integer>>() {
                // Per-key running total; checkpointed by Flink and restored after failure.
                private transient ValueState<Integer> sumState;

                @Override
                public void open(org.apache.flink.configuration.Configuration parameters) {
                    // The ValueStateDescriptor constructor taking a default value is
                    // deprecated: "no value yet" is represented as null and handled
                    // explicitly in processElement instead.
                    ValueStateDescriptor<Integer> descriptor =
                        new ValueStateDescriptor<>("sumState", Integer.class);
                    sumState = getRuntimeContext().getState(descriptor);
                }

                @Override
                public void processElement(Tuple2<String, Integer> value, Context ctx,
                                           Collector<Tuple2<String, Integer>> out) throws Exception {
                    // value() returns null the first time a key is seen; guard against
                    // it to avoid an NPE during unboxing.
                    Integer previousSum = sumState.value();
                    int currentSum = (previousSum == null ? 0 : previousSum) + value.f1;
                    sumState.update(currentSum);
                    out.collect(new Tuple2<>(value.f0, currentSum));
                }
            });

        // Print the running totals to stdout.
        resultStream.print("状态快照结果");

        // Submit and execute the streaming job.
        env.execute("State Backend & Checkpoint Example");
    }
}