package cn.itcast.flink.start;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.state.ListState;
import org.apache.flink.api.common.state.ListStateDescriptor;
import org.apache.flink.runtime.state.FunctionInitializationContext;
import org.apache.flink.runtime.state.FunctionSnapshotContext; import org.apache.flink.runtime.state.filesystem.FsStateBackend; import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction;
import org.apache.flink.streaming.api.datastream.DataStreamSource; import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.source.RichParallelSourceFunction; import java.util.Arrays;
import java.util.concurrent.TimeUnit;
/**
 * Flink OperatorState demo: a custom source simulates consuming from Kafka
 * (similar to FlinkKafkaConsumer), keeps the consumed offset as operator
 * state, and persists it via checkpoints so it survives failures.
 */
public class StateOperatorStateDemo_2 {
/**
* 自定义数据源Source，模拟从Kafka消费数据（类似FlinkKafkaConsumer），并实现offset状态维护
*/
private static class FlinkKafkaSource extends RichParallelSourceFunction<String> implements CheckpointedFunction {

    /** Loop flag flipped by cancel(); volatile so the emit loop sees the change. */
    private volatile boolean isRunning = true;

    /** Operator (list) state that carries the consumed offset across checkpoints. */
    private ListState<Long> offsetState = null;
    /** Current offset of this subtask; restored from state on recovery. */
    private Long offset = 0L;

    /**
     * Creates the offset state handle and, when recovering from a
     * checkpoint/savepoint, restores the previously stored offset.
     */
    @Override
    public void initializeState(FunctionInitializationContext context) throws Exception {
        // Describe the state: name + element type
        ListStateDescriptor<Long> descriptor = new ListStateDescriptor<>("offsetState", Long.class);
        // Obtain the state handle from the operator state store
        offsetState = context.getOperatorStateStore().getListState(descriptor);
        // Iterate instead of iterator().next(): a restored list may be empty for
        // this subtask (e.g. after a parallelism change), and next() would then
        // throw NoSuchElementException.
        if (context.isRestored()) {
            for (Long restoredOffset : offsetState.get()) {
                offset = restoredOffset;
            }
        }
    }

    /**
     * Called on each checkpoint: replaces the stored value with the current offset.
     */
    @Override
    public void snapshotState(FunctionSnapshotContext context) throws Exception {
        offsetState.clear();
        offsetState.update(Arrays.asList(offset));
    }

    /**
     * Emits one record per second; every 5th record throws a RuntimeException
     * to make the restart/restore cycle observable.
     */
    @Override
    public void run(SourceContext<String> ctx) throws Exception {
        while (isRunning) {
            // Subtask index, standing in for a Kafka partition id
            int partitionId = getRuntimeContext().getIndexOfThisSubtask();
            // Advance the offset and emit under the checkpoint lock so that a
            // snapshot never captures an offset whose record was not emitted —
            // required for exactly-once with CheckpointedFunction sources.
            synchronized (ctx.getCheckpointLock()) {
                offset = offset + 1L;
                ctx.collect("partition: " + partitionId + ", offset: " + offset);
            }
            TimeUnit.SECONDS.sleep(1);
            // Simulate a failure every 5 records
            if (offset % 5 == 0) {
                throw new RuntimeException("程序出现异常，遇到Bug啦................");
            }
        }
    }

    /** Stops the emit loop; invoked by the framework on job cancellation. */
    @Override
    public void cancel() {
        isRunning = false;
    }
}

                /**
                 * Builds the streaming job: checkpointed custom source -> stderr sink.
                 */
                public static void main(String[] args) throws Exception {
            // 1. Execution environment
                    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(); env.setParallelism(1);
            // Configure checkpointing so the source's operator state is persisted
                    env.enableCheckpointing(2000) ; // checkpoint every 2s (interval is in milliseconds)
                    env.setStateBackend(new FsStateBackend("file:///D:/ckpt/")) ; // keep checkpoint data on the local file system
                    env.getCheckpointConfig().enableExternalizedCheckpoints( CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION
                    );// retain (do not delete) checkpoint data when the job is cancelled
                    env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
                    // Fixed-delay restart: on failure restart up to 3 times with a 2s delay
                    // between attempts; once exhausted, the job fails for good
                    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 2000));

                    // 2. Source: custom source simulating a Kafka consumer
                    DataStreamSource<String> kafkaDataStream = env.addSource(new FlinkKafkaSource());
                    // 3. Sink: print records to stderr
                     kafkaDataStream.printToErr();
                    // 4. Trigger job execution
                     env.execute(StateOperatorStateDemo_2.class.getSimpleName());
    }
}