package cn.itcast.flink.base;

import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.state.ListState;
import org.apache.flink.api.common.state.ListStateDescriptor;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.runtime.state.FunctionInitializationContext;
import org.apache.flink.runtime.state.FunctionSnapshotContext;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.source.RichParallelSourceFunction;

import java.util.Iterator;

/**
 * Author itcast
 * Date 2021/7/29 16:21
 * 使用 operatorState 来模拟 offset 的存储和数据的恢复
 *
 */
public class OperatorStateDemo {
    public static void main(String[] args) throws Exception {
        // 1. Create the stream environment; parallelism 1 for easier observation
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        // 2. Enable checkpointing every 1000 ms with exactly-once semantics
        env.enableCheckpointing(1000, CheckpointingMode.EXACTLY_ONCE);
        // 3. Store checkpoint state under file:///D:/chk and retain externalized
        //    checkpoints on cancellation so state survives for manual recovery
        env.setStateBackend(new FsStateBackend("file:///D:/chk"));
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig
                .ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        // 4. Restart strategy: up to 3 attempts, 3000 ms apart
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 3000L));
        // 5. Add the mock Kafka source that keeps its offset in operator state
        DataStreamSource<String> source = env.addSource(new MyMonitorKafkaSource());
        // 6. Print the emitted records
        source.print();
        // 7. Execute the job
        env.execute();
    }


    /**
     * Mock Kafka source that persists the consumed offset in operator (list) state,
     * so the offset is restored after a failure/restart instead of starting from 0.
     * The emitted string carries the subtask index and the current offset.
     */
    private static class MyMonitorKafkaSource extends RichParallelSourceFunction<String> implements CheckpointedFunction {
        // volatile: cancel() is called from a different thread than run(),
        // so the write must be visible to the emit loop
        private volatile boolean flag = true;
        // operator state holding the last checkpointed offset
        private ListState<Long> offsetState = null;
        // in-memory working copy of the offset, restored in run()
        private Long historyOffset = 0L;

        /**
         * Creates/restores the offset list state from the operator state store.
         * Uses a typed descriptor (the original was a raw ListStateDescriptor).
         */
        @Override
        public void initializeState(FunctionInitializationContext context) throws Exception {
            ListStateDescriptor<Long> offsetStateDesc =
                    new ListStateDescriptor<>("offset", TypeInformation.of(Long.class));
            offsetState = context.getOperatorStateStore().getListState(offsetStateDesc);
        }

        /**
         * Restores the last checkpointed offset, then emits one record per second
         * while incrementing the offset. Every 5th offset throws an exception to
         * simulate a bug and trigger the restart/recovery cycle.
         */
        @Override
        public void run(SourceContext<String> ctx) throws Exception {
            // recover the last checkpointed offset, if any was stored
            Iterator<Long> iterator = offsetState.get().iterator();
            if (iterator.hasNext()) {
                historyOffset = iterator.next();
            }
            // simulate consuming from Kafka: each record advances the offset by 1
            while (flag) {
                historyOffset += 1;
                int indexOfThisSubtask = getRuntimeContext().getIndexOfThisSubtask();
                String output = "分区:" + indexOfThisSubtask + "消费到的offset位置为:" + historyOffset;
                ctx.collect(output);
                Thread.sleep(1000L);
                // simulated bug: fail whenever the offset is divisible by 5
                if (historyOffset % 5 == 0) {
                    System.out.println("当前程序出现bug...");
                    throw new Exception("当前程序出现bug...");
                }
            }
        }

        /** Stops the emit loop in run(). */
        @Override
        public void cancel() {
            flag = false;
        }

        /** On each checkpoint, replaces the stored offset with the latest value. */
        @Override
        public void snapshotState(FunctionSnapshotContext context) throws Exception {
            offsetState.clear();
            offsetState.add(historyOffset);
        }
    }
}
