package cn.xuexiyuan.flinkstudy.state;

import org.apache.flink.api.common.RuntimeExecutionMode;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.state.ListState;
import org.apache.flink.api.common.state.ListStateDescriptor;
import org.apache.flink.runtime.state.FunctionInitializationContext;
import org.apache.flink.runtime.state.FunctionSnapshotContext;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.source.RichParallelSourceFunction;

import java.util.Iterator;


/**
 * @Description: 使用 Operator 中的 ListState 模拟 KafkaSource 进行 offset 维护
 * @Author 左龙龙
 * @Date 21-3-26
 * @Version 1.0
 **/
public class StateDemo02_OperatorState {

    public static void main(String[] args) throws Exception {
        // 0. Set up the streaming execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setRuntimeMode(RuntimeExecutionMode.AUTOMATIC);
        // Parallelism 1 makes the printed output easier to follow.
        env.setParallelism(1);

        // Checkpointing: take a checkpoint every 1000 ms and persist it to the
        // local filesystem so operator state survives failures and restarts.
        env.enableCheckpointing(1000);
        env.setStateBackend(new FsStateBackend("file:///tmp/ckp"));
        CheckpointConfig checkpointConfig = env.getCheckpointConfig();
        // Keep externalized checkpoints around even after the job is cancelled.
        checkpointConfig.enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        checkpointConfig.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        // Fixed-delay restart strategy: on failure, restart up to 2 times with a
        // 3-second delay between attempts; after that the job fails for good.
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(2, 3000));

        // 1. Source: custom source that keeps its offset in operator ListState.
        DataStreamSource<String> ds = env.addSource(new MyKafkaSource());

        // 2. Transformation: none for this demo.

        // 3. Sink: print each emitted record.
        ds.print();

        // 4. Execute the job graph.
        env.execute();
    }

}


// Simulates a Kafka-like source that maintains its read offset in operator
// ListState, so the offset is checkpointed and restored across failures.
class MyKafkaSource extends RichParallelSourceFunction<String> implements CheckpointedFunction {

    // volatile: cancel() is invoked from a different thread than run(), so the
    // flag update must be visible across threads to actually stop the loop.
    private volatile boolean running = true;
    // Operator state holding the last checkpointed offset for this subtask.
    private ListState<Long> offsetState = null;
    // Current in-memory offset; snapshotted on every checkpoint.
    private Long offset = 0L;


    /**
     * Called on start and on restore. Creates the ListState and, when the job is
     * recovering from a checkpoint, reads the last saved offset back into memory.
     * Restoring once here (instead of re-reading state on every loop iteration
     * in run()) follows the CheckpointedFunction contract.
     */
    @Override
    public void initializeState(FunctionInitializationContext context) throws Exception {
        // Typed descriptor instead of a raw type, keeping serialization explicit.
        ListStateDescriptor<Long> stateDescriptor = new ListStateDescriptor<>("offsetState", Long.class);
        offsetState = context.getOperatorStateStore().getListState(stateDescriptor);
        if (context.isRestored()) {
            Iterator<Long> iterator = offsetState.get().iterator();
            if (iterator.hasNext()) {
                offset = iterator.next();
            }
        }
    }

    @Override
    public void run(SourceContext<String> context) throws Exception {
        while (running) {
            offset += 1;
            int subtaskId = getRuntimeContext().getIndexOfThisSubtask();
            context.collect("subTaskId: " + subtaskId + ",当前的 offset 值为: " + offset);
            Thread.sleep(1000);

            // Deliberately fail every 5 records so the restart strategy and
            // state restoration can be observed.
            if (offset % 5 == 0) {
                throw new Exception("bug ...");
            }
        }
    }


    /**
     * Invoked on every checkpoint: replaces the stored offset with the current
     * in-memory value so it can be restored after a failure.
     */
    @Override
    public void snapshotState(FunctionSnapshotContext functionSnapshotContext) throws Exception {
        offsetState.clear();
        offsetState.add(offset);
    }

    @Override
    public void cancel() {
        running = false;
    }
}
