
/*
 * Copyright © 2021 https://www.cestc.cn/ All rights reserved.
 */

package com.zx.learn.flink.state;

import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.state.ListState;
import org.apache.flink.api.common.state.ListStateDescriptor;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.runtime.state.FunctionInitializationContext;
import org.apache.flink.runtime.state.FunctionSnapshotContext;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.checkpoint.CheckpointedFunction;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.source.RichParallelSourceFunction;

/**
 * Global configuration (flink-conf.yaml):
 * state.backend: filesystem
 * state.checkpoints.dir: hdfs://namenode-host:port/flink-checkpoints
 * state.savepoints.dir: hdfs://namenode-host:port/flink-savepoints
 * <p>
 * Or configure programmatically:
 * env.enableCheckpointing(1000);
 * env.setStateBackend(new FsStateBackend("file:///D:/chk"));
 * <p>
 * Uses ListState to store an offset, simulating Kafka offset maintenance.
 */
public class OperatorStateDemo01 {
    public static void main(String[] args) throws Exception {
        // 1. Execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        // 2. Enable checkpointing: take a checkpoint every 1s and persist state
        //    under file:///D:/ckp. (In production this would typically be HDFS,
        //    e.g. hdfs://node1:8020/checkpoint/, or RocksDBStateBackend for
        //    asynchronous incremental snapshots.)
        env.enableCheckpointing(1000);
        env.setStateBackend(new FsStateBackend("file:///D:/ckp"));
        // Keep the externalized checkpoint data when the job is cancelled, so the
        // job can later be restored from it.
        env.getCheckpointConfig().enableExternalizedCheckpoints(
                CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        // Abort any checkpoint that takes longer than 60s.
        env.getCheckpointConfig().setCheckpointTimeout(60000);
        // Minimum pause between the end of one checkpoint and the start of the next.
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(500);
        // Allow at most 2 checkpoints to be in flight at the same time.
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(2);
        //env.setStateBackend(new FsStateBackend("hdfs://node1:8020/checkpoints"));
        //env.setStateBackend(new RocksdbStateBackend())

        // Exactly-once checkpointing semantics.
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        // Fixed-delay restart strategy: on failure, restart up to 3 times with a 3s
        // delay between attempts; after the 3rd failed attempt the job terminates.
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 3000));

        // 3. Source.
        DataStreamSource<String> sourceData = env.addSource(new MyMoniKafkaSource());

        // 4. Transformation / Sink.
        sourceData.print();

        // 5. Execute.
        env.execute();
    }

    /**
     * A parallel source that simulates Kafka offset maintenance: the current offset
     * is kept in operator {@link ListState} via {@link CheckpointedFunction}, so it
     * is snapshotted on each checkpoint and restored after a failure/restart.
     */
    public static class MyMoniKafkaSource extends RichParallelSourceFunction<String>
            implements CheckpointedFunction {
        // Operator state holding the last checkpointed offset.
        private ListState<Long> offsetState;
        // Offset counter of this simulated consumer.
        private Long offset = 0L;
        // Run flag; volatile because cancel() is invoked from a different thread
        // than run(), and the write must be visible to the source loop.
        private volatile boolean flag = true;

        /**
         * Called on start/restore: creates the list-state descriptor and obtains the
         * operator state handle from the runtime context.
         */
        @Override
        public void initializeState(FunctionInitializationContext context) throws Exception {
            ListStateDescriptor<Long> desc = new ListStateDescriptor<>("offsetState",
                    TypeInformation.of(Long.class));
            offsetState = context.getOperatorStateStore()
                    .getListState(desc);
        }

        @Override
        public void run(SourceContext<String> ctx) throws Exception {
            // Restore the offset from state if present (i.e. when recovering from a
            // checkpoint). With parallelism 1 at most one entry is expected; like the
            // original logic, only the first entry is used.
            for (Long restored : offsetState.get()) {
                offset = restored;
                break;
            }
            while (flag) {
                // Advance the simulated consumption position.
                offset += 1;
                int id = getRuntimeContext().getIndexOfThisSubtask();
                ctx.collect("分区:" + id + "消费到offset的位置为:" + offset);
                // Sleep 2s so the 1s-interval checkpoint can snapshot the state in
                // between emissions.
                Thread.sleep(2000);
                // Simulate a failure every 5 records to demonstrate that the offset
                // is recovered from the checkpoint after restart.
                if (offset % 5 == 0) {
                    System.out.println("当前程序出现bug");
                    throw new Exception("当前程序出现bug");
                }
            }
        }

        @Override
        public void cancel() {
            // Signal the run() loop to exit; visible immediately thanks to volatile.
            flag = false;
        }

        /**
         * Called on each checkpoint: replaces the previously stored offset with the
         * current one.
         */
        @Override
        public void snapshotState(FunctionSnapshotContext context) throws Exception {
            // Clear stale entries, then store the current offset.
            offsetState.clear();
            offsetState.add(offset);
        }

    }
}
