package com.atguigu.day09;

import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

/**
 * @author Felix
 * @date 2024/4/11
 * Demonstrates checkpoint-related configuration options in Flink.
 */
public class Flink01_CheckPoint {
    public static void main(String[] args) throws Exception {
        // Configure the Hadoop user FIRST, before any HDFS-related setup below
        // (checkpoint storage points at HDFS); setting it afterwards risks the
        // default OS user being picked up for HDFS access.
        System.setProperty("HADOOP_USER_NAME", "atguigu");

        Configuration conf = new Configuration();
        // Whether to allow a final checkpoint after some tasks have finished
        //conf.set(ExecutionCheckpointingOptions.ENABLE_CHECKPOINTS_AFTER_TASKS_FINISH, false);
        StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(conf);

        // Enable checkpointing: every 5s, exactly-once mode
        env.enableCheckpointing(5000L, CheckpointingMode.EXACTLY_ONCE);

        // State backend: HashMap backend keeps state on the TaskManager heap
        env.setStateBackend(new HashMapStateBackend());
        //env.setStateBackend(new EmbeddedRocksDBStateBackend());
        CheckpointConfig checkpointConfig = env.getCheckpointConfig();
        // Checkpoint storage.
        // With the HashMap backend, state lives in TM heap memory and checkpoints
        // default to the JobManager heap.
        //checkpointConfig.setCheckpointStorage(new JobManagerCheckpointStorage());
        // Store checkpoints on HDFS instead.
        checkpointConfig.setCheckpointStorage("hdfs://hadoop102:8020/ck");

        // Checkpointing mode (CheckpointingMode) — can also be set here if it was
        // not specified when enabling checkpointing above.
        //checkpointConfig.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);

        // Checkpoint timeout (checkpointTimeout)
        checkpointConfig.setCheckpointTimeout(60000L);

        // Minimum pause between checkpoints (minPauseBetweenCheckpoints)
        checkpointConfig.setMinPauseBetweenCheckpoints(2000L);

        // Maximum number of concurrent checkpoints (maxConcurrentCheckpoints)
        //checkpointConfig.setMaxConcurrentCheckpoints(1);

        // Externalized checkpoints (enableExternalizedCheckpoints) — whether
        // checkpoints are retained after the job is cancelled.
        checkpointConfig.setExternalizedCheckpointCleanup(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        //checkpointConfig.setExternalizedCheckpointCleanup(CheckpointConfig.ExternalizedCheckpointCleanup.DELETE_ON_CANCELLATION);

        // Tolerable consecutive checkpoint failures (tolerableCheckpointFailureNumber)
        //checkpointConfig.setTolerableCheckpointFailureNumber(0);

        // Restart strategy.
        // This is Flink's built-in fault tolerance: if the job fails, Flink restarts
        // it automatically (default restart attempts: Integer.MAX_VALUE).
        //env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3,3000L));
        env.setRestartStrategy(RestartStrategies.failureRateRestart(3, Time.days(30),Time.seconds(3)));

        // Unaligned checkpoints (enableUnalignedCheckpoints)
        //checkpointConfig.enableUnalignedCheckpoints();

        // Aligned checkpoint timeout (alignedCheckpointTimeout)
        //checkpointConfig.setAlignedCheckpointTimeout(Duration.ofSeconds(10));

        // Changelog (incremental) state backend — experimental, for reference only
        //env.enableChangelogStateBackend(true);

        // Word-count pipeline: socket source -> split into (word, 1) -> key by word -> sum
        env
                .socketTextStream("hadoop102", 8888)
                .flatMap(
                        (String lineStr, Collector<Tuple2<String,Long>> out)->{
                            String[] words = lineStr.split(" ");
                            for (String word : words) {
                                out.collect(Tuple2.of(word, 1L));
                            }
                        }
                )
                .returns(Types.TUPLE(Types.STRING,Types.LONG))
                // KeySelector lambda instead of the deprecated index-based keyBy(0)
                .keyBy(t -> t.f0)
                .sum(1)
                .print();
        env.execute();
    }
}
