package com.atguigu.flink.Checkpoint;

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

import java.time.Duration;

/**
 * Demo job showing how to configure Flink checkpointing end-to-end:
 * state backend, checkpoint storage on HDFS, timeouts/intervals,
 * externalized-checkpoint retention, unaligned checkpoints, and a
 * restart strategy — wired to a simple socket word-count pipeline.
 */
public class Checkpoint {
    public static void main(String[] args) throws Exception {
        // Set the HDFS user identity BEFORE any HDFS-related configuration or
        // access, so checkpoint writes to hdfs://hadoop102 run as "atguigu".
        System.setProperty("HADOOP_USER_NAME", "atguigu");

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Trigger a checkpoint every 5 s with exactly-once semantics.
        env.enableCheckpointing(5000L , CheckpointingMode.EXACTLY_ONCE);

        // State backend: keyed/operator state lives in heap hash maps.
        env.setStateBackend(new HashMapStateBackend());

        CheckpointConfig checkpointConfig = env.getCheckpointConfig();
        // Checkpoint storage location (default is the JobManager's heap).
        //checkpointConfig.setCheckpointStorage(new JobManagerCheckpointStorage());
        checkpointConfig.setCheckpointStorage("hdfs://hadoop102:8020/ck");

        // CheckpointingMode: AT_LEAST_ONCE, or EXACTLY_ONCE (the default).
        //checkpointConfig.setCheckpointingMode(CheckpointingMode.AT_LEAST_ONCE);

        // Abort a checkpoint attempt if it takes longer than 60 s.
        checkpointConfig.setCheckpointTimeout(60000L);

        // Minimum pause between the end of one checkpoint and the start of the next.
        checkpointConfig.setMinPauseBetweenCheckpoints(2000L);

        // Maximum number of concurrent in-flight checkpoints (default 1).
        //checkpointConfig.setMaxConcurrentCheckpoints(1);

        // Externalized checkpoints: whether to keep them after the job is cancelled.
        checkpointConfig.setExternalizedCheckpointCleanup(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION); // keep externalized checkpoints on cancellation
        //checkpointConfig.setExternalizedCheckpointCleanup(CheckpointConfig.ExternalizedCheckpointCleanup.NO_EXTERNALIZED_CHECKPOINTS); // disable externalized checkpoints entirely
        //checkpointConfig.setExternalizedCheckpointCleanup(CheckpointConfig.ExternalizedCheckpointCleanup.DELETE_ON_CANCELLATION); // delete externalized checkpoints on cancellation

        // Number of tolerated consecutive checkpoint failures (default 0:
        // the job fails on the first failed checkpoint).
        checkpointConfig.setTolerableCheckpointFailureNumber(0);

        // Unaligned checkpoints: barriers overtake in-flight data to keep
        // checkpoint duration low under backpressure.
        checkpointConfig.enableUnalignedCheckpoints();

        // Fall back from aligned to unaligned if alignment exceeds 10 s.
        checkpointConfig.setAlignedCheckpointTimeout(Duration.ofSeconds(10));

        // Restart strategy (fault tolerance beyond checkpoints): allow at most
        // 3 failures within a 30-day window, with a 3 s delay between restarts.
        //env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3,3000L));
        env.setRestartStrategy(RestartStrategies.failureRateRestart(3, Time.days(30),Time.seconds(3)));

        // Incremental (changelog) state backend — disabled here.
        //env.enableChangelogStateBackend(true);

        // Word count over a raw socket stream; every operator gets a stable
        // uid so state can be matched when restoring from a savepoint.
        env.socketTextStream("hadoop102" , 8888).uid("socket_id")
                .flatMap(
                        new FlatMapFunction<String, Tuple2<String , Integer>>() {
                            @Override
                            public void flatMap(String value, Collector<Tuple2<String, Integer>> out) throws Exception {
                                // Split each line on spaces and emit (word, 1) pairs.
                                String[] tokens = value.split(" ");
                                for (String word : tokens) {
                                    out.collect(Tuple2.of(word , 1));
                                }
                            }
                        }
                ).uid("flat_map")
                .returns(Types.TUPLE(Types.STRING, Types.INT))
                .keyBy(data -> data.f0)
                .sum(1).uid("sum_id")
                .print().uid("print_id");
        env.execute();
    }
}
