package com.example.checkpoint;

import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

/**
 * Created with IntelliJ IDEA.
 * ClassName: CheckConfig
 * Package: com.example.checkpoint
 * Description:
 * User: fzykd
 *
 * @Author: LQH
 * Date: 2023-07-29
 * Time: 11:57
 */

// Checkpoint configuration demo
public class CheckConfig {
    public static void main(String[] args) throws Exception {

        // User name used for HDFS access — the checkpoint directory is permission-controlled.
        System.setProperty("HADOOP_USER_NAME", "bobo");

        // Local environment with the web UI; requires the flink-runtime-web dependency.
        StreamExecutionEnvironment env =
                StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration());
        env.setParallelism(1);

        // TODO checkpoint configuration
        // First argument: checkpoint interval in ms; second argument: mode
        // (EXACTLY_ONCE is the default anyway).
        env.enableCheckpointing(5000, CheckpointingMode.EXACTLY_ONCE);

        CheckpointConfig ckptConf = env.getCheckpointConfig();
        // Where checkpoint state is persisted. This points at HDFS, so a hadoop
        // dependency must be on the classpath and the user name above must have access.
        ckptConf.setCheckpointStorage("hdfs://hadoop102:8020/chk");
        // Checkpoint timeout: default is 10 minutes, shortened here to 1 minute.
        ckptConf.setCheckpointTimeout(60000);
        // Maximum number of checkpoints allowed to be in flight concurrently.
        ckptConf.setMaxConcurrentCheckpoints(2);
        // Minimum pause between the end of one checkpoint and the start of the next.
        // Any value > 0 effectively forces the concurrency above down to 1.
        ckptConf.setMinPauseBetweenCheckpoints(1000); // 1 second
        // Keep externalized checkpoint data when the job is cancelled
        // (DELETE_ON_CANCELLATION would remove it on cancel instead).
        ckptConf.setExternalizedCheckpointCleanup(
                CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        // Number of consecutive checkpoint failures tolerated before the job fails
        // (default 0: a single failed checkpoint kills the job).
        ckptConf.setTolerableCheckpointFailureNumber(10);

        // Classic socket word count: split each line on spaces, count per word.
        env.socketTextStream("hadoop102", 7777)
                .flatMap(new FlatMapFunction<String, Tuple2<String, Integer>>() {
                    @Override
                    public void flatMap(String line, Collector<Tuple2<String, Integer>> out) {
                        for (String word : line.split(" ")) {
                            out.collect(Tuple2.of(word, 1));
                        }
                    }
                })
                .keyBy(pair -> pair.f0)
                .sum(1)
                .print();

        env.execute();
    }
}
