package app.dwd;

import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import util.FlinkSourceUtil;

import java.util.concurrent.TimeUnit;

/**
 * @Author lzc
 * @Date 2022/10/5 16:29
 */
public abstract class BaseAppV1 {

    /**
     * Bootstraps a Flink streaming job: configures the local environment,
     * state backend, restart strategy, and checkpointing, wires up a Kafka
     * source, hands the stream to the subclass via {@link #handle}, and
     * finally executes the job.
     *
     * @param port                   REST port for the local Flink web UI
     * @param p                      job parallelism; conventionally kept equal to
     *                               the Kafka topic's partition count
     * @param ckAndGroupIdAndJobName single identifier reused as the checkpoint
     *                               directory name, the Kafka consumer group id,
     *                               and the Flink job name
     * @param topic                  Kafka topic to consume
     * @throws RuntimeException if job execution fails; the original exception
     *                          is preserved as the cause
     */
    public void init(int port, int p, String ckAndGroupIdAndJobName, String topic) {
        // User identity for writing checkpoints to HDFS.
        System.setProperty("HADOOP_USER_NAME", "atguigu");

        Configuration conf = new Configuration();
        conf.setInteger("rest.port", port);
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
        // Convenient for local debugging; in general keep parallelism aligned
        // with the Kafka topic's partition count.
        env.setParallelism(p);

        // NOTE(review): checkpointing is never enabled here (an
        // env.enableCheckpointing(...) call appears to have been removed), so
        // the checkpoint configuration below is inert. Confirm whether this is
        // a deliberate local-debug toggle or a leftover that should be restored.

        // Since Flink 1.13 the state backend only decides how state is kept
        // locally (HashMap on the TaskManager heap vs. RocksDB); where
        // checkpoints are persisted is configured separately via
        // CheckpointConfig (see configureCheckpointing).
        env.setStateBackend(new HashMapStateBackend());

        // With checkpointing, a failing job would otherwise restart forever;
        // cap it at 3 failures per day, with a 3-minute delay between attempts.
        env.setRestartStrategy(RestartStrategies.failureRateRestart(
            3, Time.of(1L, TimeUnit.DAYS), Time.of(3L, TimeUnit.MINUTES)
        ));

        configureCheckpointing(env.getCheckpointConfig(), ckAndGroupIdAndJobName);

        // Read from Kafka; only the subclass knows how to process the stream.
        DataStreamSource<String> stream =
            env.addSource(FlinkSourceUtil.getKafkaSource(topic, ckAndGroupIdAndJobName));
        handle(env, stream);

        try {
            env.execute(ckAndGroupIdAndJobName);
        } catch (Exception e) {
            // Fail loudly instead of swallowing the error: a silently dead job
            // is worse than a visible crash, and the cause stays attached for
            // diagnosis (never printStackTrace()).
            throw new RuntimeException(
                "Flink job '" + ckAndGroupIdAndJobName + "' failed", e);
        }
    }

    /** Configures exactly-once checkpointing persisted to HDFS. */
    private void configureCheckpointing(CheckpointConfig ckConf, String ckAndGroupIdAndJobName) {
        // Checkpoint storage path on HDFS, one directory per job.
        ckConf.setCheckpointStorage("hdfs://hadoop174:8020/edu/ck/" + ckAndGroupIdAndJobName);
        // Exactly-once processing semantics.
        ckConf.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        // Minimum pause between two checkpoints; with this set, limiting
        // max concurrent checkpoints is unnecessary.
        ckConf.setMinPauseBetweenCheckpoints(500);
        // Abort a checkpoint that takes longer than one minute.
        ckConf.setCheckpointTimeout(60 * 1000);
        // Tolerate up to 5 checkpoint failures (cumulative) before failing the job.
        ckConf.setTolerableCheckpointFailureNumber(5);
        // Keep externalized checkpoint data when the job is cancelled, so the
        // job can be restored from it later.
        ckConf.setExternalizedCheckpointCleanup(
            CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
    }

    /**
     * Subclass hook: defines how the Kafka-sourced stream is processed.
     *
     * @param env    the configured execution environment
     * @param stream raw String records consumed from the Kafka topic
     */
    protected abstract void handle(StreamExecutionEnvironment env,
                                   DataStreamSource<String> stream);
}
