package com.atguigu.medical.realtime.app;

import com.atguigu.medical.realtime.util.FlinkSourceUtil;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

import java.util.HashMap;
import java.util.Map;

import static org.apache.flink.streaming.api.environment.CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION;

/**
 * @Author lzc
 * @Date 2023/4/19 15:28
 */
/**
 * Base class for Flink streaming jobs that consume one or more Kafka topics.
 *
 * <p>Subclasses implement {@link #handle(StreamExecutionEnvironment, Map)} with the
 * job-specific business logic; this class owns environment setup (parallelism, state
 * backend, checkpointing) and source creation. One {@link DataStreamSource} is created
 * per topic and handed to the subclass keyed by topic name.
 */
public abstract class BaseAppV2 {
    /**
     * Job-specific processing, implemented by subclasses.
     *
     * @param env              the configured streaming environment
     * @param topicToStreamMap one source stream per consumed topic, keyed by topic name
     */
    public abstract void handle(StreamExecutionEnvironment env, Map<String, DataStreamSource<String>> topicToStreamMap);
    
    /**
     * Builds the execution environment, wires one Kafka source per topic, delegates to
     * {@link #handle(StreamExecutionEnvironment, Map)}, and submits the job.
     *
     * @param port                  REST port for the local Flink web UI
     * @param p                     default parallelism for the job
     * @param ckAndGroupIdAndJobName single string reused as checkpoint directory name,
     *                              Kafka consumer group id, and job (pipeline) name
     * @param topics                Kafka topics to consume; at least one is required
     * @throws IllegalArgumentException if no topic is supplied
     * @throws RuntimeException         wrapping any failure from {@code env.execute()}
     */
    public void init(int port, int p, String ckAndGroupIdAndJobName, String... topics) {
        if (topics.length == 0) {
            throw new IllegalArgumentException("请至少传递一个要消费的 topic");
        }
        
        // Identity used when the job writes checkpoints to HDFS.
        System.setProperty("HADOOP_USER_NAME", "atguigu");
        
        // 1. Create the stream execution environment.
        Configuration conf = new Configuration();
        conf.setInteger("rest.port", port);
        conf.setString("pipeline.name", ckAndGroupIdAndJobName);  // display name of the job
        
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
        env.setParallelism(p);
        // State backend; in production this is normally supplied via cluster/CLI config.
        env.setStateBackend(new HashMapStateBackend());
        // Enable checkpointing every 3 s.
        env.enableCheckpointing(3000);
        // 1.1. Consistency semantics: exactly-once.
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        // 1.2. Checkpoint storage location.
        // To keep checkpoints in JobManager memory instead:
        //        env.getCheckpointConfig().setCheckpointStorage(new JobManagerCheckpointStorage());
        // Store checkpoints in HDFS, one directory per job.
        env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop162:8020/gmall2023/" + ckAndGroupIdAndJobName);
        
        // 1.3. Checkpoint timeout: 10 s.
        env.getCheckpointConfig().setCheckpointTimeout(10 * 1000);
        // 1.4. Max concurrent checkpoints: 1 (redundant once a min pause is configured).
        //        env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);
        // 1.5. Minimum pause between checkpoints: the next one starts no sooner than
        // 500 ms after the previous one finished.
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(500);
        // 1.6. Keep checkpoint data when the job is cancelled (allows manual restore).
        env.getCheckpointConfig().setExternalizedCheckpointCleanup(RETAIN_ON_CANCELLATION);
        
        // 2. Read from the Kafka topics: one source stream per topic.
        Map<String, DataStreamSource<String>> topicToStreamMap = new HashMap<>();
        for (String topic : topics) {
            KafkaSource<String> source = FlinkSourceUtil.getKafkaSource(ckAndGroupIdAndJobName, topic);
            // Fix: give every source a topic-specific operator name and a stable uid.
            // Previously all sources shared the name "kafka-source", which made them
            // indistinguishable in the web UI/metrics and left stateful-operator UIDs
            // auto-generated, endangering savepoint compatibility across topology changes.
            DataStreamSource<String> stream = env
                .fromSource(source, WatermarkStrategy.noWatermarks(), "kafka-source-" + topic);
            stream.uid("kafka-source-" + topic);
            topicToStreamMap.put(topic, stream);
        }
        
        // Business logic lives in the subclass; the base class only does plumbing.
        handle(env, topicToStreamMap);
        
        try {
            env.execute();
        } catch (Exception e) {
            // Preserve the original cause for diagnosis.
            throw new RuntimeException(e);
        }
    }
    
}
