package com.millstein.realtime.app.base;

import com.millstein.realtime.util.FlinkSourceUtil;
import lombok.extern.slf4j.Slf4j;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;

@Slf4j
public abstract class BaseAppV1 {

    /** Checkpoint interval in milliseconds. */
    private static final long CHECKPOINT_INTERVAL_MS = 5000L;
    /** Minimum pause between two consecutive checkpoints, in milliseconds. */
    private static final long MIN_PAUSE_BETWEEN_CHECKPOINTS_MS = 5000L;
    /** Number of consecutive checkpoint failures the job tolerates before failing. */
    private static final int TOLERABLE_CHECKPOINT_FAILURES = 3;
    /** Checkpoint timeout in milliseconds; a checkpoint exceeding this is considered failed. */
    private static final long CHECKPOINT_TIMEOUT_MS = 5 * 60 * 1000L;
    // NOTE(review): user name and storage root are hard-coded; consider externalizing to config.
    private static final String HADOOP_USER = "tsing";
    private static final String CHECKPOINT_STORAGE_ROOT = "hdfs://hadoop102:8020/gmall/stream/";

    /**
     * Business-specific processing logic, implemented by subclasses.
     *
     * @param env          the stream execution environment
     * @param streamSource the raw Kafka-sourced data stream to process
     */
    protected abstract void handle(
            StreamExecutionEnvironment env, DataStreamSource<String> streamSource
    );

    /**
     * Initializes and runs the application: builds the execution environment,
     * configures checkpointing, attaches the Kafka source, delegates record
     * processing to {@link #handle}, and submits the job.
     *
     * @param webUIPort   port for the Flink web UI (rest.port)
     * @param parallelism default parallelism for the job
     * @param appName     application name; also used as the Kafka consumer group id
     *                    and as the checkpoint storage sub-directory
     * @param topicName   Kafka topic to consume
     * @throws RuntimeException wrapping any exception thrown by job execution
     */
    public void init(int webUIPort, int parallelism, String appName, String topicName) {
        // 1. Set the Hadoop user so checkpoint writes to HDFS are authorized.
        System.setProperty("HADOOP_USER_NAME", HADOOP_USER);

        // 2. Create the execution environment with the web UI bound to the given port.
        Configuration conf = new Configuration();
        conf.setInteger("rest.port", webUIPort);
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
        env.setParallelism(parallelism);

        // 3. Configure checkpointing.
        // Enable checkpointing at a fixed interval.
        env.enableCheckpointing(CHECKPOINT_INTERVAL_MS);
        // Exactly-once checkpointing semantics.
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        // Keep working state on the JVM heap; checkpoints themselves go to the storage below.
        env.setStateBackend(new HashMapStateBackend());
        // Persist checkpoints to HDFS under a per-app directory.
        env.getCheckpointConfig().setCheckpointStorage(CHECKPOINT_STORAGE_ROOT + appName);
        // Minimum pause between the end of one checkpoint and the start of the next.
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(MIN_PAUSE_BETWEEN_CHECKPOINTS_MS);
        // Tolerate a bounded number of checkpoint failures before failing the job.
        env.getCheckpointConfig().setTolerableCheckpointFailureNumber(TOLERABLE_CHECKPOINT_FAILURES);
        // Retain externalized checkpoints when the job is cancelled (Flink 1.13.x API;
        // by default checkpoint data would be deleted on cancellation).
        env.getCheckpointConfig().enableExternalizedCheckpoints(
                CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION
        );
        // Fail a checkpoint that takes longer than this timeout.
        env.getCheckpointConfig().setCheckpointTimeout(CHECKPOINT_TIMEOUT_MS);

        // 4. Build the Kafka source (fix: this is a consumer, not a producer).
        FlinkKafkaConsumer<String> kafkaConsumer = FlinkSourceUtil.getFlinkKafkaConsumer(appName, topicName);

        // 5. Attach the Kafka source to the environment.
        DataStreamSource<String> streamSource = env.addSource(kafkaConsumer);

        // 6. Delegate record processing to the subclass.
        this.handle(env, streamSource);

        // 7. Submit the job; wrap checked exceptions so callers need not declare them.
        try {
            env.execute(appName);
        } catch (Exception e) {
            throw new RuntimeException("Failed to execute Flink job: " + appName, e);
        }
    }
}
