package com.lsx143.realtime.app;

import com.lsx143.realtime.util.KafkaUtil;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

/**
 * Base class for streaming apps: builds the Flink environment, configures
 * checkpointing, wires a Kafka source, then delegates to {@link #run}.
 */
@SuppressWarnings("all")
public abstract class BaseApp {
    /**
     * Implement the app's business logic.
     *
     * @param env          the Flink execution environment
     * @param sourceStream the source data stream read from Kafka
     */
    protected abstract void run(StreamExecutionEnvironment env,
                                DataStreamSource<String> sourceStream);

    /**
     * Initializes the app's Kafka-consuming environment and executes the job.
     *
     * @param port               REST port for the job's web UI
     * @param appName            app name; also used as the checkpoint directory name
     * @param defaultParallelism default parallelism for the job
     * @param groupId            Kafka consumer group id
     * @param topic              Kafka topic to consume
     * @throws RuntimeException if submitting or executing the Flink job fails
     */
    protected void init(int port, String appName, int defaultParallelism, String groupId, String topic) {
        // 0. Bind the job's web UI to the given port.
        Configuration conf = new Configuration();
        conf.setInteger("rest.port", port);
        // HDFS user used when writing checkpoints.
        // NOTE(review): hard-coded user and cluster address below — consider externalizing to config.
        System.setProperty("HADOOP_USER_NAME", "atguigu");
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);

        // 1. Default parallelism.
        env.setParallelism(defaultParallelism);
        // 2. Exactly-once checkpointing, triggered every 3000 ms.
        env.enableCheckpointing(3000, CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(100);
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);
        // 3. A checkpoint must complete within one minute or it is discarded.
        env.getCheckpointConfig().setCheckpointTimeout(60 * 1000);
        // 4. Retain externalized checkpoints even after the job is cancelled.
        env.getCheckpointConfig()
                .enableExternalizedCheckpoints(
                        CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION
                );
        // 5. State backend in heap memory; checkpoint snapshots stored on HDFS.
        env.setStateBackend(new HashMapStateBackend());
        env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop162:8020/gmall2021/flink/ck/" + appName);

        // 6. Build the Kafka input stream.
        DataStreamSource<String> srcStream = env.addSource(KafkaUtil.getKafkaSource(groupId, topic));
        // 7. Run the app's business logic on the stream.
        run(env, srcStream);
        try {
            env.execute(appName);
        } catch (Exception e) {
            // Propagate job failure with its cause instead of swallowing it
            // (previously only printStackTrace, which hid failed submissions).
            throw new RuntimeException("Flink job '" + appName + "' failed", e);
        }
    }
}
