package com.atguigu.edu.realtime.Base;

import com.atguigu.edu.realtime.utils.FlinkSourceUtil;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

import static org.apache.flink.streaming.api.environment.CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION;

public abstract class BaseStreamApp {

    /**
     * Business logic implemented by each concrete job.
     *
     * @param env    the configured stream execution environment
     * @param stream the raw string records consumed from the Kafka topic
     */
    protected abstract void handle(StreamExecutionEnvironment env,
                                   DataStreamSource<String> stream);

    /**
     * Bootstraps the job: builds the execution environment, attaches the Kafka
     * source, delegates the business logic to {@link #handle}, then executes.
     *
     * @param port                   REST port for the Flink web UI
     * @param p                      job parallelism; should match the Kafka topic's partition count
     * @param ckAndGroupIdAndJobName single name reused as checkpoint directory,
     *                               Kafka consumer group id and job name
     * @param topic                  Kafka topic to consume
     */
    public void init(int port, int p, String ckAndGroupIdAndJobName, String topic) {
        // 1. Set the Hadoop user explicitly: without it, checkpoint writes to HDFS
        //    fail with a permission error when running on Windows.
        System.setProperty("HADOOP_USER_NAME", "atguigu");

        // 2. Create the stream execution environment.
        Configuration configuration = new Configuration();
        configuration.setString("pipeline.name", ckAndGroupIdAndJobName);
        configuration.setInteger("rest.port", port);
        StreamExecutionEnvironment env = initEnv(configuration, ckAndGroupIdAndJobName, p);

        // 3. Read the raw data from Kafka via the shared source utility.
        DataStreamSource<String> stream = env.fromSource(FlinkSourceUtil.getKafkaSource(ckAndGroupIdAndJobName, topic),
                WatermarkStrategy.noWatermarks(),
                "kafka-source"
        );

        // 4. Let the subclass implement the actual business logic.
        handle(env, stream);

        // 5. Run the job.
        try {
            env.execute();
        } catch (Exception e) {
            // Propagate instead of swallowing: a failed execute() must not
            // look like a successful run to the caller.
            throw new RuntimeException("Job '" + ckAndGroupIdAndJobName + "' failed", e);
        }
    }

    /**
     * Builds the execution environment with state backend and checkpointing configured.
     *
     * @param configuration          base configuration (pipeline name, REST port)
     * @param ckAndGroupIdAndJobName used as the checkpoint storage subdirectory
     * @param parallelism            desired job parallelism (match the Kafka partition count)
     * @return the fully configured environment
     */
    private StreamExecutionEnvironment initEnv(Configuration configuration, String ckAndGroupIdAndJobName, Integer parallelism) {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(configuration);
        // Honor the requested parallelism (was hard-coded to 1, silently
        // ignoring the parameter). Set it to the Kafka topic's partition count.
        env.setParallelism(parallelism);

        // The project currently has no very large state, so HashMapStateBackend
        // (heap-based) is sufficient; RocksDB (memory + disk) is the alternative
        // for larger state.
        env.setStateBackend(new HashMapStateBackend());
        // Enable checkpointing so state survives failures; interval in ms.
        // NOTE(review): 3s is a dev setting — production typically uses minutes.
        env.enableCheckpointing(3000);
        // Exactly-once checkpointing semantics.
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        // Checkpoint storage directory on HDFS, one subdirectory per job.
        env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop162:8020/edu/" + ckAndGroupIdAndJobName);
        // Abort a checkpoint if it takes longer than one minute.
        env.getCheckpointConfig().setCheckpointTimeout(60 * 1000);
        // Guard against back-to-back checkpoints when the previous one ran long.
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(500);
        // Keep externalized checkpoints when the job is cancelled, so the job
        // can be restarted from the last checkpoint. (API name as of 1.13.6.)
        env.getCheckpointConfig().setExternalizedCheckpointCleanup(RETAIN_ON_CANCELLATION);
        return env;
    }

}
