package com.education.realtime.app;


import com.education.realtime.util.FlinkSourceUtil;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

public abstract class BaseAppV1 {

    /**
     * Template method for a Flink streaming job: builds the execution
     * environment, configures restart strategy and checkpointing, creates a
     * Kafka source stream, delegates business logic to {@link #handle}, and
     * finally submits the job.
     *
     * @param port               REST port for the local Flink web UI
     * @param p                  default parallelism of the job
     * @param groupIdAndJobName  Kafka consumer group id; also reused as the job
     *                           name and the checkpoint storage sub-directory
     * @param topic              Kafka topic to consume
     */
    public void init(int port, int p, String groupIdAndJobName, String topic) {
        // Must be HADOOP_USER_NAME (upper case): that is the property Hadoop's
        // UserGroupInformation actually reads. The previous lower-case key
        // "hadoop_user_name" was silently ignored, so checkpoint writes to HDFS
        // would run as the OS user and could fail with permission errors.
        System.setProperty("HADOOP_USER_NAME", "atguigu");

        Configuration conf = new Configuration();
        conf.setInteger("rest.port", port);
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
        env.setParallelism(p);

        // Failure-rate restart: tolerate at most 10 failures per day,
        // with a 5-second delay between restart attempts.
        env.setRestartStrategy(
                RestartStrategies.failureRateRestart(
                        10,
                        Time.days(1),
                        Time.seconds(5)
                )
        );

        env.enableCheckpointing(3000); // checkpoint every 3 seconds
        env.setStateBackend(new HashMapStateBackend()); // keep operator state on the JVM heap
        env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop102:8020/gmall/" + groupIdAndJobName);
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE); // exactly-once consistency
        env.getCheckpointConfig().setCheckpointTimeout(60 * 1000); // abort a checkpoint after 1 minute
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(500); // minimum pause between two checkpoints
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(1); // never run checkpoints concurrently
        // Retain the last completed checkpoint on cancellation so the job can be
        // restored from it later.
        env.getCheckpointConfig().setExternalizedCheckpointCleanup(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        DataStreamSource<String> stream = env.addSource(FlinkSourceUtil.getKafkaSource(groupIdAndJobName, topic));

        handle(env, stream);

        try {
            env.execute(groupIdAndJobName);
        } catch (Exception e) {
            // Fail loudly with the cause preserved: the previous printStackTrace()
            // swallowed submission failures, making a broken job look started.
            throw new RuntimeException("Flink job '" + groupIdAndJobName + "' failed to execute", e);
        }
    }

    /**
     * Business-logic hook implemented by each concrete application.
     *
     * @param env    the fully configured execution environment
     * @param stream the raw string stream read from the Kafka topic
     */
    protected abstract void handle(StreamExecutionEnvironment env, DataStreamSource<String> stream);

}

