package com.group2.edu.realtime.common.base;

import com.group2.edu.realtime.common.util.FlinkSourceUtil;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.RestOptions;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

/**
 * Template base class for Flink streaming jobs. It builds the execution environment
 * (with a local web UI), configures checkpointing and the restart strategy, wires up
 * the Kafka main stream, delegates the business logic to
 * {@link #handle(StreamExecutionEnvironment, DataStreamSource)}, and submits the job.
 *
 * @author 高耀
 * @date 2024/12/14 10:06<p></p>
 */
public abstract class BaseApp {

    /**
     * Runs the common job skeleton.
     *
     * @param port         port of the local Flink web UI for this job
     * @param parallelism  global parallelism of the job
     * @param ckAndGroupId identifier reused both as the checkpoint directory name on
     *                     HDFS and as the Kafka consumer group id
     * @param topic        Kafka topic consumed as the main stream
     */
    public void start(int port, int parallelism, String ckAndGroupId, String topic) {
        // 1. Environment with a local web UI on the requested port.
        //    Use the typed RestOptions config options instead of raw string keys.
        Configuration conf = new Configuration();
        conf.set(RestOptions.ADDRESS, "localhost");
        conf.set(RestOptions.PORT, port);
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
        env.setParallelism(parallelism);

        // 2. Checkpoint every 5 s with exactly-once semantics; on failure restart
        //    at most 3 times with a 3 s delay between attempts.
        env.enableCheckpointing(5000L, CheckpointingMode.EXACTLY_ONCE);
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 3000L));

        // 3. Main stream from Kafka. No watermarks are assigned here; subclasses
        //    that need event time must assign them in handle().
        KafkaSource<String> kafkaSource = FlinkSourceUtil.getKafkaSource(topic, ckAndGroupId);
        DataStreamSource<String> mainDs = env.fromSource(
                kafkaSource, WatermarkStrategy.noWatermarks(), "kfSource");

        // 4. Job-specific processing supplied by the concrete subclass.
        handle(env, mainDs);

        // 5. Submit the job, preserving the original failure as the cause.
        try {
            env.execute();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    /**
     * Implements the job-specific transformation on the main stream.
     *
     * @param env        the stream execution environment (for extra sources/sinks)
     * @param mainSource the Kafka-backed main stream of raw record strings
     */
    public abstract void handle(StreamExecutionEnvironment env, DataStreamSource<String> mainSource);
}
