package com.group2.edu.realtime.common.base;

import com.group2.edu.realtime.common.constant.Constant;
import com.group2.edu.realtime.common.util.SQLUtil;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Base template for Flink SQL applications: builds the stream and table
 * environments, configures checkpointing, and delegates business logic to
 * subclasses via {@link #handle}.
 *
 * @author 高耀
 * @date 2024/12/14 16:17
 */
public abstract class BaseSQLApp {
    /**
     * Template entry point. Creates the streaming and table environments,
     * applies checkpoint/restart settings, then hands control to the
     * subclass implementation of {@link #handle}.
     *
     * @param port        port on which the local Flink WebUI listens
     * @param parallelism default parallelism for the job
     * @param ckDir       directory name under which checkpoints would be stored
     *                    (only used by the currently disabled HDFS settings below)
     */
    public void start(Integer port, Integer parallelism, String ckDir) {
        // Step 1: streaming environment with a local WebUI endpoint.
        Configuration configuration = new Configuration();
        configuration.setString("rest.address", "localhost");
        configuration.setInteger("rest.port", port);
        StreamExecutionEnvironment executionEnvironment =
                StreamExecutionEnvironment.getExecutionEnvironment(configuration);
        executionEnvironment.setParallelism(parallelism);

        // Step 2: table environment layered on top of the stream environment.
        StreamTableEnvironment tableEnvironment = StreamTableEnvironment.create(executionEnvironment);

        // Step 3: checkpoint every 5 s with exactly-once semantics.
        executionEnvironment.enableCheckpointing(5000, CheckpointingMode.EXACTLY_ONCE);
        // On failure, retry up to 3 times with a 3 s delay between attempts.
        executionEnvironment.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 3000));
        /* Production checkpoint settings, currently disabled:
        // state backend
        executionEnvironment.setStateBackend(new HashMapStateBackend());
        // checkpoint storage path on HDFS
        executionEnvironment.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop102:8020/ck/" + ckDir);
        // user for HDFS access
        System.setProperty("HADOOP_USER_NAME", "atguigu");
        // minimum pause between two checkpoints
        executionEnvironment.getCheckpointConfig().setMinPauseBetweenCheckpoints(2000);
        // checkpoint timeout
        executionEnvironment.getCheckpointConfig().setCheckpointTimeout(60000);
        // retain externalized checkpoints after cancellation
        executionEnvironment.getCheckpointConfig().setExternalizedCheckpointCleanup(
                CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION); */

        // Step 4: subclass-specific business logic.
        handle(executionEnvironment, tableEnvironment);
    }

    /**
     * Subclass hook containing the actual job logic; invoked by
     * {@link #start} once both environments are configured.
     *
     * @param env                    configured streaming environment
     * @param streamTableEnvironment configured table environment
     */
    protected abstract void handle(StreamExecutionEnvironment env, StreamTableEnvironment streamTableEnvironment);


    /**
     * Registers a Kafka-backed table named {@code topic_db} in the given
     * table environment, reading CDC-style change records (database/table/
     * type/data/old/ts) plus a processing-time column {@code pt} and an
     * event-time column {@code et} derived from the second-precision
     * {@code ts} field, with a zero-lag watermark.
     *
     * @param streamTableEnvironment table environment to register the table in
     * @param groupId                Kafka consumer group id
     */
    public void readTopicDb(StreamTableEnvironment streamTableEnvironment, String groupId) {
        String ddl = "CREATE TABLE topic_db (\n"
                + "  `database` STRING,\n"
                + "  `table` STRING,\n"
                + "  `type` STRING,\n"
                + "  `data` MAP<STRING, STRING>,\n"
                + "  `old` MAP<STRING, STRING>,\n"
                + "  `ts` BIGINT,\n"
                + "  `pt` AS PROCTIME(),\n"
                + "  `et` AS TO_TIMESTAMP_LTZ(ts, 0),\n"
                + "  WATERMARK FOR et AS et - INTERVAL '0' SECOND\n"
                + ")"
                + SQLUtil.getKafkaProperty(Constant.TOPIC_DB, groupId);
        streamTableEnvironment.executeSql(ddl);
    }

}
