package com.bujunjie.study.realtime.common.base;

import com.bujunjie.study.realtime.common.constant.FlinkConstant;
import com.bujunjie.study.realtime.common.util.SQLUtil;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.RestOptions;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * <p>基础 sql 类</p>
 *
 * @author bu.junjie
 * @version 1.0.0
 * @createTime 2025/9/16 16:10
 */
public abstract class BaseSQLApp {

    /**
     * Bootstraps the Flink SQL runtime: creates the streaming environment with an
     * embedded REST endpoint on the given port, applies parallelism, enables
     * exactly-once checkpointing, installs a failure-rate restart strategy, and
     * finally delegates to {@link #handle(StreamTableEnvironment)} for the
     * subclass's business logic.
     *
     * @param port        REST/Web-UI port for the local Flink cluster
     * @param parallelism default operator parallelism
     * @param ck          checkpoint-storage path suffix (currently unused in the
     *                    active code path; consumed only by the commented-out
     *                    checkpoint-storage configuration below)
     * @author bu.junjie
     * @date 2025/9/16 16:52
     */
    public void start(int port, int parallelism, String ck) {
        //TODO 1. Basic environment setup
        //1.1 Create the stream execution environment with an explicit REST port
        Configuration conf = new Configuration();
        conf.set(RestOptions.PORT, port);
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
        //1.2 Set the default parallelism
        env.setParallelism(parallelism);
        //1.3 Create the table (SQL) environment on top of the stream environment
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
        //TODO 2. Checkpoint-related settings
        // Exactly-once checkpoints every 5 s; restart at most 3 times within a
        // 30-day failure-rate window, waiting 3 s between attempts.
        env.enableCheckpointing(5000L, CheckpointingMode.EXACTLY_ONCE);
        env.setRestartStrategy(RestartStrategies.failureRateRestart(3, Time.days(30), Time.seconds(3)));
        /*
        // Reference configuration for a production deployment (kept for documentation):
        //2.1 Enable checkpointing
        //2.2 Checkpoint timeout
        env.getCheckpointConfig().setCheckpointTimeout(6000L);
        //2.3 Retain externalized checkpoints after job cancellation
        env.getCheckpointConfig().setExternalizedCheckpointCleanup(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        //2.4 Minimum pause between two checkpoints
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(2000L);
        //2.5 Restart strategy
        env.setRestartStrategy(RestartStrategies.failureRateRestart(3, Time.days(30),Time.seconds(3)));
        //2.6 State backend and checkpoint storage (this is where `ck` is used)
        env.setStateBackend(new HashMapStateBackend());
        env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop102:8020/ck/" + ck);
        //2.7 Hadoop user used when writing checkpoints to HDFS
        System.setProperty("HADOOP_USER_NAME","atguigu");
        */
        //TODO 3. Business logic supplied by the subclass
        this.handle(tableEnv);
    }

    /**
     * Subclass hook implementing the job's business logic.
     *
     * @param tableEnv the table environment prepared by {@link #start(int, int, String)}
     */
    public abstract void handle(StreamTableEnvironment tableEnv);

    /**
     * Registers the ODS CDC table {@code topic_db} backed by the Kafka topic
     * {@code FlinkConstant.TOPIC_DB}, with a processing-time column {@code pt}
     * and an event-time column {@code et} (derived from the second-precision
     * {@code ts} field) carrying a 3-second watermark.
     *
     * @param tableEnv Flink SQL table environment
     * @param groupId  Kafka consumer group id
     * @author bu.junjie
     * @date 2025/9/16 16:12
     */
    public void readOdsDb(StreamTableEnvironment tableEnv, String groupId) {
        tableEnv.executeSql("CREATE TABLE topic_db (\n" +
                // FIX: column was misspelled `databese`; the CDC JSON key is
                // `database`, so the misspelled column always deserialized to NULL.
                // Downstream SQL must reference `database` accordingly.
                "  `database` string,\n" +
                "  `table` string,\n" +
                "  `type` string,\n" +
                "  `ts` bigint,\n" +
                "  `data` map<string,string>,\n" +
                "  `old` map<string,string>,\n" +
                "  `pt` as proctime(),\n" +
                // to_timestamp_ltz(ts, 0): `ts` is epoch seconds (precision 0)
                "   et as to_timestamp_ltz(ts, 0),\n" +
                "   watermark for et as et - interval '3' second " +
                ")" + SQLUtil.getKafkaDDL(FlinkConstant.TOPIC_DB, groupId, FlinkConstant.KAFKA_BROKERS));

    }


    /**
     * Registers the HBase-backed dimension table {@code base_dic}
     * (dictionary codes and names) for lookup joins.
     *
     * @param tableEnv Flink SQL table environment
     * @author bu.junjie
     * @date 2025/9/16 17:11
     */
    public void readBaseDic(StreamTableEnvironment tableEnv) {
        tableEnv.executeSql("CREATE TABLE base_dic (\n" +
                " dic_code string,\n" +
                " info ROW<dic_name string>,\n" +
                " PRIMARY KEY (dic_code) NOT ENFORCED\n" +
                ") " + SQLUtil.getHBaseDDL("dim_base_dic"));
        //tableEnv.executeSql("select * from base_dic").print();
    }

}
