package com.atguigu.edu.realtime.common.base;

import com.atguigu.edu.realtime.common.constant.Constant;
import com.atguigu.edu.realtime.common.util.FlinkSqlUtil;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.RestOptions;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

import static org.apache.flink.streaming.api.environment.CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION;

/**
 * Template base class for Flink SQL applications: wires up the stream/table
 * execution environments, state backend and checkpointing, then delegates
 * the job-specific logic to {@link #handle}.
 *
 * @author WEIYUNHUI
 * @date 2024/12/23 10:33
 */
public abstract class BaseSqlApp {

    /**
     * Boots the job: builds the execution environment, applies the common
     * checkpoint / state-backend settings, then invokes {@link #handle}.
     *
     * @param port        REST port for the local Flink web UI
     * @param parallelism default operator parallelism
     * @param ckPath      checkpoint sub-path appended to {@code Constant.CK_PATH_PREFIX}
     */
    public void start(Integer port, int parallelism, String ckPath) {
        // Run HDFS accesses (checkpoint storage) as the configured Hadoop user.
        System.setProperty(Constant.HDFS_USER_NAME_CONFIG, Constant.HDFS_USER_NAME_VALUE);

        // Bind the local REST endpoint to the requested port.
        Configuration conf = new Configuration();
        conf.setInteger(RestOptions.PORT, port);

        // 1. Execution environment plus its Table API wrapper.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
        env.setParallelism(parallelism);
        StreamTableEnvironment streamTableEnv = StreamTableEnvironment.create(env);

        // 2. State backend and checkpointing.
        // Keyed state lives on the JVM heap; checkpoints are persisted externally.
        env.setStateBackend(new HashMapStateBackend());
        // Exactly-once checkpoint every 5 s.
        env.enableCheckpointing(5000);
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        // Durable checkpoint location: shared prefix + job-specific path.
        env.getCheckpointConfig().setCheckpointStorage(Constant.CK_PATH_PREFIX + ckPath);
        // At most one checkpoint in flight, at least 5 s apart, 20 s timeout.
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(5000);
        env.getCheckpointConfig().setCheckpointTimeout(20000);
        // Keep externalized checkpoints around when the job is cancelled.
        env.getCheckpointConfig().setExternalizedCheckpointCleanup(RETAIN_ON_CANCELLATION);

        // 3. Hand off to the concrete application.
        handle(streamTableEnv, env);
    }

    /**
     * Job-specific processing, implemented by each concrete application.
     *
     * @param streamTableEnv Table API environment for SQL statements
     * @param env            underlying DataStream environment
     */
    public abstract void handle(StreamTableEnvironment streamTableEnv, StreamExecutionEnvironment env);

    /**
     * Registers the Kafka source table {@code topic_db} carrying the raw CDC
     * change log. Adds a processing-time column {@code pt} and an event-time
     * column {@code et} derived from {@code ts} (precision 0, i.e. epoch
     * seconds) with a 3-second watermark.
     *
     * @param streamTableEnv table environment to register the source in
     * @param groupId        Kafka consumer group id
     */
    public static void readOdsTopicDb(StreamTableEnvironment streamTableEnv, String groupId) {
        String sourceDdl =
                "CREATE TABLE topic_db ( " +
                        "  `database` STRING," +
                        "  `table` STRING," +
                        "  `type` STRING," +
                        "  `ts` BIGINT ," +
                        "  `data` MAP<STRING,STRING> , " +
                        "  `old` MAP<STRING,STRING> ,  " +
                        "  `pt` AS PROCTIME() ,  " +
                        "  `et` AS TO_TIMESTAMP_LTZ( ts , 0) , " +
                        "  WATERMARK FOR et  AS et - INTERVAL '3' SECOND  " +
                        ")" + FlinkSqlUtil.getKafkaSourceDDL(Constant.TOPIC_DB , groupId);
        streamTableEnv.executeSql(sourceDdl);
    }

    /**
     * Registers the HBase-backed dictionary dimension table
     * {@code dim_base_dic} for async lookup joins, with a partial cache
     * (max 100 rows, 1 h expiry after write/access).
     *
     * @param streamTableEnv table environment to register the dimension in
     */
    public static void readDimBaseDic(StreamTableEnvironment streamTableEnv) {
        String dimDdl =
                " CREATE TABLE dim_base_dic ( " +
                        " dic_code STRING, " +
                        " info ROW<dic_name STRING>," +
                        " PRIMARY KEY (dic_code) NOT ENFORCED " +
                        ") WITH (" +
                        " 'connector' = 'hbase-2.2', " +
                        " 'table-name' = '" + Constant.HBASE_NAMESPACE + ":dim_base_dic', " +
                        " 'zookeeper.quorum' = '" + Constant.HBASE_ZOOKEEPER_QUORUM + "' , " +
                        " 'lookup.async' = 'true' , " +
                        " 'lookup.cache' = 'PARTIAL' , " +
                        " 'lookup.partial-cache.max-rows' = '100' , " +
                        " 'lookup.partial-cache.expire-after-write' = '1 hour' , " +
                        " 'lookup.partial-cache.expire-after-access' = '1 hour' " +
                        " )";
        streamTableEnv.executeSql(dimDdl);
    }
}
