package com.atguigu.gmall.realtime.common.base;

import com.atguigu.gmall.realtime.common.constant.Constant;
import com.atguigu.gmall.realtime.common.util.SQLUtil;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.RestOptions;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * ClassName: BaseSQLApp
 * Description: Template base class for Flink SQL applications. Bootstraps the
 * stream/table execution environments, configures checkpointing and the restart
 * strategy, then delegates the business logic to the subclass via {@link #handle}.
 * Also provides shared DDL helpers for the ODS Kafka topic and the HBase
 * dictionary dimension table.
 *
 * @Create 2024/4/20 15:10
 */
public abstract class BaseSQLApp {

    /**
     * Bootstraps the Flink environment and invokes the subclass's business logic.
     *
     * @param port        port for the local Flink REST/web UI
     * @param parallelism default parallelism for the job
     * @param ck          checkpoint storage path suffix; currently unused — it was
     *                    consumed by externalized-checkpoint configuration that has
     *                    been removed. Kept for backward compatibility with existing
     *                    callers; re-introduce {@code setCheckpointStorage} for
     *                    cluster deployments that need HDFS-backed checkpoints.
     */
    public void start(int port, int parallelism, String ck) {
        // TODO environment setup
        Configuration conf = new Configuration();
        conf.set(RestOptions.PORT, port);
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);

        env.setParallelism(parallelism);

        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // Checkpoint every 5 s with exactly-once semantics
        env.enableCheckpointing(5000L, CheckpointingMode.EXACTLY_ONCE);

        // Restart at most 3 times, waiting 3 s between attempts
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 3000L));

        // TODO business logic
        handle(env, tableEnv);
    }

    /**
     * Business logic supplied by the concrete application.
     *
     * @param env      the configured stream execution environment
     * @param tableEnv the table environment bound to {@code env}
     */
    public abstract void handle(StreamExecutionEnvironment env, StreamTableEnvironment tableEnv);

    /**
     * Registers the {@code topic_db} table over the ODS Kafka topic
     * ({@link Constant#TOPIC_DB}) carrying CDC change records.
     *
     * @param groupId Kafka consumer group id for this job
     */
    public void readOdsDb(StreamTableEnvironment tableEnv, String groupId) {
        tableEnv.executeSql("CREATE TABLE topic_db (\n" +
                "   `database` String,\n" +
                "   `table` String,\n" +
                "   `type` String,\n" +
                "   `data` MAP<String,String>,\n" +
                "   `old` MAP<String,String>,\n" +
                "   `ts` BIGINT,\n" +
                // lookup join requires a processing-time attribute
                "   pt as proctime(),\n" +
                // convert ts (epoch seconds) to TIMESTAMP_LTZ for the watermark
                "   et as TO_TIMESTAMP_LTZ(ts,0),\n" +
                // zero-lag watermark: assumes event time is strictly increasing
                "   WATERMARK FOR et AS et\n" +
                ") " + SQLUtil.getKafkaDDL(Constant.TOPIC_DB,groupId));
    }

    /**
     * Registers the {@code base_dic} lookup table backed by the HBase dimension
     * table {@code gmall1030:dim_base_dic}.
     */
    public void readBaseDic(StreamTableEnvironment tableEnv) {
        tableEnv.executeSql("CREATE TABLE base_dic\n" +
                "(\n" +
                "    dic_code string,\n" +
                "    info ROW <dic_name string >,\n" +
                "    PRIMARY KEY (dic_code) NOT ENFORCED\n" +
                ")\n" + SQLUtil.getHBaseDDL("gmall1030:dim_base_dic"));
    }

}
