package com.atguigu.edu.realtime.common.base;


import com.atguigu.edu.realtime.common.util.SqlUtil;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Base class for Flink SQL streaming applications.
 *
 * <p>Prepares the stream and table execution environments, enables
 * exactly-once checkpointing with a fixed-delay restart strategy, and then
 * delegates to the subclass-specific {@link #handle} method. Helper methods
 * register the commonly used {@code topic_db} Kafka source table and the
 * {@code base_dic} HBase dimension table.
 */
public abstract class BaseSqlApp {

    /**
     * Boots the Flink environment and invokes the subclass processing logic.
     *
     * @param port         port for the local Flink REST endpoint / Web UI
     * @param parallelism  default operator parallelism
     * @param ckAndGroupId identifier intended to serve both as the checkpoint
     *                     directory name and the Kafka consumer group id
     * @param topic        Kafka topic name — NOTE(review): currently unused in
     *                     this method; kept in the signature for caller
     *                     compatibility, confirm whether it can be dropped
     */
    public void start(int port, int parallelism, String ckAndGroupId, String topic) {
        // 1. Basic environment setup
        // 1.1 Stream execution environment with an explicit REST port
        //     (useful when running several jobs locally at once)
        Configuration conf = new Configuration();
        conf.setInteger("rest.port", port);
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
        // 1.2 Default parallelism
        env.setParallelism(parallelism);
        // 1.3 Table environment layered on top of the stream environment
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // 2. Checkpointing
        // 2.1 Exactly-once checkpoints every 5 seconds
        env.enableCheckpointing(5000L, CheckpointingMode.EXACTLY_ONCE);
        // 2.2 Restart at most 3 times, waiting 3 seconds between attempts
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 3000L));
        // NOTE(review): production checkpoint settings are intentionally left
        // at their defaults here: checkpoint timeout, externalized-checkpoint
        // retention on cancel, minimum pause between checkpoints, a
        // HashMapStateBackend with checkpoint storage under
        // "hdfs://hadoop102:8020/ck/" + ckAndGroupId, and the
        // HADOOP_USER_NAME system property. Re-enable them before deploying
        // to a cluster.

        // 3. Business logic supplied by the concrete application
        handle(env, tableEnv);
    }

    /**
     * Subclass hook containing the application-specific processing logic.
     *
     * @param env      prepared stream execution environment
     * @param tableEnv prepared table environment
     */
    public abstract void handle(StreamExecutionEnvironment env, StreamTableEnvironment tableEnv);

    /**
     * Registers the Kafka-backed {@code topic_db} table.
     *
     * <p>The schema ({@code database}/{@code table}/{@code type}/{@code data}/
     * {@code old}/{@code ts}) looks like a CDC change-log payload — presumably
     * Maxwell/Canal output; verify against the producer. Adds a
     * processing-time attribute {@code pt} and an event-time attribute
     * {@code et} derived from {@code ts} (epoch seconds, per the precision-0
     * argument to {@code TO_TIMESTAMP_LTZ}) with a zero-delay watermark.
     *
     * @param tableEnv table environment to register the table in
     * @param groupId  Kafka consumer group id passed to the connector DDL
     */
    protected void getTopicDbTable(StreamTableEnvironment tableEnv, String groupId) {
        tableEnv.executeSql("create table topic_db(\n" +
                "\t`database` String,\n" +
                "\t`data` MAP<string,string>,\n" +
                "\t`old` MAP<string,string>,\n" +
                "\t`type` String,\n" +
                "\t`table` String,\n" +
                "\t`ts` bigint,\n" +
                "  pt as proctime(),\n" +
                "  et as TO_TIMESTAMP_LTZ(ts, 0),\n" +
                "  WATERMARK FOR et AS et \n" +
                ")" + SqlUtil.getKafkaDDL("topic_db", groupId));
    }

    /**
     * Registers the HBase-backed {@code base_dic} dimension table
     * (dictionary-code lookup keyed by {@code dic_code}).
     *
     * @param tableEnv table environment to register the table in
     */
    protected void getBaseTable(StreamTableEnvironment tableEnv) {
        tableEnv.executeSql("CREATE TABLE base_dic (\n" +
                " dic_code string,\n" +
                " info ROW<dic_name string>,\n" +
                " PRIMARY KEY (dic_code) NOT ENFORCED\n" +
                ") " + SqlUtil.getHBaseDDL("dim_base_dic"));
    }
}
