package com.education.realtime.app;


import com.education.realtime.common.Constant;
import com.education.realtime.util.SqlUtil;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;


/**
 * Skeleton for Flink SQL jobs: builds and configures the stream/table execution
 * environments, then delegates the job-specific logic to {@link #handle}.
 */
public abstract class BaseSQLApp {

    /**
     * Bootstraps the Flink environments with the shared runtime settings and
     * invokes the subclass's {@link #handle} implementation.
     *
     * @param port      port for the local Flink REST/web UI
     * @param ckJobName job name; used as the pipeline name (and as the checkpoint
     *                  path suffix when checkpointing is re-enabled below)
     */
    public void init(int port, String ckJobName) {
        // HDFS access (e.g. checkpoint storage) runs as this user.
        System.setProperty("HADOOP_USER_NAME", "atguigu");

        Configuration flinkConf = new Configuration();
        flinkConf.setInteger("rest.port", port);

        StreamExecutionEnvironment streamEnv = StreamExecutionEnvironment.getExecutionEnvironment(flinkConf);
        streamEnv.setParallelism(3);
        // Tolerate up to 10 failures per day, waiting 5 s between restart attempts.
        streamEnv.setRestartStrategy(RestartStrategies.failureRateRestart(10, Time.days(1), Time.seconds(5)));

        // Checkpointing is intentionally disabled here; re-enable as needed:
//        streamEnv.setStateBackend(new HashMapStateBackend());
//        streamEnv.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop102:8020/gmall/" + ckJobName);
//        streamEnv.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
//        // Start a checkpoint every 3000 ms.
//        streamEnv.enableCheckpointing(3000);
//        // A checkpoint must complete within one minute or it is discarded.
//        streamEnv.getCheckpointConfig().setCheckpointTimeout(60 * 1000);
//        // Allow only one checkpoint in flight at a time.
//        streamEnv.getCheckpointConfig().setMaxConcurrentCheckpoints(1);
//        // Leave at least 500 ms between consecutive checkpoints.
//        streamEnv.getCheckpointConfig().setMinPauseBetweenCheckpoints(500);
//        // Externalized checkpoints: retain them even after the job is cancelled.
//        streamEnv.getCheckpointConfig().setExternalizedCheckpointCleanup(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(streamEnv);

        // Name the SQL job so it is identifiable in the Flink UI.
        tableEnv.getConfig().getConfiguration().setString("pipeline.name", ckJobName);

        handle(streamEnv, tableEnv);
    }

    /**
     * Job-specific logic supplied by each concrete application.
     *
     * @param env  the configured stream execution environment
     * @param tEnv the table environment created on top of {@code env}
     */
    protected abstract void handle(StreamExecutionEnvironment env, StreamTableEnvironment tEnv);

    /**
     * Registers the ODS Kafka topic as table {@code ods_db}: change-log style
     * records (database/table/type/data/old columns — presumably Maxwell CDC
     * output; confirm against the producer) plus a processing-time column
     * {@code pt}.
     *
     * @param tEnv    the table environment to register the table in
     * @param groupId Kafka consumer group id for the source
     */
    public void readOdsDb(StreamTableEnvironment tEnv, String groupId) {
        String[] ddlParts = {
                "create table ods_db( ",
                "  `database` string, ",
                "  `table` string, ",
                "  `type` string, ",
                "  `ts` string, ",
                "  `xid` string, ",
                "  `commit` string, ",
                "  `data` map<string,string>, ",
                "  `old` map<string,string>, ",
                "  `pt` as proctime() ",
                ")"
        };
        tEnv.executeSql(String.join("", ddlParts) + SqlUtil.getKafkaReadDDL(Constant.TOPIC_ODS_DB, groupId));
    }
}
