package com.bw.medical.realtime.common.base;

import com.bw.medical.realtime.common.constant.Constant;
import com.bw.medical.realtime.common.util.SQLUtil;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.RestOptions;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Base class for Flink SQL jobs: bootstraps the stream/table environments,
 * then delegates business logic to {@link #handle}. Also provides shared DDL
 * helpers that register Kafka-backed ODS source tables and the HBase dictionary
 * lookup table.
 */
public abstract class BaseSQL {

    /**
     * Business logic implemented by each concrete job.
     *
     * @param env    the stream execution environment prepared by {@link #start}
     * @param tabEnv the table environment bound to {@code env}
     */
    public abstract void handle(StreamExecutionEnvironment env, StreamTableEnvironment tabEnv);

    /**
     * Prepares the execution environments and invokes {@link #handle}.
     *
     * @param port        REST/Web-UI port for the (local) execution environment
     * @param parallelism default parallelism for the job
     * @param ck          checkpoint directory name (only used by the commented-out
     *                    checkpoint configuration below)
     */
    public void start(int port, int parallelism, String ck) {

        System.setProperty("HADOOP_USER_NAME", "hadoop");
        // TODO 1. Prepare the basic environments
        // 1.1 Create the stream execution environment.
        Configuration conf = new Configuration();
        conf.set(RestOptions.PORT, port);
        // BUGFIX: the Configuration must be passed to getExecutionEnvironment,
        // otherwise the REST port set above is silently ignored.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
        // 1.2 Set the parallelism.
        env.setParallelism(parallelism);
        // 1.3 Create the table execution environment.
        StreamTableEnvironment tabEnv = StreamTableEnvironment.create(env);

        // TODO 2. Checkpoint-related settings
        // Normally configured in the cluster config file; kept here (commented
        // out) as a reference/practice.
        // 2.1 Enable checkpointing
//        env.enableCheckpointing(5000L, CheckpointingMode.EXACTLY_ONCE); // exactly-once
//        // 2.2 Checkpoint timeout
//        env.getCheckpointConfig().setCheckpointTimeout(6000L);
//        // 2.3 Retain externalized checkpoints when the job is cancelled
//        env.getCheckpointConfig().setExternalizedCheckpointCleanup(RETAIN_ON_CANCELLATION);
//        // 2.4 Minimum pause between two checkpoints
//        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(2000L); // gap between the end of one checkpoint and the start of the next
//        // 2.5 Restart strategy
//        env.setRestartStrategy(RestartStrategies.failureRateRestart(3, Time.days(30), Time.seconds(3)));
//        // 2.6 State backend and checkpoint storage
//        env.setStateBackend(new HashMapStateBackend());
//        env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop102:8020/gmall06A/ck/" + ck);
//        // 2.7 Hadoop user for checkpoint storage access
//        System.setProperty("HADOOP_USER_NAME","hadoop");
        // TODO 3. Business logic
        handle(env, tabEnv);
    }

    /**
     * Registers the {@code topic_db} dynamic table reading the CDC topic.
     * Event time comes from the {@code ts} epoch-seconds column with a
     * 3-second watermark delay.
     *
     * @param tabEnv  table environment to register the table in
     * @param groupId Kafka consumer group id
     */
    public static void readOdsDb(StreamTableEnvironment tabEnv, String groupId) {

        tabEnv.executeSql("create table topic_db (" +
                " `database` string, " +
                " `table` string, " +
                " `type` string, " +
                " `data` map<string, string>, " +
                " `old` map<string, string>, " +
                " `ts` bigint, " +
                " `pt` as proctime(), " +
                " et as to_timestamp_ltz(ts, 0), " +
                " watermark for et as et - interval '3' second " +
                ")" + SQLUtil.getKafkaSourceSQL(Constant.TOPIC_DB, groupId));
    }

    /**
     * Registers the {@code topic_db_pay} dynamic table (prescription/payment
     * detail). Event time comes from {@code data['create_time']} with a
     * 5-second watermark delay.
     *
     * @param tabEnv  table environment to register the table in
     * @param groupId Kafka consumer group id
     */
    public static void readOdsPay(StreamTableEnvironment tabEnv, String groupId) {

        tabEnv.executeSql("create table topic_db_pay (" +
                " `database` string, " +
                " `table` string, " +
                " `type` string, " +
                " `data` map<string, string>, " +
                " `old` map<string, string>, " +
                " `ts` bigint, " +
                " `pt` as proctime(), " +
                " et as to_timestamp(`data`['create_time']), " +
                " watermark for et as et - interval '5' second " +
                ")" + SQLUtil.getKafkaSourceSQL(Constant.TOPIC_DB, groupId));
    }

    /**
     * Registers the {@code topic_db_pay_suc} dynamic table (payment-success
     * records). Event time comes from {@code data['update_time']} with a
     * 5-second watermark delay.
     *
     * @param tabEnv  table environment to register the table in
     * @param groupId Kafka consumer group id
     */
    public static void readOdsPaySuc(StreamTableEnvironment tabEnv, String groupId) {

        tabEnv.executeSql("create table topic_db_pay_suc (" +
                " `database` string, " +
                " `table` string, " +
                " `type` string, " +
                " `data` map<string, string>, " +
                " `old` map<string, string>, " +
                " `ts` bigint, " +
                " `pt` as proctime(), " +
                " et as to_timestamp(`data`['update_time']), " +
                " watermark for et as et - interval '5' second " +
                ")" + SQLUtil.getKafkaSourceSQL(Constant.TOPIC_DB, groupId));
    }

    /**
     * Registers the {@code base_dic} lookup table backed by HBase dimension
     * data ({@code dim_base_dic}).
     *
     * @param tabEnv table environment to register the table in
     */
    public void readBaseDic(StreamTableEnvironment tabEnv) {
        tabEnv.executeSql("CREATE TABLE base_dic (\n" +
                " dic_code string,\n" + // an atomic-typed field maps to the HBase rowkey; its name/type are free
                " info row<dic_name string>, " + // field name must match the HBase column family; must be a row type whose nested fields are the columns
                " primary key (dic_code) not enforced " + // only the rowkey may be the primary key
                ") " + SQLUtil.getHBaseDDL("dim_base_dic"));
    }
}
