package com.atguigu.medical.realtime.app;


import com.atguigu.medical.realtime.common.Constant;
import com.atguigu.medical.realtime.util.SQLUtil;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

import static org.apache.flink.streaming.api.environment.CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION;

/**
 * @Author lzc
 * @Date 2023/4/19 15:28
 */
public abstract class BaseSQLApp {
    
    /**
     * Bootstraps a Flink SQL job: builds the streaming environment, enables
     * exactly-once checkpointing to HDFS, creates the table environment, names
     * the pipeline, and then delegates to the subclass' {@link #handle}.
     *
     * @param port            REST port for the local Flink web UI
     * @param p               default parallelism of the job
     * @param ckAndAndJobName used both as the checkpoint sub-directory on HDFS
     *                        and as the job (pipeline) name
     */
    public void init(int port, int p, String ckAndAndJobName) {
        
        // User the job authenticates as when writing checkpoints to HDFS.
        System.setProperty("HADOOP_USER_NAME", "atguigu");
        
        // 1. Obtain a stream execution environment bound to the given web-UI port.
        Configuration conf = new Configuration();
        conf.setInteger("rest.port", port);
        
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
        env.setParallelism(p);
        
        configureCheckpointing(env, ckAndAndJobName);
        
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);
        // Give the SQL job a readable name in the Flink UI.
        tEnv.getConfig().getConfiguration().setString("pipeline.name", ckAndAndJobName);
        
        handle(env, tEnv);
    }
    
    /**
     * Configures the state backend and exactly-once checkpointing for {@code env}.
     * Extracted from {@link #init} so environment setup and checkpoint tuning
     * read separately.
     *
     * @param env             the environment to configure
     * @param ckAndAndJobName sub-directory under the HDFS checkpoint root
     */
    private static void configureCheckpointing(StreamExecutionEnvironment env, String ckAndAndJobName) {
        // Set the state backend. In production this is usually supplied via
        // command-line/config parameters rather than hard-coded.
        env.setStateBackend(new HashMapStateBackend());
        // Enable checkpointing: trigger a checkpoint every 3 s.
        env.enableCheckpointing(3000);
        // 1.1 Consistency semantics: exactly once.
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        // 1.2 Checkpoint storage location.
        // Alternative: keep checkpoints in JobManager memory:
        //        env.getCheckpointConfig().setCheckpointStorage(new JobManagerCheckpointStorage());
        // Store checkpoints in HDFS.
        // NOTE(review): path says "gmall2023" although this is the medical
        // project — looks like copy-paste residue; confirm the directory is
        // intentional before changing it (changing it orphans old checkpoints).
        env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop162:8020/gmall2023/" + ckAndAndJobName);
        
        // 1.3 Checkpoint timeout: abort any checkpoint that takes longer than 10 s.
        env.getCheckpointConfig().setCheckpointTimeout(10 * 1000);
        // 1.4 Max concurrent checkpoints: 1 means at most one checkpoint in flight.
        //        env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);  // implied once a min pause is set
        // 1.5 Minimum pause between checkpoints: the next checkpoint starts no
        // sooner than 500 ms after the previous one finishes.
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(500);
        // 1.6 Keep checkpoint data when the job is cancelled, so it can be
        // used for a manual restore.
        env.getCheckpointConfig().setExternalizedCheckpointCleanup(RETAIN_ON_CANCELLATION);
    }
    
    /**
     * Business logic of the concrete application. Called by {@link #init}
     * after the environments are fully configured.
     *
     * @param env  the configured stream execution environment
     * @param tEnv the table environment created on top of {@code env}
     */
    protected abstract void handle(StreamExecutionEnvironment env,
                                   StreamTableEnvironment tEnv);
    
    /**
     * Registers the Kafka-backed ODS table {@code ods_medical} (CDC-style rows:
     * op / before / after / source maps plus a millisecond timestamp), with a
     * processing-time column for lookup joins and an event-time column with a
     * 3-second watermark.
     *
     * @param tEnv    table environment to register the table in
     * @param groupId Kafka consumer group id for this job
     */
    public void readOdsMedical(StreamTableEnvironment tEnv, String groupId) {
        tEnv.executeSql("create table ods_medical(" +
                            " `op` string, " +
                            " `after` map<string, string>, " +
                            " `before` map<string, string>, " +
                            " `source` map<string, string>, " +
                            " `ts` bigint," +
                            " `pt` as proctime(), " + // processing time, required for lookup joins
                            "  et as to_timestamp_ltz(ts, 3), " + // event time from the epoch-millis column
                            " watermark for et as et - interval '3' second " +
                            ")" + SQLUtil.getKafkaSourceDDL(Constant.TOPIC_ODS_MEDICAL, groupId));
    }
    
    /**
     * Registers the MySQL dimension table {@code dict} (backed by
     * {@code base_dic}) as a JDBC lookup table with a partial cache of at most
     * 200 rows that expires 20 s after last access.
     *
     * @param tEnv table environment to register the table in
     */
    public void readDict(StreamTableEnvironment tEnv) {
        // SECURITY(review): JDBC credentials are hard-coded in source; move
        // them to external configuration / a secret store.
        tEnv.executeSql("create table dict(" +
                            " id string, " +
                            " name string " +
                            ")with(" +
                            " 'connector' = 'jdbc'," +
                            " 'url' = 'jdbc:mysql://hadoop162:3306/medical?useSSL=false'," +
                            " 'table-name' = 'base_dic', " +
                            " 'lookup.cache' = 'PARTIAL'," +
                            " 'lookup.partial-cache.max-rows' = '200'," +
                            //                            " 'lookup.partial-cache.expire-after-write' = '20 second', " +
                            " 'lookup.partial-cache.expire-after-access' = '20 second', " +
                            " 'username' = 'root', " +
                            " 'password' = 'aaaaaa' " +
                            ")");
    }
    
    
}
