package com.atguigu.gmall.realtime.app;

import com.atguigu.gmall.realtime.common.Constant;
import com.atguigu.gmall.realtime.util.SQLUtil;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

import static org.apache.flink.streaming.api.environment.CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION;

/**
 * @Author lzc
 * @Date 2023/2/13 09:15
 */
public abstract class BaseSQLApp {

    /**
     * Bootstraps the streaming job: builds the execution environment with the
     * given web-UI port and job name, enables exactly-once checkpointing to
     * HDFS, creates the table environment, and delegates the business logic
     * to {@link #handle(StreamExecutionEnvironment, StreamTableEnvironment)}.
     *
     * @param port         REST port for the local Flink web UI
     * @param p            job parallelism (keep equal to the Kafka partition count)
     * @param ckAndJobName string used both as the job name and as the
     *                     checkpoint directory name on HDFS
     */
    public void init(int port, int p, String ckAndJobName) {
        System.setProperty("HADOOP_USER_NAME", "atguigu");

        Configuration envConf = new Configuration();
        envConf.setInteger("rest.port", port);
        // Reuse the checkpoint name as the pipeline (job) name.
        envConf.setString("pipeline.name", ckAndJobName);

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(envConf);
        // Parallelism matches the Kafka partition count so every partition gets a consumer.
        env.setParallelism(p);

        // ---- checkpointing ----------------------------------------------------
        // State lives in a HashMapStateBackend (on-heap); RocksDB would be the alternative.
        env.setStateBackend(new HashMapStateBackend());
        // Trigger a checkpoint every 3 seconds.
        env.enableCheckpointing(3000);
        // Exactly-once consistency guarantee for checkpoints.
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        // Each job checkpoints into its own HDFS directory.
        env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop162:8020/gmall/" + ckAndJobName);
        // Abandon a checkpoint that takes longer than one minute.
        env.getCheckpointConfig().setCheckpointTimeout(60 * 1000);
        // Max concurrent checkpoints defaults to 1, so no explicit setting is needed.
        // Leave at least 500 ms between the end of one checkpoint and the start of the next.
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(500);
        // Keep the externalized checkpoint when the job is cancelled, so it can be restored.
        env.getCheckpointConfig().setExternalizedCheckpointCleanup(RETAIN_ON_CANCELLATION);

        // Table environment layered on top of the streaming environment.
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);

        handle(env, tEnv);
    }

    /**
     * Registers the {@code ods_db} table over the ODS Kafka topic (Maxwell
     * change-log records). Declares a processing-time column {@code pt}, an
     * event-time column {@code et} derived from the second-precision {@code ts}
     * field, and a 3-second watermark on {@code et}.
     *
     * @param tEnv    table environment to register the table in
     * @param groupId Kafka consumer group id for this job
     */
    public void readOdsDb(StreamTableEnvironment tEnv, String groupId) {
        String schema = "create table ods_db(" +
                            "`database` string, " +
                            "`table` string, " +
                            "`type` string, " +
                            "`ts` bigint, " +
                            "`data` map<string, string>, " +
                            "`old` map<string, string>, " +
                            " pt as proctime()," +
                            " et as to_timestamp_ltz(ts, 0)," +
                            " watermark for et as et - interval '3' second " +  // watermark definition
                            ")";
        tEnv.executeSql(schema + SQLUtil.getDDLKafkaSource(Constant.TOPIC_ODS_DB, groupId));
    }

    /**
     * Registers the {@code base_dic} dimension table backed by MySQL via the
     * JDBC connector, with a small lookup cache (10 rows, 30-second TTL) for
     * lookup joins.
     *
     * @param tEnv table environment to register the table in
     */
    public void readBaseDic(StreamTableEnvironment tEnv){
        // NOTE(review): credentials are hard-coded in source; consider moving
        // them to configuration or a secrets store.
        String ddl = "CREATE TABLE base_dic (" +
                         "  dic_code string," +
                         "  dic_name STRING " +
                         ") WITH (" +
                         "  'connector' = 'jdbc'," +
                         "  'driver' = 'com.mysql.cj.jdbc.Driver'," +
                         "  'url' = 'jdbc:mysql://hadoop162:3306/gmall2022?useSSL=false'," +
                         "  'table-name' = 'base_dic', " +
                         "  'username' = 'root', " +
                         "  'lookup.cache.ttl' = '30 second', " +
                         "  'lookup.cache.max-rows' = '10', " +
                         "  'password' = 'aaaaaa' " +
                         ")";
        tEnv.executeSql(ddl);
    }

    /**
     * Business logic supplied by the concrete subclass; invoked by
     * {@link #init(int, int, String)} once the environments are ready.
     *
     * @param env  configured streaming execution environment
     * @param tEnv table environment with no tables registered yet
     */
    public abstract void handle(StreamExecutionEnvironment env, StreamTableEnvironment tEnv);
}
