package com.danan.realtime.app;

import com.danan.realtime.common.Constant;
import com.danan.realtime.util.ResourceBundleUtil;
import com.danan.realtime.util.SourceUtil;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * @author NanHuang
 * @Date 2023/1/24
 */
public abstract class BaseSqlApp {

    /**
     * Bootstraps the Flink runtime for a SQL job and hands control to the subclass.
     *
     * <p>Sets up the local web UI port, parallelism, exactly-once checkpointing with
     * externalized retention, the table environment, and the pipeline name, then
     * calls {@link #invoke(StreamExecutionEnvironment, StreamTableEnvironment)}.
     *
     * @param port        REST/web UI port for the local cluster
     * @param parallelism default operator parallelism
     * @param groupId     job identifier; reused as the checkpoint path suffix,
     *                    the pipeline name, and the Kafka consumer group id
     */
    public void init(Integer port, Integer parallelism, String groupId) {
        System.setProperty("HADOOP_USER_NAME", "danan");

        // 1. Execution environment with the web UI bound to the requested port.
        Configuration configuration = new Configuration();
        configuration.setInteger("rest.port", port);
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(configuration);
        env.setParallelism(parallelism);

        // 2. Checkpointing: exactly-once every 3s, one at a time, 60s timeout,
        //    retained on cancellation, stored under a per-job path.
        env.enableCheckpointing(3000);
        CheckpointConfig checkpointConfig = env.getCheckpointConfig();
        checkpointConfig.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        checkpointConfig.setMaxConcurrentCheckpoints(1);
        checkpointConfig.setCheckpointTimeout(60 * 1000);
        checkpointConfig.setExternalizedCheckpointCleanup(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        checkpointConfig.setCheckpointStorage(ResourceBundleUtil.getProperty("checkpoint.storage.url.prefix") + groupId);

        // 3. Table environment, named after the group id for easier identification.
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);
        tableEnv.getConfig().getConfiguration().setString("pipeline.name", groupId);

        // 4. Subclass supplies the actual processing logic.
        invoke(env, tableEnv);
    }

    /**
     * Job-specific processing, implemented by each concrete application.
     *
     * @param env      the configured streaming environment
     * @param tableEnv the configured table environment
     */
    public abstract void invoke(StreamExecutionEnvironment env, StreamTableEnvironment tableEnv);

    /**
     * Registers a Kafka-backed table named {@code source} over the ods_db topic.
     *
     * @param tableEnv table environment to register the table in
     * @param groupId  Kafka consumer group id passed to the connector options
     */
    public void readOdsDb(StreamTableEnvironment tableEnv, String groupId) {
        // Columns mirror the Maxwell-style change records; `pt` is a processing-time
        // attribute added for lookup joins. Connector options come from SourceUtil.
        StringBuilder ddl = new StringBuilder();
        ddl.append("create table source (")
                .append("   `database` string,")
                .append("   `table` string,")
                .append("   `type` string,")
                .append("   `data` map<string,string>,")
                .append("   `old` map<string,string>,")
                .append("   `ts` bigint,")
                .append("   `pt` as proctime()")
                .append(")")
                .append(SourceUtil.getKafkaWith(Constant.TOPIC_ODS_DB, groupId));
        tableEnv.executeSql(ddl.toString());
    }

    /**
     * Registers the MySQL dictionary table {@code base_dic} via the JDBC connector,
     * with lookup caching enabled (2h TTL, at most 100 cached rows).
     *
     * @param tableEnv table environment to register the table in
     */
    public void readBaseDic(StreamTableEnvironment tableEnv) {
        // Connection details are resolved from the resource bundle at build time.
        StringBuilder ddl = new StringBuilder();
        ddl.append("create table base_dic (")
                .append("   dic_code string,")
                .append("   dic_name string")
                .append(")")
                .append("with (")
                .append("   'connector'='jdbc',")
                .append("   'url'='").append(ResourceBundleUtil.getProperty("mysql.url")).append("',")
                .append("   'table-name'='base_dic',")
                .append("   'username'='").append(ResourceBundleUtil.getProperty("mysql.username")).append("',")
                .append("   'password'='").append(ResourceBundleUtil.getProperty("mysql.password")).append("',")
                .append("   'lookup.cache.ttl'='2 hour',")
                .append("   'lookup.cache.max-rows'='100'")
                .append(")");
        tableEnv.executeSql(ddl.toString());
    }
}
