package com.atguigu.realtime.app;

import com.atguigu.realtime.common.Constant;
import com.atguigu.realtime.util.SQLUtil;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Base class for Flink SQL applications: configures the execution environment,
 * checkpointing and table environment, then delegates business logic to subclasses.
 *
 * @Author lzc
 * @Date 2022/7/16 9:50
 */
public abstract class BaseSQLApp {

    /**
     * Business logic hook implemented by subclasses. Invoked by {@link #init}
     * after both environments have been fully configured.
     *
     * @param env  the configured stream execution environment
     * @param tEnv the table environment created on top of {@code env}
     */
    protected abstract void handle(StreamExecutionEnvironment env,
                                   StreamTableEnvironment tEnv);

    /**
     * Bootstraps the Flink runtime: local REST port, parallelism, HDFS-backed
     * checkpointing with a {@code HashMapStateBackend}, and a table environment
     * whose pipeline name is {@code ck}. Finally delegates to {@link #handle}.
     *
     * @param port REST UI port; only effective when running locally
     * @param p    default parallelism; for sources this should normally match the
     *             Kafka topic's partition count
     * @param ck   checkpoint directory name under {@code hdfs://hadoop162:8020/gmall/},
     *             also reused as the pipeline name
     */
    public void init(int port, int p, String ck) {
        // Checkpoints are written to HDFS, so act as this Hadoop user.
        System.setProperty("HADOOP_USER_NAME", "atguigu");

        Configuration conf = new Configuration();
        conf.setInteger("rest.port", port); // REST port setting only takes effect for local runs
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
        env.setParallelism(p);

        // Enable checkpointing every 5s with HDFS checkpoint storage.
        env.enableCheckpointing(5000);
        env.setStateBackend(new HashMapStateBackend());

        CheckpointConfig ckConfig = env.getCheckpointConfig();
        ckConfig.setCheckpointStorage("hdfs://hadoop162:8020/gmall/" + ck);
        ckConfig.setCheckpointTimeout(30000);
        ckConfig.setMaxConcurrentCheckpoints(1);
        ckConfig.setMinPauseBetweenCheckpoints(500);
        // Keep completed checkpoints on job cancellation so the job can be resumed from them.
        ckConfig.setExternalizedCheckpointCleanup(
            CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);
        tEnv.getConfig().getConfiguration().setString("pipeline.name", ck);

        handle(env, tEnv);
    }

    /**
     * Registers the Kafka-backed {@code ods_db} source table (CDC-style records
     * with {@code database}/{@code table}/{@code type}/{@code data}/{@code old}
     * columns) plus a processing-time attribute {@code pt}.
     *
     * @param tEnv    table environment to register the table in
     * @param groupId Kafka consumer group id passed to the source DDL
     */
    public void readOdsDb(StreamTableEnvironment tEnv, String groupId) {
        tEnv.executeSql("create table ods_db(" +
                            " `database` string, " +
                            " `table` string, " +
                            " `type` string, " +
                            " `ts` string, " +
                            " `data` map<string, string>, " +
                            " `old` map<string, string>, " +
                            " `pt` as proctime() " +
                            ")" + SQLUtil.getKafkaSourceDDL(Constant.TOPIC_ODS_DB, groupId));
    }

    /**
     * Registers the MySQL dimension table {@code base_dic} as a JDBC lookup table
     * with a small TTL-bounded lookup cache (10 rows, 1 hour).
     *
     * <p>NOTE(review): database credentials are hard-coded here; consider moving
     * them to configuration.
     *
     * @param tEnv table environment to register the table in
     */
    public void readBaseDic(StreamTableEnvironment tEnv) {
        tEnv.executeSql("create table `base_dic`( " +
                            "`dic_code` string, " +
                            "`dic_name` string, " +
                            "primary key(`dic_code`) not enforced " +
                            ")WITH ( " +
                            "'connector' = 'jdbc', " +
                            "'url' = 'jdbc:mysql://hadoop162:3306/gmall2022?useSSL=false', " +
                            "'table-name' = 'base_dic', " +
                            "'lookup.cache.max-rows' = '10'," +
                            "'lookup.cache.ttl' = '1 hour'," +
                            "'username' = 'root', " +
                            "'password' = 'aaaaaa', " +
                            "'driver' = 'com.mysql.cj.jdbc.Driver' " +
                            ")");
    }

}
