package com.atguigu.bigdata.edu.realtime.app;


import com.atguigu.bigdata.edu.realtime.common.Constant;
import com.atguigu.bigdata.edu.realtime.util.SQLUtil;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

import java.util.concurrent.TimeUnit;

public abstract class BaseSQLApp {

    // Shared MySQL connection settings for all JDBC lookup (dimension) tables below.
    // Centralized so the three DDL builders cannot drift apart again
    // (previously only test_paper declared the driver explicitly).
    private static final String MYSQL_URL = "jdbc:mysql://hadoop162:3306/edu?useSSL=false";
    private static final String MYSQL_DRIVER = "com.mysql.cj.jdbc.Driver";
    private static final String MYSQL_USER = "root";
    private static final String MYSQL_PASSWORD = "aaaaaa";

    /**
     * Builds the Flink execution environment, configures state backend,
     * checkpointing and restart behavior, then delegates the actual
     * business logic to {@link #handle}.
     *
     * @param port   REST port for the local Flink web UI (debugging aid)
     * @param p      default parallelism; for local debugging this is usually
     *               kept equal to the Kafka topic's partition count
     * @param ckName job name, also used as the checkpoint sub-directory on HDFS
     */
    public void init(int port, int p, String ckName) {
        System.setProperty("HADOOP_USER_NAME", "atguigu");

        // Pin the web UI to a fixed port so local runs are easy to inspect.
        Configuration conf = new Configuration();
        conf.setInteger("rest.port", port);
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
        env.setParallelism(p);

        // Checkpoint every 3 s.
        // Since Flink 1.13 the state backend only decides how *local* state is
        // stored (HashMap on heap vs RocksDB); where checkpoints are written
        // (JobManager memory vs HDFS) is configured separately on CheckpointConfig.
        env.enableCheckpointing(3000);
        env.setStateBackend(new HashMapStateBackend());

        // With checkpointing enabled the job restarts on failure: allow at
        // most 3 failures within a 1-day window, waiting 3 minutes between
        // restart attempts.
        env.setRestartStrategy(RestartStrategies.failureRateRestart(3,
                // failure-rate measurement interval
                Time.of(1L, TimeUnit.DAYS),
                // delay between restart attempts
                Time.of(3L, TimeUnit.MINUTES)));

        CheckpointConfig checkpointConfig = env.getCheckpointConfig();
        // Persist checkpoints on HDFS, one directory per job.
        checkpointConfig.setCheckpointStorage("hdfs://hadoop162:8020/edu/ck/" + ckName);
        // Exactly-once checkpointing semantics.
        checkpointConfig.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        // Keep at least 500 ms between the end of one checkpoint and the start
        // of the next. (An alternative would be setMaxConcurrentCheckpoints(1).)
        checkpointConfig.setMinPauseBetweenCheckpoints(500);
        // Abort any checkpoint that takes longer than one minute.
        checkpointConfig.setCheckpointTimeout(60000);
        // Tolerate up to 5 checkpoint failures before failing the job
        // (not the number of checkpoints, despite the original comment).
        checkpointConfig.setTolerableCheckpointFailureNumber(5);
        // Keep externalized checkpoint data when the job is cancelled so the
        // job can later be resumed from it.
        checkpointConfig.setExternalizedCheckpointCleanup(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        // Table environment layered on top of the stream environment.
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);
        tEnv.getConfig().getConfiguration().setString("pipeline.name", ckName);

        // Subclasses implement the actual processing.
        // NOTE(review): env.execute() is never called here — presumably every
        // subclass submits its job via tEnv.executeSql(); confirm with callers.
        handle(env, tEnv);
    }

    /** Business logic supplied by the concrete application. */
    protected abstract void handle(StreamExecutionEnvironment env, StreamTableEnvironment tEnv);

    /**
     * Registers the Kafka-backed {@code ods_db} table (Maxwell-style CDC
     * records) with a processing-time attribute {@code pt} for lookup joins.
     *
     * @param tEnv    table environment to register the table in
     * @param groupId Kafka consumer group id for this job
     */
    public void readOdsDb(StreamTableEnvironment tEnv, String groupId) {
        tEnv.executeSql("CREATE TABLE ods_db ( " +
                "  `database` string, " +
                "  `table` string, " +
                "  `type` string, " +
                "  `ts` bigint,  " +
                "  `data` map<string,string>,  " +
                "  `old` map<string,string>, " +
                "  `pt` as proctime()  " +
                ") " + SQLUtil.getKafkaSource(Constant.TOPIC_ODS_DB, groupId));
    }

    /** Registers the {@code base_source} dimension table as a JDBC lookup table. */
    public void readBaseSource(StreamTableEnvironment tEnv) {
        tEnv.executeSql(mysqlLookupTableDdl("base_source",
                "id bigint, " +
                "source_site string, " +
                "source_url string "));
    }

    /** Registers the {@code base_dic} dimension table as a JDBC lookup table. */
    public void readBaseDic(StreamTableEnvironment tEnv) {
        tEnv.executeSql(mysqlLookupTableDdl("base_dic",
                "dic_code string, " +
                "dic_name string "));
    }

    /** Registers the {@code test_paper} dimension table as a JDBC lookup table. */
    public void readTestPaper(StreamTableEnvironment tEnv) {
        tEnv.executeSql(mysqlLookupTableDdl("test_paper",
                "  id bigint, " +
                "  course_id bigint "));
    }

    /**
     * Builds a CREATE TABLE statement for a MySQL-backed JDBC lookup table
     * with a small (10-row, 1-hour TTL) lookup cache. Centralizing the
     * connector options keeps every dimension table configured identically;
     * the explicit driver class is harmless where it was previously omitted,
     * since the connector would otherwise infer it from the URL.
     *
     * @param tableName MySQL table name (also used as the Flink table name)
     * @param columns   comma-separated column definitions for the DDL body
     * @return the complete CREATE TABLE statement
     */
    private String mysqlLookupTableDdl(String tableName, String columns) {
        return "create table " + tableName + " ( " +
                columns +
                ") with ( " +
                "   'connector' = 'jdbc', " +
                "   'driver' = '" + MYSQL_DRIVER + "', " +
                "   'url' = '" + MYSQL_URL + "', " +
                "   'table-name' = '" + tableName + "', " +
                "   'username' = '" + MYSQL_USER + "', " +
                "   'password' = '" + MYSQL_PASSWORD + "', " +
                "   'lookup.cache.max-rows' = '10',  " +
                "   'lookup.cache.ttl' = '1 hour' " +
                " )";
    }
}
