package com.millstein.realtime.app.base;

import com.millstein.realtime.common.Constants;
import com.millstein.realtime.util.SqlUtil;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Base class for Flink SQL streaming applications.
 *
 * <p>Subclasses implement {@link #handle(StreamExecutionEnvironment, StreamTableEnvironment)}
 * with the job-specific processing logic; {@link #init(int, int, String)} builds the execution
 * environment, configures checkpointing, and then invokes {@code handle}. Helper methods create
 * commonly used source tables (Kafka ODS topic, MySQL dictionary table, order pre-process topic).
 */
public abstract class BaseSqlApp {

    /**
     * Job-specific processing logic, implemented by subclasses.
     *
     * @param env      stream execution environment
     * @param tableEnv table execution environment
     */
    public abstract void handle(StreamExecutionEnvironment env, StreamTableEnvironment tableEnv);

    /**
     * Initializes the application: sets up the execution environment, parallelism and
     * checkpointing, then delegates to {@link #handle}.
     *
     * @param webUIPort   port for the Flink web UI (rest.port)
     * @param parallelism default job parallelism
     * @param appName     application name, used as the checkpoint storage subdirectory
     */
    public void init(int webUIPort, int parallelism, String appName) {
        // 1. Set the HDFS user so checkpoint writes are performed as this account.
        System.setProperty("HADOOP_USER_NAME", "tsing");

        // 2. Build the execution environment with a local web UI on the given port.
        Configuration conf = new Configuration();
        conf.setInteger("rest.port", webUIPort);

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
        env.setParallelism(parallelism);

        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // 3. Configure checkpointing.
        // Enable checkpoints every 5 seconds.
        env.enableCheckpointing(5000L);

        CheckpointConfig checkpointConfig = env.getCheckpointConfig();
        // Exactly-once checkpointing semantics.
        checkpointConfig.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        // Keep operator state on the JVM heap.
        env.setStateBackend(new HashMapStateBackend());
        // Persist checkpoints to HDFS, one directory per application.
        checkpointConfig.setCheckpointStorage("hdfs://hadoop102:8020/gmall/stream/" + appName);
        // Minimum pause between the end of one checkpoint and the start of the next.
        checkpointConfig.setMinPauseBetweenCheckpoints(5000);
        // Tolerate up to 3 consecutive checkpoint failures before failing the job.
        checkpointConfig.setTolerableCheckpointFailureNumber(3);
        // Retain externalized checkpoints when the job is cancelled (deleted by default).
        // This is the Flink 1.13.1 API; newer versions use setExternalizedCheckpointCleanup.
        checkpointConfig.enableExternalizedCheckpoints(
                CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION
        );
        // Fail a checkpoint if it does not complete within 5 minutes.
        checkpointConfig.setCheckpointTimeout(5 * 60 * 1000L);

        // 4. Run the subclass-provided processing logic.
        this.handle(env, tableEnv);
    }

    /**
     * Creates the {@code maxwell_table} source table reading ODS-layer change events
     * (Maxwell CDC format) from Kafka.
     *
     * @param tableEnv table execution environment
     * @param groupId  Kafka consumer group id
     */
    public void readOdsDataFromKafka(StreamTableEnvironment tableEnv, String groupId) {
        tableEnv.executeSql(
                "create table maxwell_table (" +
                "    `database` string," +
                "    `table` string," +
                "    `type` string," +
                "    `ts` bigint," +
                "    `data` map<string, string>," +
                "    `old` map<string, string>," +
                "    `pt` as proctime()" +
                ")" + SqlUtil.getKafkaSourceDDL(Constants.TOPIC_DB, groupId)
        );
    }

    /**
     * Creates the {@code base_dic} lookup table backed by the MySQL dictionary table,
     * with a small lookup cache (10 rows, 30 s TTL).
     *
     * @param tableEnv table execution environment
     */
    public void readBaseDicFromMysql(StreamTableEnvironment tableEnv) {
        // NOTE(review): credentials are hardcoded here; consider externalizing them
        // to configuration alongside the other connection settings.
        tableEnv.executeSql(
                "create table base_dic (" +
                "    dic_code string," +
                "    dic_name string" +
                ") with (" +
                "    'connector' = 'jdbc'," +
                // MySQL Connector/J property names are case-sensitive: the correct
                // spelling is 'useSSL' (was 'useSsl', which the driver ignores).
                "    'url' = 'jdbc:mysql://hadoop102:3306/gmall?useSSL=false'," +
                "    'username' = 'root'," +
                "    'password' = '123456'," +
                "    'table-name' = 'base_dic'," +
                "    'lookup.cache.max-rows' = '10'," +
                "    'lookup.cache.ttl' = '30 second'" +
                ")"
        );
    }

    /**
     * Creates the {@code order_pre_process} source table reading the DWD trade order
     * pre-process topic from Kafka.
     *
     * <p>NOTE(review): this method is {@code static} while the sibling readers are
     * instance methods; kept static for backward compatibility with existing callers.
     *
     * @param tableEnv table execution environment
     * @param groupId  Kafka consumer group id
     */
    public static void readOrderPreProcessFromKafka(StreamTableEnvironment tableEnv, String groupId) {
        tableEnv.executeSql(
                "create table order_pre_process ( " +
                "    id string, " +
                "    order_id string, " +
                "    sku_id string, " +
                "    sku_name string, " +
                "    create_time string, " +
                "    source_id string, " +
                "    source_type_code string, " +
                "    source_type_name string, " +
                "    sku_num string, " +
                "    split_origin_amount string, " +
                "    split_total_amount string, " +
                "    split_activity_amount string, " +
                "    split_coupon_amount string, " +
                "    od_ts bigint, " +
                "    user_id string, " +
                "    province_id string, " +
                "    operate_time string, " +
                "    order_status string, " +
                "    `type` string, " +
                "    `old` map<string, string>, " +
                "    oi_ts bigint, " +
                "    activity_id string, " +
                "    activity_rule_id string, " +
                "    coupon_id string, " +
                "    row_opt_ts timestamp_ltz(3) " +
                ")" + SqlUtil.getKafkaSourceDDL(Constants.TOPIC_DWD_TRADE_ORDER_PRE_PROCESS, groupId)
        );
    }
}
