package com.zyx.flinkdemo.sql.catalog;

import com.zyx.flinkdemo.sql.utils.ConnectUtils;
import lombok.extern.slf4j.Slf4j;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.SqlDialect;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.catalog.ObjectPath;
import org.apache.flink.table.catalog.hive.HiveCatalog;
import org.apache.hadoop.hive.metastore.api.Table;

/**
 * @author Yaxi.Zhang
 * @since 2021/8/4 09:29
 * desc: Reads data from Kafka and streams it into a partitioned Hive table via HiveCatalog.
 */
@Slf4j
public class HiveCatalogWithKafkaSource {
    /**
     * Entry point. Pipeline wiring, in order:
     * <ol>
     *   <li>Configure checkpointing (required for the Hive streaming sink's partition commits).</li>
     *   <li>Register and activate a {@link HiveCatalog} so table metadata is persisted in the Hive metastore.</li>
     *   <li>Create a Kafka JSON source table with an event-time watermark.</li>
     *   <li>Switch to the HIVE SQL dialect to set partition-commit properties on the sink table.</li>
     *   <li>Switch back to the DEFAULT dialect and submit the streaming INSERT job.</li>
     * </ol>
     * Note: the final {@code executeSql(insertSql)} submits the job; no {@code env.execute()} is needed.
     *
     * @param args unused command-line arguments
     * @throws Exception propagated from environment setup / job submission
     */
    public static void main(String[] args) throws Exception {
        // Obtain the Flink streaming execution environment.
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Checkpoint every 60 s with exactly-once semantics; partition commits to Hive
        // happen on checkpoint completion, so checkpointing must be enabled.
        env.enableCheckpointing(60 * 1000, CheckpointingMode.EXACTLY_ONCE);
        // Abort a checkpoint if it has not completed within 90 s.
        env.getCheckpointConfig().setCheckpointTimeout(90 * 1000);
        // Keep working state on the JVM heap; checkpoint snapshots go to HDFS below.
        env.setStateBackend(new HashMapStateBackend());
        env.getCheckpointConfig().setCheckpointStorage("hdfs://bigdata021:8020/dev/hivetest");
        env.setParallelism(3);

        // Create the streaming Table API environment (Blink planner, streaming mode).
        EnvironmentSettings settings = EnvironmentSettings.newInstance().useBlinkPlanner().inStreamingMode().build();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env, settings);

        // tableEnv.sqlQuery("select * from test.kafka_ticker").execute().print();

        // Create the HiveCatalog: catalog name "myHive", default database "db_test",
        // Hive conf directory "input/hive" (relative path — NOTE(review): confirm this
        // resolves correctly from the job's working directory in your deployment).
        HiveCatalog hiveCatalog = new HiveCatalog("myHive", "db_test", "input/hive");

        // Register the HiveCatalog with the table environment.
        tableEnv.registerCatalog("myHive", hiveCatalog);

        // Make it the current catalog so unqualified table names resolve against Hive.
        tableEnv.useCatalog("myHive");


        tableEnv.executeSql("CREATE DATABASE IF NOT EXISTS test");

        // Kafka source table settings.
        final String kafkaTopic = "HK_TRADE_TICKER";
        final String kafkaServer = "bigdata022:9092,bigdata023:9092,bigdata024:9092";
        final String kafkaConsumerGroup = "test_group_210804";
        // Kafka payload is JSON.
        final String kafkaFormat = "json";
        // Start reading from the EARLIEST offset.
        // NOTE(review): the original comment said "latest", which contradicts the
        // configured value "earliest-offset" — confirm which behavior is intended.
        final String scanStartUpMode = "earliest-offset";
        // Source DDL: sendTime (epoch millis) is converted to an event-time column
        // time_ltz, with a 5-second out-of-orderness watermark; pts is processing time.
        final String createKafkaSourceSql = "CREATE TABLE test.kafka_ticker(\n" +
                "    `securityCode` INT,\n" +
                "    `price` DOUBLE, \n" +
                "    `sendTime` BIGINT,\n" +
                "    `time_ltz` AS TO_TIMESTAMP_LTZ(sendTime, 3),\n" +
                "    `pts` AS PROCTIME(),\n" +
                "WATERMARK FOR time_ltz AS time_ltz - INTERVAL '5' SECOND)";

        // ConnectUtils presumably appends the Kafka connector WITH(...) clause to the
        // DDL above — implementation not visible here; verify in ConnectUtils.
        final String createKafkaSourceTableSql = ConnectUtils
                .getCreateKafkaSourceTableSql(kafkaTopic, kafkaServer, kafkaConsumerGroup, kafkaFormat, scanStartUpMode, createKafkaSourceSql);
        tableEnv.executeSql(createKafkaSourceTableSql);

        // Enable watermark-driven partition commits on the existing Hive sink table.
        // The timestamp pattern assumes partition columns named dt/hr/mi — the sink
        // table's DDL is not visible in this file; confirm it matches.
        String alterHiveSql = "ALTER TABLE db_test.hive_ticker SET TBLPROPERTIES (\n" +
                "'sink.partition-commit.trigger'='partition-time' , \n" +
                "'partition.time-extractor.timestamp-pattern'='$dt $hr:$mi:00' ,\n" +
                "'sink.partition-commit.delay'='5 s',\n" +
                "'sink.partition-commit.watermark-time-zone'='Asia/Shanghai',\n" +
                "'sink.partition-commit.policy.kind'='metastore,success-file')";

        // ALTER TABLE ... SET TBLPROPERTIES is Hive syntax — switch to the HIVE dialect first.
        tableEnv.getConfig().setSqlDialect(SqlDialect.HIVE);
        tableEnv.executeSql(alterHiveSql);
        
        log.info("++++++++++++++++++++创建HiveSink表成功");


        // Stream Kafka rows into Hive; the DATE_FORMAT expressions derive the
        // dt / hr / mi partition values from the event-time column.
        String insertSql = "INSERT INTO db_test.hive_ticker SELECT securityCode, price, sendTime,\n" +
                "DATE_FORMAT(time_ltz, 'yyyy-MM-dd'), DATE_FORMAT(time_ltz, 'HH'), DATE_FORMAT(time_ltz, 'mm') FROM test.kafka_ticker";

        // String insertSql = "INSERT INTO db_test.hive_ticker SELECT securityCode, price, sendTime FROM test.kafka_ticker";
        // Switch back to the default (Flink) SQL dialect before running the INSERT.
        tableEnv.getConfig().setSqlDialect(SqlDialect.DEFAULT);
        tableEnv.executeSql(insertSql);

    }
}
