package com.mlamp.me;

import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.environment.ExecutionCheckpointingOptions;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.SqlDialect;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.catalog.hive.HiveCatalog;

import java.time.Duration;

/**
 * Streaming ETL job: reads JSON order events from a Kafka topic ({@code test}) and
 * continuously writes them into an hourly-partitioned Hive table stored as Parquet,
 * registering both tables through a {@link HiveCatalog}.
 *
 * <p>Pipeline: Kafka source (Flink SQL dialect, event-time watermark on {@code log_ts})
 * → INSERT INTO Hive sink (Hive dialect DDL) partitioned by {@code dt}/{@code hr}.
 *
 * @author lyz
 * @since 2021/9/6
 */
public class KafkaToHive {
    public static void main(String[] args) {
        StreamExecutionEnvironment senv = StreamExecutionEnvironment.getExecutionEnvironment();
        // Event time is required: the Hive sink's 'partition-time' commit trigger
        // is driven by the watermark declared on log_ts in the source table.
        senv.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);
        senv.setParallelism(3);
        EnvironmentSettings tableEnvSettings = EnvironmentSettings.newInstance()
                .useBlinkPlanner()
                .inStreamingMode()
                .build();

        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(senv, tableEnvSettings);

        // Checkpointing is mandatory for the streaming Hive sink: in-progress files
        // are finalized and partitions committed only on checkpoint completion, so
        // without it no data would ever become visible in Hive.
        tableEnv.getConfig().getConfiguration()
                .set(ExecutionCheckpointingOptions.CHECKPOINTING_MODE, CheckpointingMode.EXACTLY_ONCE);
        tableEnv.getConfig().getConfiguration()
                .set(ExecutionCheckpointingOptions.CHECKPOINTING_INTERVAL, Duration.ofSeconds(20));

        String catalogName = "my_catalog";

        HiveCatalog catalog = new HiveCatalog(
                catalogName,              // catalog name
                "default",                // default database
                "./src/main/resources",   // Hive config (hive-site.xml) directory
                "2.3.4"                   // Hive version
        );

        tableEnv.registerCatalog(catalogName, catalog);
        tableEnv.useCatalog(catalogName);

        // --- Kafka source table (default Flink SQL dialect) ---
        tableEnv.executeSql("CREATE DATABASE IF NOT EXISTS stream_tmp");
        tableEnv.executeSql("DROP TABLE IF EXISTS stream_tmp.log_kafka");

        tableEnv.executeSql("create table stream_tmp.log_kafka(" +
                                "user_id String,\n" +
                                "order_amount Double,\n" +
                                "log_ts Timestamp(3),\n" +
                                // 5-second bounded out-of-orderness watermark.
                                "WATERMARK FOR log_ts AS log_ts -INTERVAL '5' SECOND" +
                        ")WITH(" +
                                " 'connector' = 'kafka',\n" +
                                "'topic' = 'test',\n" +
                                " 'properties.bootstrap.servers' = 'localhost:9092',\n" +
                                "'properties.group.id' = 'flink1',\n" +
                                "'scan.startup.mode' = 'earliest-offset',\n" +
                                "'format' = 'json',\n" +
                                "'json.fail-on-missing-field' = 'false',\n" +
                                "'json.ignore-parse-errors' = 'true'" +
                        ")");

        // --- Hive sink table: DDL below must be parsed with the Hive dialect ---
        tableEnv.getConfig().setSqlDialect(SqlDialect.HIVE);

        tableEnv.executeSql("CREATE DATABASE IF NOT EXISTS hive_tmp");
        tableEnv.executeSql("DROP TABLE IF EXISTS hive_tmp.log_hive");

        // FIX: removed "'format' = 'json'" from TBLPROPERTIES — the table is
        // STORED AS PARQUET; a 'format' property is not a valid option for the
        // Hive streaming sink and was contradictory/misleading.
        // NOTE(review): partitions are hourly ($dt $hr) but the commit delay is
        // '1 min'; Flink docs suggest a delay matching the partition granularity
        // ('1 h') so a partition is committed only after its hour has passed —
        // confirm '1 min' is intentional.
        tableEnv.executeSql(" CREATE TABLE hive_tmp.log_hive (\n" +
                "                     user_id STRING,\n" +
                "                     order_amount DOUBLE\n" +
                "           ) PARTITIONED BY (\n" +
                "                     dt STRING,\n" +
                "                     hr STRING\n" +
                "           ) STORED AS PARQUET\n" +
                "             TBLPROPERTIES (\n" +
                "                    'sink.partition-commit.trigger' = 'partition-time',\n" +
                "                    'sink.partition-commit.delay' = '1 min',\n" +
                "                    'sink.partition-commit.policy.kind' = 'metastore,success-file',\n" +
                "                    'partition.time-extractor.timestamp-pattern'='$dt $hr:00:00'" +
                "           )");

        // Back to the default dialect for the streaming INSERT; executeSql on an
        // INSERT submits the continuous job.
        tableEnv.getConfig().setSqlDialect(SqlDialect.DEFAULT);
        tableEnv.executeSql("" +
                "        INSERT INTO hive_tmp.log_hive\n" +
                "        SELECT\n" +
                "               user_id,\n" +
                "               order_amount,\n" +
                "               DATE_FORMAT(log_ts, 'yyyy-MM-dd'), DATE_FORMAT(log_ts, 'HH')\n" +
                "               FROM stream_tmp.log_kafka");
    }
}