package com.tencent.tbds.demo.flink;


import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.ExecutionCheckpointingOptions;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.SqlDialect;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.catalog.hive.HiveCatalog;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.time.Duration;

/**
 * Flink demo: stream real-time data from Kafka into a partitioned Hive table.
 */
public class FlinkKafkaSinkHiveDemo {
    private final static Logger logger = LoggerFactory.getLogger(FlinkKafkaSinkHiveDemo.class);

    /**
     * Entry point. Builds a streaming Table environment, registers a Hive catalog,
     * creates a Kafka source table and a partitioned Hive sink table, then submits
     * a continuous INSERT job copying rows from Kafka into Hive.
     *
     * @param args unused
     * @throws Exception if catalog registration, DDL execution, or the streaming
     *                   insert job fails
     */
    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        EnvironmentSettings tableEnvSettings = EnvironmentSettings.newInstance()
                .useBlinkPlanner()
                .inStreamingMode()
                .build();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env, tableEnvSettings);
        // Checkpointing is required for the Hive streaming sink: files are only
        // committed (and partitions published) on checkpoint completion.
        tableEnv.getConfig().getConfiguration().set(ExecutionCheckpointingOptions.CHECKPOINTING_MODE,
                CheckpointingMode.EXACTLY_ONCE);
        tableEnv.getConfig().getConfiguration().set(ExecutionCheckpointingOptions.CHECKPOINTING_INTERVAL,
                Duration.ofSeconds(20));

        String catalogName = "hive_catalog";
        String defaultDatabase = "flink_cdc";
        String hiveConfDir = "/etc/hive/conf"; // a local path
        String version = "2.2.0-TBDS-5.2.0.1";
        // Hadoop configuration is picked up from the environment (HADOOP_CONF_DIR);
        // only the Hive conf dir is passed to the catalog explicitly.
        HiveCatalog catalog = new HiveCatalog(catalogName, defaultDatabase, hiveConfDir, version);
        tableEnv.registerCatalog(catalogName, catalog);
        tableEnv.useCatalog(catalogName);

        // Kafka source table: JSON records plus the Kafka record timestamp exposed
        // as a metadata column, with a 1-second bounded-out-of-orderness watermark.
        // NOTE(review): SASL credentials are hard-coded below — move them to
        // external configuration before using this beyond a demo.
        tableEnv.executeSql("DROP TABLE IF EXISTS flink_cdc.test_kafka_source");
        tableEnv.executeSql("CREATE TABLE flink_cdc.test_kafka_source(\n" +
                        "`id` int,\n" +
                        "`user_id` int,\n" +
                        "`product_id` int,\n" +
                        "`server_id` int,\n" +
                        "`duration` int,\n" +
                        "`times` string,\n" +
                        "`time` TIMESTAMP(3) METADATA FROM 'timestamp',\n" +
                        "WATERMARK FOR `time` AS `time` - INTERVAL '1' SECOND\n" +
                        ") WITH (" +
                        "'connector' = 'kafka',\n" +
                " 'topic' = 'flink-cdc-kafka',\n" +
                " 'properties.bootstrap.servers' = 'tbds-10-31-1-10:6669',\n" +
                " 'scan.startup.mode' = 'earliest-offset',\n" +
                " 'properties.security.protocol'='SASL_PLAINTEXT',\n" +
                " 'properties.sasl.mechanism'='PLAIN',\n" +
                "'properties.sasl.jaas.config'='org.apache.flink.kafka.shaded.org.apache.kafka.common.security.plain.PlainLoginModule required username=\"kafka\"  password=\"kafka@Tbds.com\";',\n" +
                " 'format' = 'json',\n" +
                " 'properties.group.id' = 'testGroup',\n" +
                " 'json.fail-on-missing-field' = 'false',\n" +
                " 'json.ignore-parse-errors' = 'true'\n" +
                ")");

        logger.info("====================hive sink==============================");
        // Hive-dialect DDL: a parquet table partitioned by day/hour/10-minute
        // bucket, with partitions committed to the metastore (plus a _SUCCESS
        // file) as soon as processing time passes — no commit delay.
        tableEnv.getConfig().setSqlDialect(SqlDialect.HIVE);
        tableEnv.executeSql("DROP TABLE IF EXISTS flink_cdc.test_hive_sink");
        tableEnv.executeSql("CREATE TABLE flink_cdc.test_hive_sink (\n" +
                "`id` int,\n" +
                "`user_id` int,\n" +
                "`product_id` int,\n" +
                "`server_id` int,\n" +
                "`duration` int,\n" +
                "`times` string,\n" +
                "`time` timestamp\n" +
                ") PARTITIONED BY (dt STRING, hour STRING, min STRING) STORED AS parquet TBLPROPERTIES (\n" +
                "  'is_generic'='false',\n" +
                "  'sink.partition-commit.trigger'='process-time',\n" +
                "  'sink.partition-commit.delay'='0s',\n" +
                "  'sink.partition-commit.policy.kind'='metastore,success-file'\n" +
                ")");
        logger.info("====================hive sink 2==============================");
        // Switch back to the default dialect for the streaming INSERT.
        tableEnv.getConfig().setSqlDialect(SqlDialect.DEFAULT);
        logger.info("====================hive sink 3==============================");
        // Dynamic-partition insert: the last three SELECT columns map to
        // dt/hour/min. The LPAD(...,1,...) expression truncates 'mm' to its first
        // digit, then '0' is appended — i.e. minutes are bucketed into 10-minute
        // partitions ('34' -> '30').
        // await() blocks until the job terminates so that submission or runtime
        // failures propagate out of main instead of being silently dropped.
        tableEnv.executeSql("insert into hive_catalog.flink_cdc.test_hive_sink\n" +
                "SELECT `id`, `user_id`,`product_id`,`server_id`,`duration`,`times`,`time`, DATE_FORMAT(`time`, 'yyyy-MM-dd'), DATE_FORMAT(`time`, 'HH'), CONCAT(LPAD(DATE_FORMAT(`time`, 'mm'), 1, '??'), '0') FROM hive_catalog.flink_cdc.test_kafka_source")
                .await();
    }

}
