package com.saga.energy.app;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.saga.energy.bean.Energy15;
import com.saga.energy.bean.Msg;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.SqlDialect;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.catalog.hive.HiveCatalog;

import java.util.Properties;

public class KafkaToHive {

    /**
     * Streaming job that reads JSON messages from the Kafka topic
     * {@code T_KAFKA_MSG_SOURCE_TABLE}, deserializes each payload into a
     * {@link Msg}, and continuously inserts the records into a partitioned
     * Hive ORC table through a {@link HiveCatalog}.
     *
     * <p>No explicit {@code env.execute()} call is needed: the final
     * {@code executeSql("INSERT ...")} submits the streaming job itself and
     * runs until cancelled.
     */
    public static void main(String[] args) {

        // Identity used for Hive metastore / HDFS access.
        System.setProperty("HADOOP_USER_NAME", "saga");

        final String catalogName = "myhive";          // catalog name registered in the table environment
        final String defaultDatabase = "saga_dw";     // default Hive database
        // Directory containing hive-site.xml; must be on the local file system.
        final String hiveConfDir = "/opt/module/hive-3.1.2/conf";

        HiveCatalog hive = new HiveCatalog(catalogName, defaultDatabase, hiveConfDir);

        // Blink planner is required for Hive integration; streaming mode for a continuous sink.
        EnvironmentSettings settings = EnvironmentSettings.newInstance()
                .useBlinkPlanner()
                .inStreamingMode()
                .build();

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Checkpointing is mandatory here: the Hive streaming sink only commits
        // (publishes) partitions on successful checkpoints.
        env.enableCheckpointing(10000, CheckpointingMode.EXACTLY_ONCE);

        Properties kafkaProps = new Properties();
        kafkaProps.setProperty("bootstrap.servers", "hadoop01:9092,hadoop02:9092,hadoop03:9092");
        kafkaProps.setProperty("group.id", "energy2");
        kafkaProps.setProperty("auto.offset.reset", "earliest");

        DataStreamSource<String> rawStream = env.addSource(
                new FlinkKafkaConsumer<>("T_KAFKA_MSG_SOURCE_TABLE", new SimpleStringSchema(), kafkaProps));

        // Deserialize each JSON payload exactly once. The previous implementation
        // parsed every message twice (an unused intermediate JSONObject), doubling
        // the per-record parse cost for no benefit.
        SingleOutputStreamOperator<Msg> msgStream = rawStream.map(new MapFunction<String, Msg>() {
            @Override
            public Msg map(String s) throws Exception {
                return JSON.parseObject(s, Msg.class);
            }
        });

        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env, settings);

        // Expose the typed stream as a (default-dialect, case-sensitive) temporary view.
        tableEnv.createTemporaryView("t_KafkaMsgSourceTable", msgStream);

        Configuration configuration = tableEnv.getConfig().getConfiguration();
        configuration.setString("table.exec.hive.fallback-mapred-reader", "true");

        tableEnv.registerCatalog(catalogName, hive);
        tableEnv.useCatalog(catalogName);
        tableEnv.useDatabase(defaultDatabase);

        // Hive dialect is required for Hive DDL (PARTITIONED BY / STORED AS / TBLPROPERTIES).
        tableEnv.getConfig().setSqlDialect(SqlDialect.HIVE);

        tableEnv.executeSql("drop table if exists t_kafkaMsg2hiveTable");

        tableEnv.executeSql("CREATE TABLE IF NOT EXISTS t_kafkaMsg2hiveTable ("
                + "ip STRING,"
                + "msg STRING"
                + ")"
                + " PARTITIONED BY (dt STRING) STORED AS orc TBLPROPERTIES ("
                // Pattern used to extract a commit timestamp from the partition value.
                + " 'partition.time-extractor.timestamp-pattern'='$dt',"
                // Commit trigger: "partition-time" (event/watermark based) or "process-time".
                + " 'sink.partition-commit.trigger'='partition-time',"
                // Delay before a partition is considered complete and committed.
                + " 'sink.partition-commit.delay'='0s',"
                // Register the partition in the metastore and drop a _SUCCESS marker file.
                + " 'sink.partition-commit.policy.kind'='metastore,success-file'"
                + ")");

        // Switch back to the default dialect before querying: the Hive dialect
        // lowercases unquoted identifiers, so the mixed-case temporary view
        // "t_KafkaMsgSourceTable" would not resolve under it.
        tableEnv.getConfig().setSqlDialect(SqlDialect.DEFAULT);

        // Submits the continuous streaming insert; assumes Msg exposes ip, msg and dt
        // fields (dt doubles as the Hive partition column).
        tableEnv.executeSql("INSERT INTO t_kafkaMsg2hiveTable SELECT ip,msg,dt FROM t_KafkaMsgSourceTable");
    }
}
