package table;

import bean.FileHdfsBean;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.table.api.SqlDialect;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.catalog.hive.HiveCatalog;
import parser.JsonParser;

import java.util.Objects;

/**
 * Flink job that consumes filebeat JSON records from Kafka and streams them into a
 * partitioned Hive ORC table.
 *
 * <p>The Kafka source, checkpointing, and environment setup are handled by the base
 * class {@code Kafka2TableApp} (not visible here); once the source stream is built,
 * the base class calls back into {@link #handler}.
 */
public class Kafka2Hive extends Kafka2TableApp {
    // Default job settings; each may be overridden on the command line (see main).
    private static final String HADOOP_USER_NAME = "root";
    private static final String CHECKPOINT_NAME = "kafka2hive";
    private static final String KAFKA_SERVERS = "slaves02:9092,slaves01:9092,slaves03:9092";
    private static final String CHECKPOINT_STORAGE = "hdfs://slaves01:8020";
    private static final String KAFKA_TOPICS = "filebeat-hdfs";
    private static final String KAFKA_GROUP_ID = "kafka2hive";
    // Directory containing hive-site.xml used by HiveCatalog.
    private static final String HIVE_CONF_DIR = "/etc/hive/3.1.4.0-315/0";
    private static final String HIVE_DB_NAME = "event";
    private static final String HIVE_TABLE_NAME = "action";

    /**
     * Callback invoked by the base class with the prepared table environment and
     * the raw Kafka source stream; delegates to the Hive sink pipeline.
     *
     * @param tableEnv     table environment created by the base class
     * @param sourceStream raw JSON lines consumed from Kafka
     */
    @Override
    public void handler(StreamTableEnvironment tableEnv, DataStreamSource<String> sourceStream) {
        System.out.println("handler...");
        sink2hive(tableEnv, sourceStream);
    }

    /**
     * Builds the Kafka-to-Hive pipeline: registers a Hive catalog, parses the raw
     * JSON into {@code FileHdfsBean}s, exposes them as a temporary view, creates the
     * target Hive table if missing, and submits a continuous INSERT into it.
     *
     * @param tableEnv     table environment to register the catalog and run SQL on
     * @param sourceStream raw JSON lines consumed from Kafka
     */
    private void sink2hive(StreamTableEnvironment tableEnv, DataStreamSource<String> sourceStream) {
        System.out.println("sink2hive...");
        Configuration configuration = tableEnv.getConfig().getConfiguration();
        // true = use the Hive/MapReduce reader, false = use Flink's native reader.
        // Set on the table environment, so it applies to all sinks/sources.
        configuration.setString("table.exec.hive.fallback-mapred-reader", "false");
        // Create and register the Hive catalog, then make it the current catalog so
        // the SQL below resolves against Hive.
        String name = "kafka2hive";
        String defaultDatabase = "default";
        HiveCatalog hiveCatalog = new HiveCatalog(name, defaultDatabase, HIVE_CONF_DIR);
        System.out.println("Registering Hive catalog...");
        tableEnv.registerCatalog(name, hiveCatalog);
        tableEnv.useCatalog(name);
        System.out.println("Parsing fields into bean objects...");
        // Parse each JSON line into a FileHdfsBean; drop records that fail to parse.
        SingleOutputStreamOperator<FileHdfsBean> beanStream = sourceStream
                .map(JsonParser::ParseHdfs)
                .filter(Objects::nonNull);
        // Expose the bean stream as a temporary view for SQL access.
        System.out.println("Creating temporary view...");
        // NOTE(fix): the previous "drop table if exists tmpTable" ran against the Hive
        // catalog and cannot remove a temporary view (at worst it could drop a real
        // Hive table of that name). dropTemporaryView is the correct way to free the
        // name before re-registering it.
        tableEnv.dropTemporaryView("tmpTable");
        tableEnv.createTemporaryView("tmpTable", beanStream);
        System.out.println("Creating Hive table...");
        // Switch to the Hive dialect so the DDL below is parsed as HiveQL.
        tableEnv.getConfig().setSqlDialect(SqlDialect.HIVE);
        String table = HIVE_DB_NAME + "." + HIVE_TABLE_NAME;
        // tableEnv.executeSql("drop table if exists " + table);
        tableEnv.executeSql("create database IF NOT EXISTS " + HIVE_DB_NAME);
        // Partitioned ORC table; partition commit publishes partitions to the
        // metastore and writes a _SUCCESS file as soon as processing time allows.
        tableEnv.executeSql("CREATE TABLE IF NOT EXISTS " + table + "\n" +
                "(uid string, uid_type string, agent string, ip string,\n" +
                "`timestamp` timestamp,`time` timestamp,`year` string,`month` string,`week` string,`hour` string, `minute` string,properties map<string,string>)\n" +
                "PARTITIONED BY(game_id int,timezone string,event string,day date)\n" +
                "ROW FORMAT DELIMITED\n" +
                "FIELDS TERMINATED BY '\\t'\n" +
                "COLLECTION ITEMS TERMINATED BY ','\n" +
                "MAP KEYS TERMINATED BY ':'\n" +
                "stored as orc TBLPROPERTIES (\n" +
                "'sink.partition-commit.trigger'='process-time',\n" +
                "'sink.partition-commit.delay'='0s',\n" +
                "'sink.partition-commit.policy.kind'='metastore,success-file',\n" +
                "'sink.shuffle-by-partition.enable'='true',\n" +
                "'auto-compaction'='true',\n" +
                "'compaction.file-size'='128MB'\n" +
                ")"
        );
        // Switch back to the default (Flink SQL) dialect for the DML below.
        tableEnv.getConfig().setSqlDialect(SqlDialect.DEFAULT);
        System.out.println("Inserting data into Hive table...");
        // Continuous insert; 'properties' arrives as a "k:v&k:v" string and is
        // expanded into a map here.
        tableEnv.executeSql("INSERT INTO  " + table + "\n" +
                "SELECT uid, uid_type, agent, ip,`timestamp`,`time`,`year`,`month`,`week`,`hour`,`minute`," +
                "str_to_map(properties,'&',':') as properties,game_id ,timezone ,event ,`day` from tmpTable");
    }

    /**
     * CLI entry point. Every setting falls back to the class-level default when the
     * corresponding {@code --key value} argument is absent.
     */
    public static void main(String[] args) {
        Kafka2Hive app = new Kafka2Hive();
        ParameterTool parameterTool = ParameterTool.fromArgs(args);
        String servers = parameterTool.get("servers", KAFKA_SERVERS);
        String topic = parameterTool.get("topic", KAFKA_TOPICS);
        String groupId = parameterTool.get("groupId", KAFKA_GROUP_ID);
        String checkpointStorage = parameterTool.get("checkpointStorage", CHECKPOINT_STORAGE);
        String checkpointName = parameterTool.get("checkpointName", CHECKPOINT_NAME);
        String hadoopUser = parameterTool.get("hadoopUser", HADOOP_USER_NAME);
        // initAndStart is defined in Kafka2TableApp (not visible here); presumably it
        // builds the environment and eventually invokes handler(...).
        app.initAndStart(servers, topic, groupId, checkpointStorage, checkpointName, hadoopUser);
    }
}
