package table;

import bean.FileEntityBean;
import bean.FileHdfsBean;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.table.api.SqlDialect;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.catalog.hive.HiveCatalog;
import parser.JsonParser;

import java.util.Objects;

/**
 * Flink job that reads strings from Kafka (wiring inherited from {@link Kafka2TableApp})
 * and writes into an Iceberg table registered in a Hive-backed Iceberg catalog.
 *
 * <p>Current pipeline: creates the catalog/database/table via Flink SQL, then inserts
 * rows from a temporary 'datagen' source table and runs a bounded SELECT for inspection.
 * The Kafka source stream is parsed into {@code FileEntityBean}s but is not yet sunk
 * anywhere (see note in {@link #sink2iceberg}).
 */
public class Kafka2Iceberg extends Kafka2TableApp {
    // Defaults below can each be overridden by the matching CLI argument in main().
    private static final String HADOOP_USER_NAME = "root";
    private static final String CHECKPOINT_NAME = "kafka2hive";
    private static final String KAFKA_SERVERS = "slaves02:9092,slaves01:9092,slaves03:9092";
    private static final String CHECKPOINT_STORAGE = "hdfs://slaves01:8020";
    private static final String KAFKA_TOPICS = "filebeat-entity";
    private static final String KAFKA_GROUP_ID = "kafka2iceberg";
    // NOTE(review): currently unused in this class; kept for compatibility with
    // siblings/config — confirm before removing.
    private static final String HIVE_CONF_DIR = "/etc/hive/3.1.4.0-315/0";
    private static final String ICEBERG_DB_NAME = "event";
    private static final String ICEBERG_TABLE_NAME = "profile";
    private static final String CATALOG_NAME = "iceberg";

    /**
     * Entry point invoked by the base class once the stream environment is set up.
     *
     * @param tableEnv     table environment bridged to the streaming environment
     * @param sourceStream raw JSON strings consumed from Kafka
     */
    @Override
    public void handler(StreamTableEnvironment tableEnv, DataStreamSource<String> sourceStream) {
        System.out.println("handler...");
        sink2iceberg(tableEnv, sourceStream);
    }

    /**
     * Registers the Iceberg catalog, creates the target database/table, and inserts
     * generated rows. Also issues a bounded SELECT and prints its result for debugging.
     *
     * @param tableEnv     table environment used to execute all SQL statements
     * @param sourceStream raw Kafka stream (parsed but not yet written — see note below)
     */
    private void sink2iceberg(StreamTableEnvironment tableEnv, DataStreamSource<String> sourceStream) {
        System.out.println("save2iceberg...");
        Configuration configuration = tableEnv.getConfig().getConfiguration();
        // true uses MapReduce, false uses Flink; setting it on tableEnv applies to all sinks.
        configuration.setString("table.exec.hive.fallback-mapred-reader", "false");

        // Create the Iceberg catalog (Hive metastore backed).
        String catalogSQL = "CREATE CATALOG " + CATALOG_NAME + " WITH (\n" +
                "  'type'='iceberg',\n" +
                "  'catalog-type'='hive',\n" +
                "  'uri'='thrift://slaves02:9083',\n" +
                "  'clients'='5',\n" +
                "  'warehouse'='hdfs://slaves01:8020/warehouse/tablespace/managed/hive' \n" +
                ")";
        System.out.println("catalogSQL:" + catalogSQL);
        try {
            tableEnv.executeSql(catalogSQL);
        } catch (Exception e) {
            // Best-effort: the catalog may already exist. Print the full exception
            // (not just getMessage(), which can be null) so real failures are visible.
            System.out.println("tableEnv.executeSql(catalogSQL) error:" + e);
            e.printStackTrace();
        }

        System.out.println("registering Iceberg catalog...");
        // Switch to the catalog we just created (use the constant, not a second literal).
        tableEnv.useCatalog(CATALOG_NAME);

        System.out.println("parsing fields into bean stream...");
        // Parse each JSON record into a FileEntityBean, dropping records that fail to parse.
        // NOTE(review): this stream is never attached to a sink, so it does not reach
        // Iceberg yet — presumably the datagen insert below is a placeholder. Confirm
        // and wire beanStream into the target table, or remove this transformation.
        SingleOutputStreamOperator<FileEntityBean> beanStream = sourceStream
                .map(JsonParser::ParseEntity)
                .filter(Objects::nonNull);

        // Create the target database and table.
        String table = CATALOG_NAME + "." + ICEBERG_DB_NAME + "." + ICEBERG_TABLE_NAME;
        tableEnv.executeSql("CREATE DATABASE IF NOT EXISTS " + ICEBERG_DB_NAME);
        tableEnv.useDatabase(ICEBERG_DB_NAME);
        System.out.println("creating table:" + table);
        String tableSQL = "CREATE TABLE IF NOT EXISTS " + table + " (\n" +
                "    id BIGINT COMMENT 'unique id',\n" +
                "    data STRING\n" +
                "    ) WITH ('write.format.default'='ORC')";
        System.out.println("tableSQL:" + tableSQL);
        tableEnv.executeSql(tableSQL);

        // Temporary datagen source used to exercise the insert path.
        System.out.println("creating sourceTable");
        String tmpTable = CATALOG_NAME + "." + ICEBERG_DB_NAME + "." + "sourceTable";
        String tmpTableSQL = "CREATE TABLE IF NOT EXISTS " + tmpTable + " (\n" +
                " userid int,\n" +
                " f_random_str STRING\n" +
                ") WITH (\n" +
                " 'connector' = 'datagen',\n" +
                " 'rows-per-second'='100',\n" +
                " 'fields.userid.kind'='random',\n" +
                " 'fields.userid.min'='1',\n" +
                " 'fields.userid.max'='100',\n" +
                "'fields.f_random_str.length'='10'\n" +
                ")";
        tableEnv.executeSql(tmpTableSQL);

        // Insert generated rows into the Iceberg table.
        String insertSQL = "INSERT INTO  " + table + "\n" + "SELECT * FROM " + tmpTable;
        System.out.println("insertSQL:" + insertSQL);
        tableEnv.executeSql(insertSQL);

        // Debug query: print() consumes the TableResult so the rows actually appear;
        // previously the result was discarded and nothing was shown.
        String querySQL = "SELECT * from " + table + " limit 20";
        System.out.println("querySQL:" + querySQL);
        tableEnv.executeSql(querySQL).print();
    }

    /**
     * CLI entry point. Every setting falls back to the class-level default when the
     * corresponding argument (e.g. {@code --servers}, {@code --topic}) is absent.
     */
    public static void main(String[] args) {
        Kafka2Iceberg app = new Kafka2Iceberg();
        ParameterTool parameterTool = ParameterTool.fromArgs(args);
        String servers = parameterTool.get("servers", KAFKA_SERVERS);
        String topic = parameterTool.get("topic", KAFKA_TOPICS);
        String groupId = parameterTool.get("groupId", KAFKA_GROUP_ID);
        String checkpointStorage = parameterTool.get("checkpointStorage", CHECKPOINT_STORAGE);
        String checkpointName = parameterTool.get("checkpointName", CHECKPOINT_NAME);
        String hadoopUser = parameterTool.get("hadoopUser", HADOOP_USER_NAME);
        app.initAndStart(servers, topic, groupId, checkpointStorage, checkpointName, hadoopUser);
    }
}
