package com.wudl.flink.iceberg;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.catalog.Catalog;
import org.apache.flink.table.catalog.hive.HiveCatalog;

/**
 * @ClassName : Blink2IcebergMain
 * @Description :
 * @Author :wudl
 * @Date: 2021-09-08 23:52
 */

public class Blink2IcebergMain {

    /**
     * Streams order events from a Kafka topic into an Iceberg table backed by a
     * Hadoop catalog on HDFS, formatting the event timestamp as a
     * {@code yyyy-MM-dd} string column.
     *
     * <p>Pipeline: Kafka source table (registered under a Hive catalog) →
     * {@code INSERT INTO} the Iceberg table {@code hadoop_catalog.iceberg_db.iceberg_002}.
     *
     * @param args unused
     */
    public static void main(String[] args) {
        // Must be set before anything touches HDFS (state backend, Iceberg warehouse).
        System.setProperty("HADOOP_USER_NAME", "hdfs");

        // Streaming execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        // The Iceberg sink commits data files only on checkpoint completion, so
        // checkpointing must actually be enabled with an interval — configuring
        // the mode alone (as before) leaves checkpointing off and no data ever
        // becomes visible in the table.
        env.enableCheckpointing(60000L);
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().enableExternalizedCheckpoints(
                CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(2, 2000L));

        // Keep checkpoint state on HDFS.
        env.setStateBackend(new FsStateBackend("hdfs://node01.com:8020/flink/warehouse/"));

        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // Iceberg catalog of type 'hadoop', warehoused on HDFS.
        tableEnv.executeSql("CREATE CATALOG hadoop_catalog WITH (\n" +
                "  'type'='iceberg',\n" +
                "  'catalog-type'='hadoop',\n" +
                "  'warehouse'='hdfs://node01.com:8020/flink/warehouse/',\n" +
                "  'property-version'='1'\n" +
                ")");

        tableEnv.useCatalog("hadoop_catalog");
        tableEnv.executeSql("CREATE DATABASE if not exists iceberg_db");
        tableEnv.useDatabase("iceberg_db");

        // Recreate the Iceberg result table. 'if exists' keeps the first run
        // from failing when the table is not there yet.
        tableEnv.executeSql("drop table if exists iceberg_002");
        tableEnv.executeSql("create table iceberg_002(\n" +
                "user_id string comment 'user_id',\n" +
                "order_amount double comment 'order_amount',\n" +
                "log_ts string\n" +
                ")");

        // Hive catalog used to hold the Kafka source table definition.
        String hiveCatalogName = "myhive";
        String defaultDatabase = "tmp";
        String hiveConfDir = "/etc/hive/3.1.4.0-315/0/";
        Catalog catalog = new HiveCatalog(hiveCatalogName, defaultDatabase, hiveConfDir);
        tableEnv.registerCatalog(hiveCatalogName, catalog);
        tableEnv.useCatalog(hiveCatalogName);

        // Kafka source table with a 5-second watermark on the event timestamp.
        tableEnv.executeSql("drop table if exists ods_k_2_iceberg");
        tableEnv.executeSql("create table ods_k_2_iceberg(\n" +
                "user_id string,\n" +
                "order_amount double,\n" +
                "log_ts TIMESTAMP(3),\n" +
                "WATERMARK FOR log_ts AS log_ts - INTERVAL '5' SECOND\n" +
                ") WITH(\n" +
                "'connector'='kafka',\n" +
                "'topic'='t_kafka_03',\n" +
                "'scan.startup.mode'='latest-offset',\n" +
                "'properties.bootstrap.servers'='node01.com:6667',\n" +
                "'properties.group.id'='test-lcy',\n" +
                "'format'='json'\n" +
                ")");

        System.out.println("---> insert into iceberg table from kafka stream table...");
        // Fixed: the built-in function is DATE_FORMAT, not DATE_FROMAT — the
        // original SQL failed validation with "No match found for function".
        tableEnv.executeSql("insert into hadoop_catalog.iceberg_db.iceberg_002 select user_id, order_amount, DATE_FORMAT(log_ts,'yyyy-MM-dd') FROM ods_k_2_iceberg");

    }
}
