package streaming.api.sink;

import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.*;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.catalog.hive.HiveCatalog;
import org.apache.flink.table.descriptors.Json;
import org.apache.flink.table.descriptors.Kafka;
import org.apache.flink.table.descriptors.Schema;
import utils.CommonUtils;
import utils.PropertiesReader;

/**
 * Streaming job: Kafka -> Hive.
 *
 * <p>Source: Kafka topic configured under {@code default.kafka.topic.json.A} (JSON records
 * with fields t_id, t_key, t_val).
 * <p>Sink: Hive table {@code zzb_test} in the catalog/database configured under
 * {@code target.hive.*}.
 */
public class SinkTest4_hive {

    // Connection/config values are resolved once from the properties file at class load.
    private static final String kafkaServers = PropertiesReader.get("default.kafka.servers");
    private static final String topicFrom = PropertiesReader.get("default.kafka.topic.json.A");
    private static final String kafkaFormat = "json";

    private static final String hiveCatalog = PropertiesReader.get("target.hive.catalog.name");
    private static final String hiveDefaultDB = PropertiesReader.get("target.hive.db");
    private static final String hiveConfDir = PropertiesReader.get("target.hive.conf.dir");

    public static void main(String[] args) throws Exception {

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Exactly-once checkpointing every 10s — required for transactional Hive commits.
        env.enableCheckpointing(10000, CheckpointingMode.EXACTLY_ONCE);
        env.setParallelism(1);

        EnvironmentSettings settings = EnvironmentSettings.newInstance()
            .useBlinkPlanner()  // the Hive connector requires the Blink planner
            .inStreamingMode()  // unbounded (streaming) execution
            .build();
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings);

        Configuration configuration = tEnv.getConfig().getConfiguration();
        configuration.setString("table.exec.hive.fallback-mapred-reader", "true");

        // Register the Hive catalog and make it the session default so that unqualified
        // table names (e.g. zzb_test) resolve against Hive.
        HiveCatalog hive = new HiveCatalog(hiveCatalog, hiveDefaultDB, hiveConfDir);
        tEnv.registerCatalog(hiveCatalog, hive);
        tEnv.useCatalog(hiveCatalog);
        tEnv.useDatabase(hiveDefaultDB);

        // The Kafka source DDL is standard Flink SQL, so use the default dialect here.
        tEnv.getConfig().setSqlDialect(SqlDialect.DEFAULT);

        String[] fields = {"t_id", "t_key", "t_val"};
        String[] fieldTypes = {"STRING", "STRING", "STRING"};

        // Build the CREATE TABLE statement in one shot; no trailing-comma surgery needed.
        String createTableSQL = String.format(
            "CREATE TABLE inputTable (%s) WITH ("
                + "'connector' = 'kafka',"
                + "'topic' = '%s',"
                + "'properties.bootstrap.servers' = '%s',"
                + "'format' = '%s')",
            CommonUtils.dealStringByFieldAndFieldType(fields, fieldTypes),
            topicFrom, kafkaServers, kafkaFormat);

        System.out.println("inputTable SQL: " + createTableSQL);
        tEnv.executeSql(createTableSQL);

        // Switch to the Hive dialect for statements that target Hive tables.
        tEnv.getConfig().setSqlDialect(SqlDialect.HIVE);

        // Continuously write Kafka records into the Hive table. executeSql submits the
        // job asynchronously; the streaming insert keeps running on the cluster.
        String insertSQL = "insert into zzb_test select t_id,t_key,t_val from inputTable ";
        tEnv.executeSql(insertSQL);
    }
}
