package streaming.demo.bigdata.hive;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.SqlDialect;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.table.catalog.hive.HiveCatalog;
import base.model.ZBTopic1;
import utils.PropertiesReader;

import java.util.Properties;

/**
 * Data source: Kafka
 * Output target: Hive
 *
 */
public class SinkHiveTest1 {

    // Kafka source settings, read from the project properties file.
    private static String kafkaServers = PropertiesReader.get("default.kafka.servers");
    private static String topicFrom = PropertiesReader.get("default.kafka.topic.json.C");

    // Hive sink settings: catalog name, default database, and hive-site.xml directory.
    private static String hiveCatalog = PropertiesReader.get("target.hive.catalog.name");
    private static String hiveDefaultDB = PropertiesReader.get("target.hive.db");
    private static String hiveConfDir = PropertiesReader.get("target.hive.conf.dir");

    /**
     * Consumes JSON records from Kafka, maps them to {@link ZBTopic1} POJOs,
     * registers the stream as a temporary view, and streams it into the Hive
     * table {@code zzb_test} via an INSERT statement.
     *
     * @param args unused
     * @throws Exception if the Flink job fails to build or submit
     */
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Checkpointing is enabled so the streaming sink can commit its output;
        // interval 10s, exactly-once mode.
        env.enableCheckpointing(10000, CheckpointingMode.EXACTLY_ONCE);
        env.setParallelism(1);

        EnvironmentSettings settings = EnvironmentSettings.newInstance()
                .useBlinkPlanner()
                .inStreamingMode()
                .build();
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env, settings);

        Configuration configuration = tEnv.getConfig().getConfiguration();
        configuration.setString("table.exec.hive.fallback-mapred-reader", "true");

        // Register and activate the Hive catalog so unqualified table names
        // (e.g. zzb_test below) resolve against the configured Hive database.
        HiveCatalog hive = new HiveCatalog(hiveCatalog, hiveDefaultDB, hiveConfDir);
        tEnv.registerCatalog(hiveCatalog, hive);
        tEnv.useCatalog(hiveCatalog);
        tEnv.useDatabase(hiveDefaultDB);

        // Default dialect while defining the Kafka-backed view.
        tEnv.getConfig().setSqlDialect(SqlDialect.DEFAULT);

        Properties props = new Properties();
        props.setProperty("bootstrap.servers", kafkaServers);
        props.setProperty("group.id", "flink-consumer-SinkHiveTest1");
        // FIX: use parameterized types instead of the raw DataStream / FlinkKafkaConsumer
        // the original used; raw types discard generic type information and only
        // compile with unchecked warnings.
        DataStream<String> inputStream =
                env.addSource(new FlinkKafkaConsumer<>(topicFrom, new SimpleStringSchema(), props));
        DataStream<ZBTopic1> dataStream = inputStream.map(new MapFunction<String, ZBTopic1>() {
            @Override
            public ZBTopic1 map(String s) throws Exception {
                // Each Kafka record is a JSON object carrying t_id / t_key / t_val.
                JSONObject obj = JSON.parseObject(s);
                return new ZBTopic1(obj.getString("t_id"), obj.getString("t_key"), obj.getString("t_val"));
            }
        });
        inputStream.print("data:"); // debug: echo raw records to stdout

        tEnv.createTemporaryView("kafkaSource", dataStream);
        tEnv.from("kafkaSource").printSchema(); // debug: show the derived schema

        // Switch to Hive dialect for the INSERT into the Hive table.
        tEnv.getConfig().setSqlDialect(SqlDialect.HIVE);

        // NOTE(review): the selected columns "id, key, val" must match the column
        // names the view derives from ZBTopic1's getters; the JSON keys are
        // t_id/t_key/t_val — verify against the ZBTopic1 class (not visible here).
        String insertSQL = "insert into zzb_test select id, key, val from kafkaSource";
        // executeSql submits the streaming INSERT job; no separate env.execute() is
        // needed for this pipeline.
        tEnv.executeSql(insertSQL);

    }
}
