package com.leiyuee.flink.batch.kafka;

import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.table.data.RowData;
import org.apache.hudi.common.model.HoodieTableType;
import org.apache.hudi.configuration.FlinkOptions;
import org.apache.hudi.util.HoodiePipeline;

import java.util.*;

/**
 * Flink streaming job: consumes CDC-style records for EasyStar.dbo.sysDevice from Kafka
 * and upserts them into a Hudi COPY_ON_WRITE table, with Hive metastore sync enabled.
 *
 * <p>Connection endpoints, table names, and credentials are hard-coded for the
 * cnhuam0itpoc85 POC cluster.
 */
public class Kafka2hudi {

    /** Job name shown in the Flink UI / logs. */
    static String JOB_NAME = "sync cnhuam0itpoc85_EasyStar.Kafka2hudi from kafka to hudi 1026";

    public static void main(String[] args) throws Exception {
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        env.setParallelism(1);

        String kafka_topicName = "cnhuam0itpoc85_EasyStar.dbo.sysDevice";
        String kafka_bootStrpServers = "cnhuam0itpoc85:9092,cnhuam0itpoc86:9092,cnhuam0itpoc87:9092";
        String kafka_consumer_group = "connect-cluster";

        Properties properties = new Properties();
        properties.setProperty("bootstrap.servers", kafka_bootStrpServers);
        properties.setProperty("group.id", kafka_consumer_group);
        // Key deserializer. NOTE(review): FlinkKafkaConsumer uses the DeserializationSchema
        // passed below for values; this property only affects the key side — confirm it is needed.
        properties.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        // Source: Kafka topic decoded into Flink RowData by the project-local deserialization schema.
        FlinkKafkaConsumer<RowData> kafkaSource =
                new FlinkKafkaConsumer<>(kafka_topicName, new CustomerRowDataKafkaDeserialization(), properties);
        DataStream<RowData> dataStream = env.addSource(kafkaSource);

        // BUSI[HR] DBName=[EasyStar] TableName[sysDevice]
        // Template: targetTable = "${DBName}_${TableName}_hudi",
        //           basePath    = "hdfs://hdp-cluster/hua-datalake/ods/HR/_tmp2/${BUSI}/${DBName}/${TableName}"
        String targetTable = "EasyStar_sysDevice_hudi";
        String targetHiveTable = "EasyStar_sysDevice_hudi_hive";
        String basePath = "hdfs://hdp-cluster/hua-datalake/ods/HR/_tmp2/HR/EasyStar/sysDevice";

        Map<String, String> options = new HashMap<>();
        options.put(FlinkOptions.PATH.key(), basePath);
        // Use the shared targetTable variable: the previous hard-coded value
        // "easyStar_sysDevice_hudi" disagreed in casing with the pipeline builder's table name.
        options.put(FlinkOptions.TABLE_NAME.key(), targetTable);
        options.put(FlinkOptions.TABLE_TYPE.key(), HoodieTableType.COPY_ON_WRITE.name());
        // startWith write
        options.put(FlinkOptions.OPERATION.key(), "upsert");// write.operation
        options.put(FlinkOptions.RECORD_KEY_FIELD.key(), "ID");// write.recordkey.field
        options.put(FlinkOptions.PRECOMBINE_FIELD.key(), "ts");// write.precombine.field
        options.put(FlinkOptions.PARTITION_PATH_FIELD.key(), "partition_day");// write.partitionpath.field
        options.put(FlinkOptions.WRITE_TASKS.key(), "1");// write.tasks
        options.put(FlinkOptions.WRITE_RATE_LIMIT.key(), "2000");// write.rate.limit
        // startWith compaction
        options.put(FlinkOptions.COMPACTION_TASKS.key(), "1");// compaction.tasks
        options.put(FlinkOptions.COMPACTION_ASYNC_ENABLED.key(), "true");// compaction.async.enabled
        options.put(FlinkOptions.COMPACTION_TRIGGER_STRATEGY.key(), "num_commits");// compaction.trigger.strategy
        options.put(FlinkOptions.COMPACTION_DELTA_COMMITS.key(), "1");// compaction.delta_commits
        // startWith changelog
        options.put(FlinkOptions.CHANGELOG_ENABLED.key(), "true");// changelog.enabled
        // startWith read
        options.put(FlinkOptions.READ_AS_STREAMING.key(), "true");// read.streaming.enabled
        options.put(FlinkOptions.READ_STREAMING_CHECK_INTERVAL.key(), "3");// read.streaming.check-interval
        // startWith hive_sync
        options.put(FlinkOptions.HIVE_SYNC_ENABLED.key(), "true");// hive_sync.enable
        options.put(FlinkOptions.HIVE_SYNC_MODE.key(), "hms");// hive_sync.mode
        options.put(FlinkOptions.HIVE_SYNC_METASTORE_URIS.key(), "thrift://cnhuam0itpoc85:9083");// hive_sync.metastore.uris
        options.put(FlinkOptions.HIVE_SYNC_JDBC_URL.key(), "jdbc:hive2://cnhuam0itpoc87:10000");// hive_sync.jdbc_url
        options.put(FlinkOptions.HIVE_SYNC_TABLE.key(), targetHiveTable);// hive_sync.table
        options.put(FlinkOptions.HIVE_SYNC_DB.key(), "ods_hr_tmp2");// hive_sync.db
        options.put(FlinkOptions.HIVE_SYNC_USERNAME.key(), "jabil");// hive_sync.username
        options.put(FlinkOptions.HIVE_SYNC_PASSWORD.key(), "jabil");// hive_sync.password
        // NOTE(review): a second put of HIVE_SYNC_PASSWORD with value "true" was removed here —
        // it silently overwrote the real password. The "true" value suggests a boolean
        // hive_sync option was intended instead (e.g. hive_sync.support_timestamp) — confirm.

        HoodiePipeline.Builder builder = HoodiePipeline.builder(targetTable);

        // Column DDL fragments for the target table schema, one per source column.
        List<String> columnsList = Arrays.asList(
                "`ID` INT",
                "`FType` STRING",
                "`DeviceNo` STRING",
                "`UseType` BIGINT",
                "`Place` STRING",
                "`IP` STRING",
                "`Note` STRING",
                "`GroupID` BIGINT",
                "`CommPort` STRING",
                "`DeviceType` STRING",
                "`username` STRING",
                "`password` STRING",
                "`starttime` TIMESTAMP(3)",
                "`endtime` TIMESTAMP(3)"
        );
        columnsList.forEach(builder::column);

        // Technical columns: precombine timestamp and partition key.
        // (A duplicate `partition_day` column declaration was removed here.)
        builder.column("ts TIMESTAMP(3)")
                .column("`partition_day` VARCHAR(20)")
                .pk("ID")
                .partition("partition_day") // currentDatetime (yyyy/MM/dd)
                .options(options);

        builder.sink(dataStream, false); // The second parameter indicating whether the input data stream is bounded

        // Execute program, beginning computation.
        env.execute(JOB_NAME);
    }
}
