package com.ybw.case1;

import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend;
import org.apache.flink.contrib.streaming.state.PredefinedOptions;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.StringData;
import org.apache.flink.table.data.TimestampData;
import org.apache.hudi.common.model.HoodieTableType;
import org.apache.hudi.configuration.FlinkOptions;
import org.apache.hudi.util.HoodiePipeline;

import java.util.HashMap;
import java.util.Map;
import java.util.Objects;

/**
 * @Title: FromKafka2HudiApp
 * @Description:
 * @author: ybw
 * @date: 2023/7/7 9:31
 * @Version: 1.0
 */
public class FromKafka2HudiApp {

    /** Number of space-separated fields each Kafka record must contain: uuid name age partition. */
    private static final int EXPECTED_FIELDS = 4;

    /**
     * Entry point: reads whitespace-delimited records from Kafka topic {@code test111},
     * converts them to {@link RowData} and streams them into a Hudi MERGE_ON_READ table.
     *
     * @param args unused command-line arguments
     * @throws Exception if the Flink job fails to build or execute
     */
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        configureStateBackend(env);
        configureCheckpointing(env);

        // 1. Kafka source: value-only String records, starting from the latest offsets.
        KafkaSource<String> kafkaSource = buildKafkaSource();

        // 2. Build the data stream: parse each text record into a 5-field RowData.
        DataStream<RowData> dataStream = env
                .fromSource(kafkaSource, WatermarkStrategy.noWatermarks(), "kafka-source")
                .map(new RecordToRowData());

        // Debug aid: mirror every parsed row to stdout.
        dataStream.map(Objects::toString).print("process-console");

        // 3. Sink the stream into the Hudi table.
        // The second argument tells Hudi whether the input stream is bounded; this is
        // an unbounded Kafka stream, so it is false.
        buildHudiPipeline().sink(dataStream, false);

        env.execute("FromKafka2HudiApp");
    }

    /** Installs a local RocksDB state backend tuned for spinning disks with extra memory. */
    private static void configureStateBackend(StreamExecutionEnvironment env) {
        EmbeddedRocksDBStateBackend rocksDBStateBackend = new EmbeddedRocksDBStateBackend();
        // NOTE(review): hard-coded Windows dev path — parameterize before deploying to a cluster.
        rocksDBStateBackend.setDbStoragePath("D:\\project\\FlinkTutorial\\hudi\\src\\main\\resources\\rocksdb\\APIKafkaSource2HudiSink");
        rocksDBStateBackend.setPredefinedOptions(PredefinedOptions.SPINNING_DISK_OPTIMIZED_HIGH_MEM);
        env.setStateBackend(rocksDBStateBackend);
    }

    /** Enables checkpointing every 10 seconds, persisting checkpoints to HDFS. */
    private static void configureCheckpointing(StreamExecutionEnvironment env) {
        CheckpointConfig checkpointConfig = env.getCheckpointConfig();
        checkpointConfig.setCheckpointInterval(10_000L); // 10 s
        checkpointConfig.setCheckpointStorage("hdfs://hadoop102:8020/ckps");
    }

    /** Builds the Kafka source for topic {@code test111} on broker hadoop102:9092. */
    private static KafkaSource<String> buildKafkaSource() {
        return KafkaSource.<String>builder()
                .setBootstrapServers("hadoop102:9092")
                .setTopics("test111")
                // NOTE(review): group id looks copy-pasted from an Iceberg job; left unchanged
                // because renaming it would abandon the existing consumer-group offsets.
                .setGroupId("KafkaSource2IcebergSink")
                .setStartingOffsets(OffsetsInitializer.latest())
                .setValueOnlyDeserializer(new SimpleStringSchema())
                .build();
    }

    /**
     * Declares the Hudi target table {@code t1}: schema, primary key, partition column,
     * and MERGE_ON_READ table options with {@code ts} as the precombine field.
     */
    private static HoodiePipeline.Builder buildHudiPipeline() {
        String targetTable = "t1";
        String basePath = "hdfs://hadoop102:8020/warehouse/hudi_flink/t1";

        Map<String, String> options = new HashMap<>();
        options.put(FlinkOptions.PATH.key(), basePath);
        options.put(FlinkOptions.TABLE_TYPE.key(), HoodieTableType.MERGE_ON_READ.name());
        options.put(FlinkOptions.PRECOMBINE_FIELD.key(), "ts");

        return HoodiePipeline.builder(targetTable)
                .column("uuid VARCHAR(20)")
                .column("name VARCHAR(10)")
                .column("age INT")
                .column("ts TIMESTAMP(3)")
                .column("`partition` VARCHAR(20)")
                .pk("uuid")
                .partition("partition")
                .options(options);
    }

    /**
     * Parses one space-delimited Kafka record ("uuid name age partition") into a
     * 5-field {@link RowData}; the {@code ts} field is stamped with the current wall-clock time.
     *
     * <p>A static nested class (not a lambda) so Flink's type extraction works without
     * an explicit {@code returns(...)} hint.
     */
    private static final class RecordToRowData implements MapFunction<String, RowData> {
        @Override
        public RowData map(String line) throws Exception {
            String[] fields = line.split(" ");
            // Fail fast with a descriptive message instead of an opaque
            // ArrayIndexOutOfBoundsException on malformed records.
            if (fields.length < EXPECTED_FIELDS) {
                throw new IllegalArgumentException(
                        "Malformed record: expected at least " + EXPECTED_FIELDS
                                + " space-separated fields but got " + fields.length
                                + " in \"" + line + "\"");
            }
            GenericRowData rowData = new GenericRowData(5);
            rowData.setField(0, StringData.fromString(fields[0]));                      // uuid
            rowData.setField(1, StringData.fromString(fields[1]));                      // name
            rowData.setField(2, Integer.parseInt(fields[2]));                           // age
            rowData.setField(3, TimestampData.fromEpochMillis(System.currentTimeMillis())); // ts
            rowData.setField(4, StringData.fromString(fields[3]));                      // partition
            return rowData;
        }
    }
}
