package com.ybw.source;

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend;
import org.apache.flink.contrib.streaming.state.PredefinedOptions;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.data.StringData;
import org.apache.flink.table.data.TimestampData;
import org.apache.hudi.common.model.HoodieTableType;
import org.apache.hudi.configuration.FlinkOptions;
import org.apache.hudi.util.HoodiePipeline;

import java.util.HashMap;
import java.util.Map;
import java.util.Objects;

/**
 * @Title: APIHudiSource2HudiSink
 * @Description:
 * @author: ybw
 * @date: 2023/7/6 9:12
 * @Version: 1.0
 */
/**
 * Streaming job: reads Hudi MOR table {@code t1} as an unbounded changelog stream via the
 * {@link HoodiePipeline} DataStream API, enriches each row with a processing flag and a fresh
 * timestamp, and writes the result into a second Hudi MOR table {@code t2}.
 *
 * <p>Source schema: (uuid, name, age, ts, partition).
 * Target schema:    (uuid, name, age, ts, flag, partition).
 */
public class APIHudiSource2HudiSink {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // RocksDB state backend with a local spill path (Windows dev machine).
        // SPINNING_DISK_OPTIMIZED_HIGH_MEM trades higher memory use for fewer disk seeks.
        EmbeddedRocksDBStateBackend rocksDBStateBackend = new EmbeddedRocksDBStateBackend();
        rocksDBStateBackend.setDbStoragePath("D:\\project\\FlinkTutorial\\hudi\\src\\main\\resources\\rocksdb\\APIHudiSource2HudiSink");
        rocksDBStateBackend.setPredefinedOptions(PredefinedOptions.SPINNING_DISK_OPTIMIZED_HIGH_MEM);
        env.setStateBackend(rocksDBStateBackend);

        // Checkpoint every 10 s to HDFS; Hudi commits are driven by Flink checkpoints,
        // so this interval also controls how often data becomes visible in the sink table.
        CheckpointConfig checkpointConfig = env.getCheckpointConfig();
        checkpointConfig.setCheckpointInterval(10_000L);
        checkpointConfig.setCheckpointStorage("hdfs://hadoop102:8020/ckps");

        // 1. Source Hudi table (streaming read of the MOR changelog).
        String sourceTable = "t1";
        String sourceTableBasePath = "hdfs://hadoop102/tmp/hudi_flink/t1";

        Map<String, String> sourceHudiTableOptions = new HashMap<>();
        sourceHudiTableOptions.put(FlinkOptions.PATH.key(), sourceTableBasePath);
        sourceHudiTableOptions.put(FlinkOptions.TABLE_TYPE.key(), HoodieTableType.MERGE_ON_READ.name());
        // this option enable the streaming read
        sourceHudiTableOptions.put(FlinkOptions.READ_AS_STREAMING.key(), "true");
        // specifies the start commit instant time
//        sourceHudiTableOptions.put(FlinkOptions.READ_START_COMMIT.key(), "'20210316134557'");

        HoodiePipeline.Builder sourceHudiBuilder = HoodiePipeline.builder(sourceTable)
                .column("uuid VARCHAR(20)")
                .column("name VARCHAR(10)")
                .column("age INT")
                .column("ts TIMESTAMP(3)")
                .column("`partition` VARCHAR(20)")
                .pk("uuid")
                .partition("partition")
                .options(sourceHudiTableOptions);

        DataStream<RowData> rowDataDataStream = sourceHudiBuilder.source(env);
        rowDataDataStream.print("Source Hudi Table Original Records");

        // 2. Build the transformed stream: copy the 5-column source row into the
        // 6-column target layout, inserting a "flag" column at index 4 and
        // refreshing "ts" with the processing time.
        DataStream<RowData> dataStream = rowDataDataStream.map(new MapFunction<RowData, RowData>() {
            @Override
            public RowData map(RowData record) throws Exception {
                GenericRowData rowData = new GenericRowData(6);
                // Propagate the changelog kind (INSERT / UPDATE_BEFORE / UPDATE_AFTER /
                // DELETE). GenericRowData defaults to RowKind.INSERT, so without this
                // copy every upstream update/delete would be re-written as an insert
                // into the target table.
                rowData.setRowKind(record.getRowKind());
                // Copy field values out of the source row: Flink may reuse the
                // underlying RowData object, so each field is materialized here.
                rowData.setField(0, StringData.fromString(record.getString(0).toString()));
                rowData.setField(1, StringData.fromString(record.getString(1).toString()));
                rowData.setField(2, record.getInt(2));
                rowData.setField(3, TimestampData.fromEpochMillis(System.currentTimeMillis()));
                rowData.setField(4, StringData.fromString("flink-processed"));
                rowData.setField(5, StringData.fromString(record.getString(4).toString()));
                return rowData;
            }
        });

        dataStream.map(Objects::toString).print("Flink Processed");

        // 3. Target Hudi table options. PRECOMBINE_FIELD "ts" decides which record
        // wins when two records with the same key are merged.
        String targetTable = "t2";
        String basePath = "hdfs://hadoop102/tmp/hudi_flink/t2";

        Map<String, String> targetHudiTableOptions = new HashMap<>();
        targetHudiTableOptions.put(FlinkOptions.PATH.key(), basePath);
        targetHudiTableOptions.put(FlinkOptions.TABLE_TYPE.key(), HoodieTableType.MERGE_ON_READ.name());
        targetHudiTableOptions.put(FlinkOptions.PRECOMBINE_FIELD.key(), "ts");

        HoodiePipeline.Builder targetHudiBuilder = HoodiePipeline.builder(targetTable)
                .column("uuid VARCHAR(20)")
                .column("name VARCHAR(10)")
                .column("age INT")
                .column("ts TIMESTAMP(3)")
                .column("flag VARCHAR(10)")
                .column("`partition` VARCHAR(20)")
                .pk("uuid")
                .partition("partition")
                .options(targetHudiTableOptions);

        // 将数据流写入到hudi表中
        // The second parameter indicating whether the input data stream is bounded
        // 第二个参数指示输入数据流是否有界
        targetHudiBuilder.sink(dataStream, false);

        env.execute("APIHudiSource2HudiSink");
    }
}
