package com.zhang.flink.hudi;

import org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend;
import org.apache.flink.contrib.streaming.state.PredefinedOptions;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

import java.util.concurrent.TimeUnit;

/**
 * Hudi smoke test: streams generated rows into a Hudi MERGE_ON_READ table on HDFS.
 * Hudi is a next-generation streaming data-lake platform providing table semantics,
 * transactions, efficient upsert/delete, advanced indexing, streaming ingestion
 * services, clustering/compaction optimization, and concurrency control, while
 * keeping data in open file formats.
 *
 * @author zhangyifan
 * @date 2022/11/7 09:03
 */
public class HudiTest1 {
    /**
     * Builds and submits a continuous Flink SQL pipeline: datagen source -> Hudi MOR sink.
     *
     * @param args unused
     * @throws Exception if environment setup or statement submission fails
     */
    public static void main(String[] args) throws Exception {
        // Must be set BEFORE any HDFS access (checkpoint storage below, Hudi table
        // path later); otherwise the HDFS client authenticates as the local OS user.
        System.setProperty("HADOOP_USER_NAME", "zhang");

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        // State backend: incremental RocksDB checkpoints, with predefined options
        // tuned for spinning disks and generous memory.
        EmbeddedRocksDBStateBackend embeddedRocksDBStateBackend = new EmbeddedRocksDBStateBackend(true);
        embeddedRocksDBStateBackend.setPredefinedOptions(PredefinedOptions.SPINNING_DISK_OPTIMIZED_HIGH_MEM);
        env.setStateBackend(embeddedRocksDBStateBackend);

        // Checkpointing: exactly-once every 30s, >=20s pause between checkpoints,
        // tolerate up to 5 consecutive failures, 1-minute timeout, and retain
        // externalized checkpoints on HDFS when the job is cancelled.
        env.enableCheckpointing(TimeUnit.SECONDS.toMillis(30), CheckpointingMode.EXACTLY_ONCE);
        CheckpointConfig checkpointConfig = env.getCheckpointConfig();
        checkpointConfig.setCheckpointStorage("hdfs://hadoop102:8020/hudi_ckps1");
        checkpointConfig.setMinPauseBetweenCheckpoints(TimeUnit.SECONDS.toMillis(20));
        checkpointConfig.setTolerableCheckpointFailureNumber(5);
        checkpointConfig.setCheckpointTimeout(TimeUnit.MINUTES.toMillis(1));
        checkpointConfig.enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // Unbounded datagen source producing one random row per second.
        tableEnv.executeSql("CREATE TABLE sourceT (" +
                "  uuid varchar(20)," +
                "  name varchar(10)," +
                "  age int," +
                "  ts timestamp(3)," +
                "  `partition` varchar(20)" +
                ") WITH (" +
                "  'connector' = 'datagen'," +
                "  'rows-per-second' = '1'" +
                ")");

        // Hudi MERGE_ON_READ sink table backed by HDFS.
        tableEnv.executeSql("create table t2(" +
                "  uuid varchar(20)," +
                "  name varchar(10)," +
                "  age int," +
                "  ts timestamp(3)," +
                "  `partition` varchar(20)" +
                ")" +
                "with (" +
                "  'connector' = 'hudi'," +
                "  'path' = 'hdfs://hadoop102:8020/hudi_flink/t2'," +
                "  'table.type' = 'MERGE_ON_READ'" +
                ")");

        // executeSql on an INSERT submits its own Flink job; do NOT also call
        // env.execute() afterwards — this env has no DataStream operators, so it
        // would throw "No operators defined in streaming topology".
        tableEnv.executeSql("insert into t2 select * from sourceT");
    }
}
