package com.ybw.sink;

import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend;
import org.apache.flink.contrib.streaming.state.PredefinedOptions;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

import java.util.concurrent.TimeUnit;

/**
 * @Title: SqlDataGen2HudiSink
 * @Description: Flink SQL job that generates rows with the datagen connector
 *               and streams them into a Hudi MERGE_ON_READ table on HDFS,
 *               with RocksDB state backend and externalized checkpoints.
 * @author: ybw
 * @date: 2023/7/4 17:34
 * @Version: 1.0
 */
public class SqlDataGen2HudiSink {

    /**
     * Entry point: configures a local Flink environment (RocksDB state backend,
     * exactly-once checkpointing to HDFS, fixed-delay restart strategy), then
     * registers a datagen source table and a Hudi sink table and streams the
     * generated rows into the Hudi table.
     *
     * @param args unused command-line arguments
     */
    public static void main(String[] args) {

        // Local environment for IDE runs; switch to
        // StreamExecutionEnvironment.getExecutionEnvironment() for cluster deployment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironment();

        /*
            Fixed-delay restart strategy: the job is restarted up to a given
            number of times; between consecutive attempts there is a fixed wait.
            If the maximum number of attempts is exceeded, the job fails.
            Equivalent flink-conf.yaml settings:
            restart-strategy: fixed-delay
            restart-strategy.fixed-delay.attempts: <n>   # default Integer.MAX_VALUE
            restart-strategy.fixed-delay.delay: <t>      # default akka.ask.timeout
        */
        // BUG FIX: fixedDelayRestart(int restartAttempts, long delayBetweenAttemptsMillis)
        // — the original call fixedDelayRestart(10000, 20) configured 10,000 attempts
        // with a 20 ms delay (arguments transposed). Intended: 20 attempts, 10 s delay.
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(20, TimeUnit.SECONDS.toMillis(10)));

        // State backend: embedded RocksDB with incremental checkpoints enabled.
        EmbeddedRocksDBStateBackend embeddedRocksDBStateBackend = new EmbeddedRocksDBStateBackend(true);

        // Local RocksDB storage path when running from the IDE.
        embeddedRocksDBStateBackend.setDbStoragePath("D:\\project\\FlinkTutorial\\hudi\\src\\main\\resources\\rocksdb");

        embeddedRocksDBStateBackend.setPredefinedOptions(PredefinedOptions.SPINNING_DISK_OPTIMIZED_HIGH_MEM);
        env.setStateBackend(embeddedRocksDBStateBackend);

        // Checkpointing: exactly-once every 30 s, stored on HDFS; checkpoints are
        // retained on cancellation so the job can be restored manually.
        env.enableCheckpointing(TimeUnit.SECONDS.toMillis(30), CheckpointingMode.EXACTLY_ONCE);
        CheckpointConfig checkpointConfig = env.getCheckpointConfig();
        checkpointConfig.setCheckpointStorage("hdfs://hadoop102:8020/ckps");
        checkpointConfig.setMinPauseBetweenCheckpoints(TimeUnit.SECONDS.toMillis(20));
        checkpointConfig.setTolerableCheckpointFailureNumber(5);
        checkpointConfig.setCheckpointTimeout(TimeUnit.MINUTES.toMillis(1));
        // NOTE(review): enableExternalizedCheckpoints is deprecated in newer Flink
        // releases in favor of setExternalizedCheckpointCleanup; kept for
        // compatibility with the Flink version this project targets.
        checkpointConfig.enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        StreamTableEnvironment sTableEnv = StreamTableEnvironment.create(env);

        // Source: datagen connector producing one random row per second.
        sTableEnv.executeSql("CREATE TABLE sourceT (\n" + "  uuid varchar(20),\n" + "  name varchar(10),\n" + "  age int,\n" + "  ts timestamp(3),\n" + "  `partition` varchar(20)\n" + ") WITH (\n" + "  'connector' = 'datagen',\n" + "  'rows-per-second' = '1'\n" + ")");

        // Sink: Hudi MERGE_ON_READ table on HDFS with the same schema.
        sTableEnv.executeSql("create table t2(\n" + "  uuid varchar(20),\n" + "  name varchar(10),\n" + "  age int,\n" + "  ts timestamp(3),\n" + "  `partition` varchar(20)\n" + ")\n" + "with (\n" + "  'connector' = 'hudi',\n" + "  'path' = 'hdfs://hadoop102:8020/tmp/hudi_flink/t2',\n" + "  'table.type' = 'MERGE_ON_READ'\n" + ")");

        // Continuous streaming insert from the datagen source into the Hudi table.
        sTableEnv.executeSql("insert into t2 select * from sourceT");

    }
}
