package com.wudl.hudi.sink;

import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Streaming read of a Hudi table through Flink SQL, printing each change to stdout.
 *
 * @author wudl
 * @since 2022-02-19 22:18
 * @version 1.0
 */

public class MysqlJoinMysqlHuDiRead {

    /**
     * Entry point: registers a Hudi table backed by HDFS and continuously
     * streams its rows to stdout.
     *
     * <p>The table is opened in streaming-read mode
     * ({@code read.streaming.enabled = true}), so the job keeps polling the
     * Hudi timeline for new commits instead of terminating after one scan.
     *
     * @param args unused command-line arguments
     * @throws Exception if the Flink job fails to start or execute
     */
    public static void main(String[] args) throws Exception {
        // 1 - obtain the stream execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Parallelism 1 keeps the printed output in a single ordered stream.
        // NOTE(review): no checkpointing is configured here; this job only
        // reads and prints, so exactly-once sink guarantees are not needed.
        env.setParallelism(1);
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // 2 - register the Hudi table.
        // Fix: the original DDL declared 'path' twice (a local file:/// path and
        // an HDFS path); duplicate WITH options are rejected by Flink's DDL
        // validation. Only the HDFS warehouse path is kept.
        tableEnv.executeSql(
                "CREATE TABLE order_hudi(\n" +
                        " id BIGINT PRIMARY KEY NOT ENFORCED," +
                        " name STRING," +
                        " age INT," +
                        " birthday STRING," +
                        " phone STRING," +
                        " address STRING," +
                        " ts STRING" +
                        ")" +
                        "WITH (" +
                        "    'connector' = 'hudi'," +
                        "    'path' = 'hdfs://192.168.1.161:8020/hudi-warehouse/myslqjoinmysqlhudiSink' ," +
                        "    'read.streaming.enabled' = 'true'," +
                        // Poll the Hudi timeline for new commits every 4 seconds.
                        "    'read.streaming.check-interval' = '4'" +
                        ")"
        );

        // 3 - continuously select from the table and print results; print()
        // blocks and drives the streaming job.
        tableEnv.executeSql("select * from  order_hudi ").print();
    }
}
