package org.example;

import com.ververica.cdc.connectors.base.options.StartupOptions;
import com.ververica.cdc.connectors.sqlserver.SqlServerSource;
import com.ververica.cdc.debezium.JsonDebeziumDeserializationSchema;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.TimeCharacteristic;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.source.SourceFunction;

import java.util.Properties;


/**
 * Example Flink job that captures change data (CDC) from a local SQL Server
 * instance and prints each change event as a JSON string.
 *
 * <p>Connection settings default to a local test database but may be
 * overridden positionally on the command line:
 * {@code host port database table user password}. Omitted arguments keep
 * their defaults, so invoking with no arguments behaves exactly as before.
 */
public class SqlServerLocalSourceExample {

    /** Returns {@code args[i]} when present, otherwise {@code fallback}. */
    private static String argOrDefault(String[] args, int i, String fallback) {
        return args.length > i ? args[i] : fallback;
    }

    public static void main(String[] args) throws Exception {
        // Connection parameters — overridable via command-line args, defaults unchanged.
        String hostname = argOrDefault(args, 0, "localhost");
        int port = Integer.parseInt(argOrDefault(args, 1, "1433"));
        String database = argOrDefault(args, 2, "test");
        String table = argOrDefault(args, 3, "dbo.user");
        String username = argOrDefault(args, 4, "sa");
        String password = argOrDefault(args, 5, "123456");

        // Custom Debezium converter configuration for date/time columns.
        Properties properties = new Properties();
        properties.setProperty("converters", "dateConverters");
        properties.setProperty("dateConverters.type", "org.example.SqlServerConverter");

        SourceFunction<String> sourceFunction = SqlServerSource.<String>builder()
                .hostname(hostname)
                .port(port)
                .database(database)
                .tableList(table)
                .username(username)
                .password(password)
                // Emit each change event as a JSON string.
                .deserializer(new JsonDebeziumDeserializationSchema())
                /*
                  Startup modes (note: for SQL Server the connector reads from the
                  transaction log / CDC tables, not a MySQL binlog):
                  initial (default): take a snapshot of the monitored tables on first
                      startup, then continue reading the latest changes.
                  latest-offset: never snapshot; read only changes made after the
                      connector was started.
                 */
                .startupOptions(StartupOptions.initial())
                .debeziumProperties(properties)
                .build();

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime);

        /*
          Flink CDC stores the read position in checkpointed state; to resume
          from where it left off, restart the job from a checkpoint or savepoint.
         */
        // 1. Enable checkpointing every 5 seconds.
        env.enableCheckpointing(5000L);
        // 2. Use exactly-once checkpoint semantics.
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        // 3. Retain the last checkpoint when the job is cancelled.
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        // 4. Restart strategy: up to 3 attempts, 2 seconds apart.
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 2000L));
        // 5. State backend options (memory / filesystem-HDFS / RocksDB) — pick one if needed:
//        env.setStateBackend(new RocksDBStateBackend("file:///usr/local/localSqlServer/ck"));
//        env.setStateBackend(new MemoryStateBackend());
        //env.setStateBackend(new FsStateBackend("hdfs://sc2:8020/flinkCDC"));
        // 6. User name for HDFS access (only relevant when an HDFS-backed state backend is enabled).
        System.setProperty("HADOOP_USER_NAME", "root");

        // Keep sink parallelism at 1 to preserve event ordering.
        //env.addSource(sourceFunction).addSink(new MysqlSink()).setParallelism(1);

        env.addSource(sourceFunction).print().setParallelism(1);

        env.execute("sqlServerToMysql");
    }

}
