package com.fwmagic.flinkcdc.mysql;

import com.ververica.cdc.connectors.mysql.source.MySqlSource;
import com.ververica.cdc.connectors.mysql.table.StartupOptions;
import com.ververica.cdc.debezium.StringDebeziumDeserializationSchema;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

/**
 * @description: （DataStream版本）FlinkCDC 监控 MYSQL binlog
 * mysql版本：5.7.24
 * 开启mysql的binlog：
 * vim /etc/my.cnf
 * [mysqld]
 * log_bin=mysql-bin
 * binlog-format=ROW
 * binlog-do-db=test
 * server-id=1
 * <p>
 * 其中：binlog-do-db=test 表示只监控 test 数据库的 binlog
 * <p>
 * Flink集群（StandAlone）中测试：
 * 1.提交任务 |  bin/flink run -m hadoop1:8081 -c com.fwmagic.flinkcdc.mysql.FlinkCDC ./myjars/fwmagic-flinkcdc-1.0-jar-with-dependencies.jar
 * 提交任务报错：Hadoop is not in the classpath/dependencies.
 * 解决方法：手动下载：flink-shaded-hadoop-2-uber-2.7.5-10.0.jar 到 lib 目录下，重启集群
 * 2.增删改数据表的记录，看控制台打印情况
 * 3.手动保存 savepoint | bin/flink savepoint ccc5745581789efcc1970562f2dc8df4 hdfs://ns1/flinkcdc/savepoint/
 * 4.手动关闭任务，即cancel任务
 * 5.再次修改数据库中数据记录
 * 6.重启任务同时指定 savepoint 目录 |  bin/flink run -m hadoop1:8081 -s hdfs://ns1/flinkcdc/savepoint/savepoint-ccc574-e4b047907c06 -c com.fwmagic.flinkcdc.mysql.FlinkCDC ./myjars/fwmagic-flinkcdc-1.0-jar-with-dependencies.jar
 * 7.查看控制台打印情况，是否完成了断点续传功能。
 * 8.完成断点续传功能。
 * @author: fangwei
 * @date: 2022/5/18
 **/
public class FlinkCDC {

    /**
     * Entry point: builds a Flink streaming job that captures MySQL binlog
     * changes via Flink CDC (DataStream API) and prints them to stdout.
     *
     * @param args command-line arguments (unused)
     * @throws Exception if the job fails to build or execute
     */
    public static void main(String[] args) throws Exception {
        // 0. Set the HDFS access user BEFORE anything that may initialize the
        //    Hadoop FileSystem client (checkpoint storage points at HDFS).
        //    Setting it up-front guarantees the property is visible no matter
        //    when the client is first created.
        System.setProperty("HADOOP_USER_NAME", "hadoop");

        // 1. Create the streaming execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // 2. Single parallelism keeps the demo output ordered and readable.
        env.setParallelism(1);

        // 3. Enable checkpointing every 5 seconds with exactly-once semantics.
        env.enableCheckpointing(5000);
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        // Retain the last checkpoint when the job is cancelled, so the job can
        // later be resumed from it (supports the savepoint/restore workflow
        // described in the class Javadoc).
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        // Restart from the last checkpoint on failure: up to 3 attempts, 2s apart.
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 2000L));
        // Store checkpoint state on HDFS (async snapshots enabled).
        // NOTE(review): FsStateBackend is deprecated in newer Flink releases in
        // favor of HashMapStateBackend + setCheckpointStorage; kept here to stay
        // compatible with the Flink version this project builds against.
        env.setStateBackend(new FsStateBackend("hdfs://ns1/flinkcdc/checkpoint2", true));

        // 4. Build the Flink CDC MySQL source.
        MySqlSource<String> mySqlSource = MySqlSource.<String>builder()
                .hostname("hadoop3")
                .port(3306)
                .username("root")
                .password("qwert123")
                .databaseList("test")
                // Optional: restrict capture to specific tables. If omitted, all
                // tables of the listed databases are captured.
                // Note: entries must use the "db.table" form.
                .tableList("test.t_user")
                // initial(): take a snapshot of existing data first, then switch
                // to reading the binlog for incremental changes.
                .startupOptions(StartupOptions.initial())
                .deserializer(new StringDebeziumDeserializationSchema())
                .build();

        // 5. Source: read change events from MySQL. No watermarks needed — this
        //    pipeline does no event-time processing.
        DataStreamSource<String> source = env.fromSource(mySqlSource, WatermarkStrategy.noWatermarks(), "flinkcdc-mysql");

        // 6. Sink: print each change record to stdout.
        source.print();

        env.execute("Flink CDC");
    }
}
