package net.bwie.flink;

import com.ververica.cdc.connectors.mysql.source.MySqlSource;
import com.ververica.cdc.connectors.mysql.table.StartupOptions;
import com.ververica.cdc.debezium.JsonDebeziumDeserializationSchema;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.contrib.streaming.state.EmbeddedRocksDBStateBackend;
import org.apache.flink.contrib.streaming.state.RocksDBStateBackend;
import org.apache.flink.runtime.state.StateBackend;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.ExecutionCheckpointingOptions;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

/**
 * Flink CDC demo: real-time capture of MySQL table data (full snapshot + incremental
 * changes from the binlog) with a complete Checkpoint configuration.
 *
 * @author xuanyu
 * @date 2025/10/20
 */
public class _02CdcMysqlCheckpointDemo {

	public static void main(String[] args) throws Exception{
		// 1. Execution environment (env)
		StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
		env.setParallelism(1);

		/*
			todo Flink Checkpoint configuration
		 */
		// State backend: RocksDB (on-disk, suited for large state).
		// NOTE: only one backend can be active — the original code set
		// HashMapStateBackend (in-memory) and then immediately overwrote it with
		// RocksDB, so the redundant call was removed. To use the in-memory
		// backend instead, swap the lines:
		// env.setStateBackend(new HashMapStateBackend());
		env.setStateBackend(new EmbeddedRocksDBStateBackend()) ;
		// todo s1 - enable the Checkpoint mechanism (every 3000 ms)
		env.enableCheckpointing(3000);
		// s2 - checkpointing semantics: exactly-once
		env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
		// s3 - minimum pause between two consecutive checkpoints (ms)
		env.getCheckpointConfig().setMinPauseBetweenCheckpoints(500);
		// s4 - checkpoint timeout: 1 minute
		env.getCheckpointConfig().setCheckpointTimeout(60000);
		// s5 - maximum number of tolerable consecutive checkpoint failures
		env.getCheckpointConfig().setTolerableCheckpointFailureNumber(2);
		// s6 - at most 1 checkpoint in flight at a time
		env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);
		// s7 - retain checkpoint data when the Flink job is cancelled
		env.getCheckpointConfig().setExternalizedCheckpointCleanup(
			CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION
		);
		// s8 - unaligned checkpoint barriers: better performance under backpressure
		env.getCheckpointConfig().enableUnalignedCheckpoints();
		// todo s9 - checkpoint storage path on HDFS
		env.getCheckpointConfig().setCheckpointStorage("hdfs://node101:8020/flink-checkpoint/CdcMysqlCheckpointDemo");
		// s10 - keep checkpointing enabled after (some) tasks finish
		Configuration config = new Configuration();
		config.set(ExecutionCheckpointingOptions.ENABLE_CHECKPOINTS_AFTER_TASKS_FINISH, true);
		env.configure(config);


		// 2. Data source
		/*
		Flink CDC acts as a Flink Source, reading table data from the database in real time.
			Key parameter: startup mode
				1. Full capture (initial)
					First reads the table's historical data, then tails the binlog
					for incremental changes.
				2. Incremental capture (latest)
					Tails the binlog for incremental changes only.

				[Incremental data: INSERTed new rows, UPDATEd old/new rows, DELETEd expired rows]

		 */
		MySqlSource<String> mySqlSource = MySqlSource.<String>builder()
			.hostname("node101")
			.port(3306)
			.databaseList("flink_mall")
			.tableList("flink_mall.comment_info")
			.username("root")
			.password("123456")
			.startupOptions(StartupOptions.initial())  // full snapshot + incremental
//			.startupOptions(StartupOptions.latest()) // incremental only
			.deserializer(new JsonDebeziumDeserializationSchema())
			.build();
		DataStreamSource<String> stream = env.fromSource(
			mySqlSource, WatermarkStrategy.noWatermarks(), "MySQL Source"
		);
		stream.print("cdc");

		// 5. Trigger execution
		// Job name made consistent with the class name and checkpoint path
		// (was "CdcMysqlDemo", which belongs to the non-checkpoint demo).
		env.execute("CdcMysqlCheckpointDemo") ;
	}

}
