package com.yangu.flink.cdc

import com.ververica.cdc.connectors.mysql.source.MySqlSource
import com.ververica.cdc.connectors.mysql.table.StartupOptions
import com.ververica.cdc.debezium.JsonDebeziumDeserializationSchema
import org.apache.flink.api.common.eventtime.WatermarkStrategy
import org.apache.flink.api.common.restartstrategy.RestartStrategies
import org.apache.flink.api.java.utils.ParameterTool
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend
import org.apache.flink.streaming.api.CheckpointingMode
import org.apache.flink.streaming.api.scala._

/**
 * Steps to enable checkpointing in Flink:
 * 1. Set the checkpoint interval (time between the *starts* of two consecutive checkpoints)
 * 2. Configure the restart strategy
 * 3. Configure the durable checkpoint storage path
 * 4. Configure the checkpoint consistency mode
 * 5. Configure the checkpoint timeout
 * 6. Configure the maximum number of concurrent checkpoints
 * 7. Configure the minimum pause between the *end* of one checkpoint and the *start* of the next
 *
 * To access HDFS from a locally-debugged Flink program:
 * https://blog.csdn.net/wmpisadba/article/details/117952310
 * 1. Add the Flink/Hadoop bridge dependency (available in the Maven central repository):
 * <dependency>
 *    <groupId>org.apache.flink</groupId>
 *    <artifactId>flink-shaded-hadoop-3-uber</artifactId>
 *    <version>3.1.1.7.0.3.0-79-7.0</version>
 * </dependency>
 * 2. Set the environment variable HADOOP_USER_NAME=root
 *
 */
object Flink_CDCWithCheckpoint {

  /**
   * Entry point. Builds a Flink streaming job that captures MySQL binlog
   * changes (CDC) for the `gmall_flink.base_trademark` table and prints
   * each change record as a Debezium-style JSON string.
   *
   * Checkpoint state is kept in a HashMapStateBackend and persisted to HDFS,
   * so the job can recover with exactly-once state consistency.
   */
  def main(args: Array[String]): Unit = {

    // 1. Create the streaming execution environment.
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    // 1.1 Parallelism 1 keeps the printed output ordering simple for debugging.
    env.setParallelism(1)
    // 1.2 Enable checkpointing with an explicit state backend.
    // Since Flink 1.12 the state-backend API was redesigned:
    // HashMapStateBackend + a checkpoint storage URI replaces the old
    // FsStateBackend(hdfsPath) configuration.
    env.setStateBackend(new HashMapStateBackend())

    // Interval between the starts of two consecutive checkpoints (ms).
    env.enableCheckpointing(5000L)
    // Restart up to 20 times on failure, waiting 5s between attempts.
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(20, 5000L))
    val checkpointConfig = env.getCheckpointConfig
    // Durable checkpoint storage location on HDFS.
    checkpointConfig.setCheckpointStorage("hdfs://hadoop100:8020/gmall-flink/checkpoint")
    // Exactly-once state consistency.
    checkpointConfig.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE)
    // A checkpoint that takes longer than 10s is aborted.
    checkpointConfig.setCheckpointTimeout(10000L)
    // At most 2 checkpoints may be in flight at the same time
    // (this limits concurrency, not the total number of checkpoints).
    checkpointConfig.setMaxConcurrentCheckpoints(2)
    // Minimum pause (ms) between the end of one checkpoint and the start of the next.
    checkpointConfig.setMinPauseBetweenCheckpoints(3000)

    // Load JDBC connection settings from the classpath.
    // Use the InputStream overload rather than getResource(...).getPath:
    // getPath yields a "jar:file:...!/..." pseudo-path once the job is packaged
    // into a jar (unopenable as a file), and NPEs if the resource is missing.
    val propsStream = getClass.getResourceAsStream("/jdbc.properties")
    require(propsStream != null, "jdbc.properties not found on the classpath")
    val tool = ParameterTool.fromPropertiesFile(propsStream)

    // MySQL CDC source definition.
    val mysqlSource = MySqlSource.builder[String]
      .hostname(tool.get("jdbc.host"))
      .port(tool.getInt("jdbc.port"))
      .username(tool.get("jdbc.username"))
      .password(tool.get("jdbc.password"))
      // databaseList is mandatory.
      .databaseList("gmall_flink")
      // Recent CDC connector versions require both databaseList and tableList.
      // To watch every table in a database, use the "dbName.*" pattern.
      .tableList("gmall_flink.base_trademark")
      // initial(): snapshot existing rows first, then stream the binlog.
      .startupOptions(StartupOptions.initial())
      .deserializer(new JsonDebeziumDeserializationSchema())
      .build()

    env
      // Attach the MySQL CDC source (unbounded; no event-time watermarks needed).
      .fromSource(mysqlSource, WatermarkStrategy.noWatermarks[String], "MySQL Source")
      // Print each change record to stdout for debugging.
      .print()

    env.execute("Flink_CDCWithCheckpoint Job")
  }

}
