package com.king.app

import com.alibaba.fastjson.{JSON, JSONObject}
import com.king.bean.TableProcess
import com.king.config.{DBServerConstant, StateBackendConfig}
import com.king.function.{CustomerDeseriallization, TableProcessFunction}
import com.king.util.MyKafkaUtil
import com.ververica.cdc.connectors.mysql.MySqlSource
import com.ververica.cdc.connectors.mysql.table.StartupOptions
import org.apache.flink.api.common.restartstrategy.RestartStrategies
import org.apache.flink.api.common.state.MapStateDescriptor
import org.apache.flink.runtime.state.filesystem.FsStateBackend
import org.apache.flink.streaming.api.CheckpointingMode
import org.apache.flink.streaming.api.environment.CheckpointConfig
import org.apache.flink.streaming.api.scala.{StreamExecutionEnvironment, _}

/**
 * @Author: KingWang
 * @Date: 2022/1/9
 * @Desc: Reads change-log records from the Kafka topic ods_base_db, drops
 *        deletes, and routes each record to a Kafka main output or an HBase
 *        side output, driven by a broadcast table_process configuration
 *        table captured from MySQL via Flink CDC.
 **/


object BaseDBApp {

  /**
   * Entry point. Pipeline:
   *   1. consume the ods_base_db topic from Kafka (main stream),
   *   2. parse each record as JSON and filter out "delete" operations,
   *   3. capture the table_process config table via Flink CDC and broadcast it,
   *   4. connect main + broadcast streams and let [[TableProcessFunction]]
   *      route records to the Kafka main output or the HBase side output.
   *
   * Sinks are currently `print` placeholders (step 8); the Kafka/Phoenix
   * writers are not implemented yet.
   */
  def main(args: Array[String]): Unit = {

    // 1. Execution environment — single parallelism keeps record order simple
    //    for this job; raise it once keying/partitioning is verified.
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(1)

    // 1.1 State backend: file-system checkpoint directory from config
    //     (previously a hard-coded HDFS path).
    val checkpointPath = StateBackendConfig.getFileCheckPointDir("base_db_app")
    println("checkpoint路径：" + checkpointPath)
    env.setStateBackend(new FsStateBackend(checkpointPath))

    // 1.2 Checkpointing (interval, mode, timeout, retention)
    configureCheckpointing(env)

    // Restart at most 2 times, waiting 10 s between attempts.
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(2, 10000L))

    // 2. Main stream: consume the ods_base_db topic.
    val topic = "ods_base_db"
    val groupId = "base_db_app_210325"
    val kafkaDS = env.addSource(MyKafkaUtil.getKafkaConsumer(topic, groupId))

    // 3. Parse each line to a JSONObject and drop "delete" change-log records.
    val jsonObjDS = kafkaDS
      .map(line => JSON.parseObject(line))
      .filter(obj => !"delete".equals(obj.getString("type")))

    // 4. Broadcast stream: the table_process configuration table, captured
    //    from MySQL via Flink CDC. `initial()` snapshots existing rows first,
    //    then streams subsequent binlog changes.
    val db = DBServerConstant.mysql_gmall_210325_realtime()
    // NOTE(review): the database/table names below say "210315" while the
    // consumer group and the config constant say "210325" — confirm which
    // suffix is correct; a typo here means the CDC source reads nothing.
    val cdcSource = MySqlSource.builder()
      .hostname(db.hostname)
      .port(db.port)
      .username(db.username)
      .password(db.password)
      .databaseList("gmall-210315-realtime")
      .tableList("gmall-210315-realtime.table_process")
      .startupOptions(StartupOptions.initial())
      .deserializer(new CustomerDeseriallization())
      .build()

    val tableProcessDS = env.addSource(cdcSource)
    // Broadcast state: sink-table key -> TableProcess config row.
    val mapState = new MapStateDescriptor[String, TableProcess]("map-state", classOf[String], classOf[TableProcess])
    val broadcastStream = tableProcessDS.broadcast(mapState)

    // 5. Connect main stream with the broadcast config stream.
    val connectedStream = jsonObjDS.connect(broadcastStream)

    // 6. Route records: main output -> Kafka, side output -> HBase.
    val hbaseTag = new OutputTag[JSONObject]("hbase-tag")
    val kafkaStream = connectedStream.process(new TableProcessFunction(hbaseTag, mapState))

    // 7. Extract the HBase-bound side output.
    val hbaseStream = kafkaStream.getSideOutput[JSONObject](hbaseTag)

    // 8. Sinks (placeholder prints; replace with Kafka producer / Phoenix sink).
    kafkaStream.print("kafkaStream>>>>>>>>>>>>>>>")
    hbaseStream.print("hbaseStream>>>>>>>>>>>>>>>")

    // 9. Launch the job.
    env.execute("BaseDBApp")
  }

  /** Configures checkpoint interval, mode, timing limits and retention on `env`. */
  private def configureCheckpointing(env: StreamExecutionEnvironment): Unit = {
    // Trigger a checkpoint every 5 s.
    env.enableCheckpointing(5000L)
    // Exactly-once checkpointing semantics.
    env.getCheckpointConfig.setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE)
    // At least 3 s between the end of one checkpoint and the start of the next.
    env.getCheckpointConfig.setMinPauseBetweenCheckpoints(3000)
    // Discard a checkpoint that does not complete within 10 s.
    // (Original comment claimed "one minute" — the code says 10 000 ms.)
    env.getCheckpointConfig.setCheckpointTimeout(10000L)
    // Allow up to 2 checkpoints in flight at once.
    // NOTE(review): original comment said "only one at a time" but the code
    // sets 2 — confirm which is intended; with minPause set, concurrency
    // above 1 rarely takes effect anyway.
    env.getCheckpointConfig.setMaxConcurrentCheckpoints(2)
    // Keep externalized checkpoints after job cancellation so the job can be
    // restored manually.
    env.getCheckpointConfig.enableExternalizedCheckpoints(
      CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION)
  }

}
