package com.yanggu.bigdata.realtime.app.dwd

import com.alibaba.fastjson.JSONObject
import com.ververica.cdc.connectors.mysql.source.MySqlSource
import com.ververica.cdc.connectors.mysql.table.StartupOptions
import com.yanggu.bigdata.realtime.app.BroadcastObject.tableProcessMapStateDescriptor
import com.yanggu.bigdata.realtime.app.OutputTagObject.dimHbaseOutputTag
import com.yanggu.bigdata.realtime.app.function.{HbaseDimRichSinkFunction, TableProcessBroadcastProcessFunction}
import com.yanggu.bigdata.realtime.app.ods.cdc.ConsumerDeserialization
import com.yanggu.bigdata.realtime.common.GmallConfig.KAFKA_BROKER_LIST
import com.yanggu.bigdata.realtime.utils.KafkaUtil
import org.apache.flink.api.common.eventtime.WatermarkStrategy
import org.apache.flink.api.java.utils.ParameterTool
import org.apache.flink.streaming.api.scala._
import org.apache.kafka.clients.producer.ProducerRecord

import java.lang

//Data flow: business data -> nginx -> backend service -> MySQL -> FlinkCDC -> Kafka(ods_base_db) -> Flink(BaseDBApp) -> Kafka(dwd) and HBase(DIM)
//Programs:  mock-db -> mysql -> FlinkCDC -> Kafka(zk) -> Flink(BaseDBApp) -> Kafka(zk)/phoenix(hbase, zk, hdfs)
//Dynamically splits business data according to the config table (TableProcess): dimension-table data goes to HBase, fact-table data goes to Kafka (dwd)
//CDC is used here, so checkpointing must be configured
object BaseDBApp {

  def main(args: Array[String]): Unit = {
    val env = StreamExecutionEnvironment.getExecutionEnvironment
    env.setParallelism(1)

    // Load JDBC connection settings for the config database from the classpath.
    val jdbcProps = ParameterTool.fromPropertiesFile(getClass.getResourceAsStream("/jdbc.properties"))

    // CDC source over the TableProcess configuration table.
    // StartupOptions.initial() is required: every restart must re-read the full
    // config table so the broadcast state can be rebuilt from scratch.
    val tableProcessSource = MySqlSource.builder[String]
      .hostname(jdbcProps.get("jdbc.host"))
      .port(jdbcProps.getInt("jdbc.port"))
      .username(jdbcProps.get("jdbc.username"))
      .password(jdbcProps.get("jdbc.password"))
      .databaseList("gmall_realtime")
      .tableList("gmall_realtime.table_process")
      .startupOptions(StartupOptions.initial())
      // custom deserializer producing JSON strings
      .deserializer(new ConsumerDeserialization)
      .build()

    // Broadcast the config-table changelog to every parallel subtask.
    val configBroadcastStream = env
      .fromSource(tableProcessSource, WatermarkStrategy.noWatermarks[String], "TableProcess MySQL Source")
      .broadcast(tableProcessMapStateDescriptor)

    // Main stream: ODS business-table change records from Kafka.
    val odsStream = KafkaUtil.getKafkaDataStream(env, KAFKA_BROKER_LIST, "ods_base_db", "ods_dwd_base_db_app")

    // Connect the main stream with the broadcast config stream; the process
    // function routes fact-table rows to the main output and dimension-table
    // rows to the HBase side output.
    val routedStream = odsStream
      .connect(configBroadcastStream)
      .process(new TableProcessBroadcastProcessFunction)

    routedStream.print("Kafka>>>>>")
    // Fact-table records: the destination Kafka topic is chosen per record
    // from its "sinkTable" field; the "after" image is the payload.
    routedStream.addSink(KafkaUtil.getKafkaSink(KAFKA_BROKER_LIST, (record: JSONObject, _: lang.Long) => {
      val targetTopic = record.getString("sinkTable")
      val payload = record.getJSONObject("after")
      new ProducerRecord[Array[Byte], Array[Byte]](targetTopic, payload.toJSONString.getBytes)
    }))

    // Dimension-table records from the side output are written to HBase.
    val dimStream = routedStream.getSideOutput(dimHbaseOutputTag)
    dimStream.print("Hbase>>>>>")
    dimStream.addSink(new HbaseDimRichSinkFunction)

    // Launch the job.
    env.execute("BaseDBApp Job")

  }

}
