package db.app

import com.cw.realtime.common.base.FlinkRunner.{RunnerConfig, run}
import com.cw.realtime.common.bean.TableProcessDwd
import com.cw.realtime.common.constant.Constant._
import com.cw.realtime.common.util.FlinkSourceUtil
import db.function.BcProcFunction4BaseDb
import io.circe._
import org.apache.flink.api.common.eventtime.WatermarkStrategy
import org.apache.flink.api.common.state.MapStateDescriptor
import org.apache.flink.api.scala.createTypeInformation
import org.apache.flink.connector.base.DeliveryGuarantee
import org.apache.flink.connector.kafka.sink.{KafkaRecordSerializationSchema, KafkaSink}
import org.apache.flink.streaming.api.scala.{DataStream, StreamExecutionEnvironment}
import org.apache.kafka.clients.producer.ProducerRecord

import java.lang

object DwdBaseDbApp {

  /**
   * DWD base-db job: reads the raw `topic_db` change stream, filters it against
   * routing rules (table `table_process_dwd`, streamed from MySQL via CDC and
   * broadcast to all subtasks), projects each record down to the rule's sink
   * columns, and writes the result to the per-rule Kafka topic exactly-once.
   */
  def main(args: Array[String]): Unit = {
    // NOTE(review): 10019 is presumably a job-local port/id consumed by
    // FlinkRunner — confirm against RunnerConfig.
    implicit val config: RunnerConfig = RunnerConfig("dwd_base_db", TOPIC_DB, 10019)
    run { (env, ds) =>
      // Raw strings -> parsed JSON objects, restricted to the "gmall" database.
      val jsonObjectStream = etl(ds)

      // Dynamic routing rules (which table/type goes to which sink topic).
      val tableProcessStream = fromMysqlCdc(env)

      val bcDesc = new MapStateDescriptor("dwd_state", classOf[String], classOf[TableProcessDwd])
      val broadcastStream = tableProcessStream.broadcast(bcDesc)

      // Keep only records whose type/table match a broadcast rule.
      val filteredStream = jsonObjectStream.connect(broadcastStream)
        .process(new BcProcFunction4BaseDb(bcDesc)).setParallelism(1)

      // (sinkTopic, payload) pairs carrying only the configured columns (+ ts).
      val extractedStream = extractFields(filteredStream)

      extractedStream.sinkTo(baseDbSink)
    }
  }

  /**
   * Exactly-once Kafka sink: each `(topic, json)` element is written to the
   * topic named by its first component.
   */
  def baseDbSink: KafkaSink[(String, Json)] = {
    import java.nio.charset.StandardCharsets

    KafkaSink.builder()
      .setBootstrapServers(KAFKA_BROKERS)
      .setRecordSerializer(new KafkaRecordSerializationSchema[(String, Json)] {
        type ContextType = KafkaRecordSerializationSchema.KafkaSinkContext

        override def serialize(
          element: (String, Json),
          context: ContextType, timestamp: lang.Long): ProducerRecord[Array[Byte], Array[Byte]] = {
          // FIX: `noSpaces` instead of `toString` — circe's Json.toString
          // pretty-prints (spaces2), inflating every record; also encode
          // explicitly as UTF-8 rather than the platform default charset.
          new ProducerRecord[Array[Byte], Array[Byte]](
            element._1,
            element._2.noSpaces.getBytes(StandardCharsets.UTF_8))
        }
      })
      .setDeliveryGuarantee(DeliveryGuarantee.EXACTLY_ONCE)
      // FIX: the transactional-id prefix must be STABLE across restarts so a
      // recovering job can abort its own lingering transactions; the previous
      // `... + System.currentTimeMillis()` gave every run a fresh prefix,
      // leaving old open transactions to block read_committed consumers until
      // the transaction timeout expired.
      .setTransactionalIdPrefix("cw-base_db-dwd_base_db")
      // Must not exceed the broker's transaction.max.timeout.ms (15 min default).
      .setProperty("transaction.timeout.ms", (15 * 60 * 1000).toString)
      .build()
  }

  /**
   * Parses each raw line into a JSON object, keeping only records whose
   * "database" field is "gmall". Unparseable lines are logged and dropped;
   * records without the expected structure are silently dropped.
   */
  def etl(ds: DataStream[String]): DataStream[JsonObject] = {
    // FIX: `parser.parse(_) match` expands to `(x => parser.parse(x)) match`,
    // i.e. it matches the *function value* against Left/Right instead of the
    // parse result — an explicit lambda parameter is required here.
    ds.flatMap { (line: String) =>
      parser.parse(line) match {
        case Left(err) => println(err); None // best-effort: log and drop
        case Right(value) =>
          for {
            obj <- value.asObject
            dbField <- obj("database")
            db <- dbField.asString if "gmall" == db
          } yield obj
      }
    }
  }

  /**
   * Streams `table_process_dwd` rule rows from MySQL via CDC, decoding each
   * change record into a [[TableProcessDwd]]. Parse failures are logged and
   * dropped; decode failures are silently dropped (`toOption`).
   */
  def fromMysqlCdc(env: StreamExecutionEnvironment): DataStream[TableProcessDwd] = {
    env.fromSource(
      FlinkSourceUtil.getMysqlSource(DATABASE_PROCESS, TABLE_PROCESS_DWD),
      WatermarkStrategy.noWatermarks(),
      "table_process_dwd"
    ).flatMap { (raw: String) =>
      // FIX: same placeholder-expansion bug as in etl — name the parameter.
      parser.parse(raw) match {
        case Left(err) => println(err); None
        case Right(value) => value.as[TableProcessDwd].toOption
      }
    }
  }

  /**
   * For each (record, rule) pair, projects the record's "data" object down to
   * the rule's sinkColumns, carries the source "ts" through, and emits
   * (sinkTable, json) ready for the Kafka sink. Pairs missing "data" or "ts"
   * are dropped.
   */
  def extractFields(ds: DataStream[(JsonObject, TableProcessDwd)]): DataStream[(String, Json)] = {
    ds.flatMap { (in: (JsonObject, TableProcessDwd)) =>
      val (jsonObj, tpd) = in
      for {
        data <- jsonObj("data")
        dataObj <- data.asObject
        ts <- jsonObj("ts")
        // FIX: trim each configured column name so "a, b" matches key "b" as
        // well as "a"; a Set also makes the per-key lookup O(1).
        wanted = tpd.sinkColumns.split(",").map(_.trim).toSet
        projected = dataObj.filterKeys(wanted.contains).add("ts", ts)
      } yield tpd.sinkTable -> projected.toJson
    }
  }

}
