package com.tech

import com.tech.common.{GlobalValue, KuduUtil, ZkUtil}
import com.tech.customer.{LoadResourceManager, MyForeachWriter, MyStreamingQueryListener}
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.streaming.OutputMode
import org.apache.spark.sql.{Dataset, SparkSession}

import scala.collection.mutable.ArrayBuffer

/**
 * Company: 天正聚合 (Tianzheng Juhe)
 * File: Streaming
 * Author: zhoumindong
 * Date: 2021/7/6
 */
object Streaming {
  // Silence verbose Spark/Hadoop logging under the "org" namespace.
  Logger.getLogger("org").setLevel(Level.WARN)

  /**
   * Entry point of the Kafka -> Kudu structured streaming job.
   *
   * Expected command-line arguments:
   *   args(0) - Kafka bootstrap brokers, comma separated when there are several
   *   args(1) - subscribed topic(s), comma separated when there are several
   *   args(2) - Kudu master address(es), comma separated when there are several
   */
  def main(args: Array[String]): Unit = {
    // Fail fast with a clear message instead of an ArrayIndexOutOfBoundsException.
    require(args.length >= 3,
      "Expected 3 arguments: <kafkaBrokers> <topics> <kuduMaster>")
    println("开始")
    val broker = args(0)
    val topics = args(1)
    val kuduMaster = args(2)
    // Share the Kudu master with the global Kudu helper (read elsewhere by KuduUtil).
    KuduUtil.kuduMaster = kuduMaster

    val spark = SparkSession.builder()
      .appName("Streaming")
      // Dynamic allocation is disabled: executors must stay alive for the
      // long-running streaming query.
      .config("spark.dynamicAllocation.enabled", value = false)
      .getOrCreate()

    // Load one-time resources first, then the refreshable ones.
    val manager = new LoadResourceManager()
    manager.loadOnce(spark)
    manager.load(spark)
    // Listener reacts to query lifecycle/progress events (e.g. resource refresh).
    spark.streams.addListener(new MyStreamingQueryListener(spark, manager))

    val df = spark.readStream.format("kafka")
      .option("kafka.bootstrap.servers", broker)
      .option("subscribe", topics)
      // Resume from the offsets previously persisted in ZooKeeper.
      .option("startingOffsets", ZkUtil.getOffset(topics))
      // Cap each micro-batch at 1000 records to bound batch latency.
      .option("maxOffsetsPerTrigger", "1000")
      .load()

    import spark.implicits._
    val data: Dataset[(String, String, Int, Long)] = df
      .selectExpr(
        "CAST(value AS STRING)",
        "CAST(topic AS STRING)",
        "CAST(partition AS int)",
        "CAST(offset AS long)")
      .as[(String, String, Int, Long)]

    // Pass records through lazily. The original buffered every partition into
    // an ArrayBuffer before returning an iterator — an identity transformation
    // that needlessly held the whole partition in memory (and shadowed the
    // `partition` lambda parameter with a local of the same name).
    val result = data.mapPartitions { records =>
      records.map { case (json, topic, part, offset) => (json, topic, part, offset) }
    }

    val query = result.writeStream.format("foreach")
      .queryName("Streaming")
      .outputMode(OutputMode.Append)
      .foreach(new MyForeachWriter(manager, broker, kuduMaster))
      .start()
    // Block the driver until the streaming query terminates or fails.
    query.awaitTermination()
  }
}
