package com.tech

import com.tech.common.KafkaUtil
import com.tech.config.ApplicationConfig
import com.tech.consumer.{LoadResourceManager, MyForeachWriter, MyStreamingQueryListener}
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.streaming.OutputMode

/**
 * Company: Tianzheng Juhe (天正聚合)
 * File: Streaming
 * Author: zhoumindong
 * Date: 2021/7/6
 */
object Streaming {
  // Silence Spark's verbose internal logging; keep warnings and above.
  Logger.getLogger("org").setLevel(Level.WARN)

  /**
   * Driver entry point for the marketing-automation streaming job.
   *
   * Expects exactly one argument: the comma-separated Kafka topic list to
   * subscribe to. The same value is used to suffix the Spark application
   * name and to look up persisted offsets when offsets come from Kudu.
   */
  def main(args: Array[String]): Unit = {
    // Fail fast with a usage message instead of an opaque
    // ArrayIndexOutOfBoundsException when the job is launched without args.
    require(args.nonEmpty, "Usage: Streaming <kafka-topics> (comma-separated topic list)")
    val topics = args(0)

    val spark = SparkSession
      .builder()
      .appName("MarketingAutomation_" + topics)
      // A streaming job keeps its executors busy continuously; dynamic
      // allocation would only churn them.
      .config("spark.dynamicAllocation.enabled", value = false)
      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
//      .master("local[*]")
      .getOrCreate()

    // Pre-load reference/dimension data used by the per-record writer.
    val manager = new LoadResourceManager()
    manager.load4D365(spark)
    manager.loadDimDataGroupDetail(spark)
    manager.loadOnlyOnce(spark)
    // NOTE(review): listener presumably refreshes/persists manager state on
    // query progress events — confirm against MyStreamingQueryListener.
    spark.streams.addListener(new MyStreamingQueryListener(spark, manager))

    val df = spark.readStream.format("kafka")
      .option("kafka.bootstrap.servers", ApplicationConfig.KAFKA_BROKER)
      .option("subscribe", topics)
      // Starting offsets come either from Kudu (persisted per-partition
      // offsets for these topics) or verbatim from config — e.g. "earliest",
      // "latest", or an explicit JSON map such as
      // {"retailevent":{"0":-1,"1":-1,"2":10399552,"3":-1}}
      .option("startingOffsets",
        if (ApplicationConfig.KAFKA_STARTING_OFFSETS.equalsIgnoreCase("kudu"))
          KafkaUtil.getOffsetFromKudu(topics)
        else
          ApplicationConfig.KAFKA_STARTING_OFFSETS)
      // Cap each micro-batch so catch-up after downtime stays bounded.
      .option("maxOffsetsPerTrigger", "10000")
      .load()

    import spark.implicits._
    // Project the Kafka record down to (value, topic, partition, offset) and
    // hand each element to the custom sink.
    val query = df
      .selectExpr(
        "CAST(value AS STRING)",
        "CAST(topic AS STRING)",
        "CAST(partition AS int)",
        "CAST(offset AS long)")
      .as[(String, String, Int, Long)]
      .writeStream.format("foreach")
      .outputMode(OutputMode.Append)
      .foreach(new MyForeachWriter(manager))
      .start()

    // Block the driver until the streaming query terminates or fails.
    query.awaitTermination()
  }
}

