package tech.spiro.spark.streaming

import java.nio.charset.StandardCharsets

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.{col, date_format}

import tech.spiro.log.PerfLogMessageFlatten

object StructuredStreamingKafkaExample {

  /**
   * Flattens one raw Kafka message and pairs it with its partition date.
   *
   * @param v raw message payload (UTF-8 text)
   * @param t partition date string in "yyyy-MM-dd" form
   * @return tuple of (flattened payload, partition date)
   */
  def processValue(v: String, t: String): (String, String) = {
    // NOTE(review): PerfLogMessageFlatten is project-local; a fresh instance
    // is created per record — assumed cheap and stateless. TODO confirm it
    // can be hoisted/reused if per-record allocation shows up in profiling.
    val flatten = new PerfLogMessageFlatten()
    // StandardCharsets.UTF_8 avoids the charset-name lookup and the checked
    // UnsupportedEncodingException path of the String-based overloads.
    val flattened =
      new String(flatten.flattenMessage(v.getBytes(StandardCharsets.UTF_8)), StandardCharsets.UTF_8)
    (flattened, t)
  }

  /**
   * Entry point: streams a Kafka topic, flattens each message, and writes the
   * result as text files partitioned by the record's date. Runs until the
   * streaming query terminates.
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder
      .appName("kafkaLog")
      .master("local") // local example; pass the master via spark-submit in real deployments
      .getOrCreate()

    import spark.implicits._

    // Source: replay the topic from the earliest offsets, throttled to at
    // most 1000 offsets per micro-batch.
    val df = spark.readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", "bddevk01.dbhotelcloud.com:9092,bddevk02.dbhotelcloud.com:9092")
      .option("subscribe", "perf_bestwesternstorage_raw")
      .option("maxOffsetsPerTrigger", 1000)
      .option("startingOffsets", "earliest")
      .load()

    // Keep only the message payload (as text) plus a day-level partition key
    // derived from the Kafka record timestamp.
    val df1 = df.select(
      col("value").cast("string"),
      date_format(col("timestamp"), "yyyy-MM-dd").alias("dt")
    )

    val ds1 = df1.as[(String, String)]
    val ds2 = ds1.map(r => processValue(r._1, r._2)).as[(String, String)]
    val df2 = ds2.toDF("value", "dt")

    // Sink: date-partitioned text files; the checkpoint directory enables
    // recovery of stream progress across restarts.
    // To compress the output, add .option("compression", "gzip") before .start().
    val query = df2.writeStream
      .format("text")
      .option("checkpointLocation", "file:///D:/test_spark_7/checkpoint/")
      .option("path", "file:///D:/test_spark_7/output/")
      .partitionBy("dt")
      .start()

    query.awaitTermination()
  }

}
