package com.sunzm.spark.structstream.source

import org.apache.spark.sql.streaming.{OutputMode, StreamingQuery}
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}
import org.slf4j.{Logger, LoggerFactory}

/**
 * Demo: reading data from Kafka with Spark Structured Streaming.
 *
 * @author Administrator
 * @version 1.0
 * @date 2021-07-04 23:44
 */
object StructuredStreamingKafkaSourceDemo {
  private val logger: Logger = LoggerFactory.getLogger(this.getClass.getName.stripSuffix("$"))

  // Demo defaults, used when the corresponding CLI argument is not supplied.
  private val DefaultBootstrapServers = "82.156.210.70:9093"
  // One of "assign", "subscribe" or "subscribePattern".
  private val DefaultSubscribeType = "subscribe"
  private val DefaultTopics = "applog"
  private val DefaultCheckpointLocation = "data/spark/sql/streaming/ck/kfk/"

  /**
   * Entry point: reads records from Kafka via Structured Streaming, keeps the
   * non-empty values and prints them to the console in Append mode.
   *
   * Usage: [bootstrap-servers] [subscribe-type] [topics] [checkpoint-location]
   * Any missing argument falls back to the hard-coded demo default, so running
   * with no arguments behaves exactly as before.
   *
   * @param args optional positional overrides for the Kafka/checkpoint settings
   */
  def main(args: Array[String]): Unit = {
    val bootstrapServers   = if (args.length > 0) args(0) else DefaultBootstrapServers
    val subscribeType      = if (args.length > 1) args(1) else DefaultSubscribeType
    val topics             = if (args.length > 2) args(2) else DefaultTopics
    val checkpointLocation = if (args.length > 3) args(3) else DefaultCheckpointLocation

    logger.info("Starting Kafka source demo: servers={}, {}={}, checkpoint={}",
      bootstrapServers, subscribeType, topics, checkpointLocation)

    val spark: SparkSession = SparkSession
      .builder()
      .appName(this.getClass.getSimpleName.stripSuffix("$"))
      .master("local[*]")
      .config("spark.default.parallelism", 6)
      .config("spark.sql.shuffle.partitions", 6)
      .getOrCreate()

    import spark.implicits._

    // Dataset representing the stream of input lines from Kafka.
    // startingOffsets accepts "earliest", "latest" (streaming only), or a JSON
    // string of per-partition offsets, e.g. {"topicA":{"0":23,"1":-1}};
    // the default is "latest" for streaming and "earliest" for batch.
    val lines: Dataset[String] = spark
      .readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", bootstrapServers)
      .option(subscribeType, topics)
      .option("startingOffsets", "earliest")
      .load()
      .selectExpr("CAST(value AS STRING)")
      .as[String]

    // Keep only non-empty records.
    val resultDS: Dataset[String] = lines.filter(_.nonEmpty)

    // Start the query that prints the streaming results to the console;
    // the checkpoint location makes the query restartable.
    val query: StreamingQuery = resultDS.writeStream
      .outputMode(OutputMode.Append())
      .format("console")
      .option("checkpointLocation", checkpointLocation)
      .start()

    // Block the driver until the streaming query is stopped or fails.
    query.awaitTermination()
  }
}
