package com.shujia.spark.streaming

import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * Structured Streaming word count over Kafka.
 *
 * Reads comma-separated words from the Kafka topic "spark_t1", counts
 * occurrences per word, and writes each result as a "word,count" string
 * to the Kafka topic "word_cnt_02". Runs until externally terminated.
 */
object Demo08StructuredOnKafka {

  def main(args: Array[String]): Unit = {
    // Local session; a single shuffle partition keeps local runs lightweight.
    val session: SparkSession = SparkSession
      .builder()
      .appName(this.getClass.getSimpleName.replace("$", ""))
      .master("local[2]") // run locally with 2 threads
      .config("spark.sql.shuffle.partitions", "1")
      .getOrCreate()

    import org.apache.spark.sql.functions._

    // Source: subscribe to the input topic, starting from the earliest offset
    // (only applies on the first run; afterwards the checkpoint decides).
    val kafkaSource: DataFrame = session
      .readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", "master:9092,node1:9092,node2:9092")
      .option("subscribe", "spark_t1")
      .option("startingOffsets", "earliest")
      .option("kafka.group.id", "grp00001")
      .load()

    // Kafka delivers key/value as binary; cast both to readable strings.
    val lines = kafkaSource.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")

    // Split each record's value on commas and flatten to one word per row.
    val words = lines.select(explode(split(col("value"), ",")).as("word"))

    // Running count per word. The Kafka sink expects string "key"/"value"
    // columns, so the count is cast to String and joined as "word,count".
    val wordCounts = words
      .groupBy(col("word").as("key"))
      .agg(count("*").cast("String").as("value"))
      .select(concat_ws(",", col("key"), col("value")).as("value"))

    // Sink: emit updated counts back to Kafka; the checkpoint directory
    // provides exactly-once state recovery across restarts.
    val query = wordCounts
      .writeStream
      .outputMode("update")
      .format("kafka")
      .option("kafka.bootstrap.servers", "master:9092,node1:9092,node2:9092")
      .option("topic", "word_cnt_02")
      .option("checkpointLocation", "spark/data/stream/struct/ck")
      .start()

    query.awaitTermination()
  }

}
