package com.shujia.stream

import org.apache.spark.sql.streaming.OutputMode
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
  * Streaming word count over a Kafka topic using Spark Structured Streaming.
  *
  * Reads comma-separated words from the Kafka topic "words", counts occurrences
  * of each word across the whole stream, and continuously prints the running
  * totals to the console. Runs until externally terminated (awaitTermination
  * blocks forever).
  */
object Demo7SqlOnkafka {
  def main(args: Array[String]): Unit = {

    val spark: SparkSession = SparkSession
      .builder()
      .master("local[4]")
      // Keep shuffle parallelism low for a local demo; the default of 200
      // partitions is wasteful for tiny streaming batches.
      .config("spark.sql.shuffle.partitions", 1)
      .appName("kafka")
      .getOrCreate()

    // Kafka source: subscribes to the "words" topic.
    // NOTE(review): no "startingOffsets" option is set, so the stream starts
    // from the latest offsets by default — confirm this is intended.
    val kafkaDF: DataFrame = spark
      .readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", "master:9092,node1:9092,node2:9092")
      .option("subscribe", "words")
      .load()

    /**
      * Schema of the Kafka source DataFrame (printed below):
      *
      * |-- key: binary (nullable = true)
      * |-- value: binary (nullable = true)
      * |-- topic: string (nullable = true)
      * |-- partition: integer (nullable = true)
      * |-- offset: long (nullable = true)
      * |-- timestamp: timestamp (nullable = true)
      * |-- timestampType: integer (nullable = true)
      */
    kafkaDF.printSchema()

    import org.apache.spark.sql.functions._
    import spark.implicits._

    // Extract the message payload and cast it from binary to String.
    val valueDF: DataFrame = kafkaDF.selectExpr("cast(value as String)")

    // Split each message on commas, explode into one row per word,
    // and maintain a running count per word.
    val countDF: DataFrame = valueDF.select(explode(split($"value", ",")) as "word")
      .groupBy($"word")
      .agg(count($"word"))

    // Complete mode re-emits the full result table every batch, which is
    // required for a non-windowed streaming aggregation on a console sink.
    countDF
      .writeStream
      .outputMode(OutputMode.Complete())
      .format("console")
      .start()
      .awaitTermination()
  }
}
