package com.shujia.streaming

import org.apache.spark.sql.streaming.OutputMode
import org.apache.spark.sql.{DataFrame, SparkSession}

/**
 * Structured Streaming demo: a DataFrame-based streaming word count.
 *
 * Reads comma-separated lines from a socket source (host "master", port 7777),
 * counts occurrences per word, and prints the running counts to the console.
 */
object Demo06StructuredStreaming {

  def main(args: Array[String]): Unit = {
    // Local session with 2 cores; 2 shuffle partitions keep this demo lightweight.
    val spark: SparkSession = SparkSession
      .builder()
      .appName("Demo06StructureStreaming")
      .master("local[2]")
      .config("spark.sql.shuffle.partitions", "2")
      .getOrCreate()

    import spark.implicits._
    import org.apache.spark.sql.functions._

    // Unbounded source: each socket line arrives as a row with a single "value" column.
    val linesDF: DataFrame = spark
      .readStream
      .format("socket")
      .option("host", "master")
      .option("port", 7777)
      .load()

    // Split each line on commas, explode into one row per word, then count per word.
    val wordCountsDF: DataFrame = linesDF
      .select(explode(split($"value", ",")) as "words")
      .groupBy($"words")
      .agg(count("*") as "cnt")

    /**
     * Output modes:
     *   Append   — only valid for queries WITHOUT aggregation
     *   Complete — only valid for queries WITH aggregation
     *   Update   — emits only the rows that changed since the last trigger
     */
    val query = wordCountsDF
      .writeStream
      .outputMode(OutputMode.Update())
      .format("console")
      .start()

    // Block the driver thread until the streaming query is stopped or fails.
    query.awaitTermination()
  }

}
