package com.sunzm.spark.structstream

import org.apache.spark.sql.streaming.{OutputMode, StreamingQuery}
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}
import org.slf4j.{Logger, LoggerFactory}

/**
 *
 * Structured Streaming getting-started example: streaming word count.
 *
 * @author Administrator
 * @version 1.0
 * @date 2021-07-04 23:44
 */
object StructuredStreamingStartDemo {
  private val logger: Logger = LoggerFactory.getLogger(this.getClass.getName.stripSuffix("$"))

  // Fallback connection settings, used when no CLI arguments are supplied.
  private val DefaultHost = "82.156.210.70"
  private val DefaultPort = 9999

  /**
   * Entry point. Reads lines from a TCP socket, splits them on commas and
   * maintains a running word count that is printed to the console.
   *
   * Usage: StructuredStreamingStartDemo [&lt;hostname&gt; &lt;port&gt;]
   * Falls back to the built-in demo host/port when arguments are omitted,
   * so a no-argument launch behaves exactly as before.
   */
  def main(args: Array[String]): Unit = {
    // Prefer CLI arguments; fall back to the demo defaults so the example
    // still runs when launched without any arguments.
    val (host, port) =
      if (args.length >= 2) (args(0), args(1).toInt)
      else (DefaultHost, DefaultPort)

    logger.info(s"Connecting to socket source at {}:{}", host, port)

    val spark: SparkSession = SparkSession
      .builder()
      .appName(this.getClass.getSimpleName.stripSuffix("$"))
      .master("local[*]")
      .config("spark.default.parallelism", 6)
      .config("spark.sql.shuffle.partitions", 6)
      .getOrCreate()

    import spark.implicits._

    // Create a streaming DataFrame from a socket source (one row per line).
    val lines: DataFrame = spark.readStream
      .format("socket")
      .option("host", host)
      .option("port", port)
      .load()

    // Input looks like "hello,spark,hello,flink": split each line into words on commas.
    val words: Dataset[String] = lines.as[String].flatMap(_.split(","))

    // Group by word (the default column name is "value") and count occurrences.
    // Structured Streaming keeps this aggregation state across micro-batches.
    val wordCounts: DataFrame = words.groupBy("value").count()

    // Start the query and write the results to the console sink.
    val query: StreamingQuery = wordCounts.writeStream
      /**
       * Output modes:
       * <ul>
       * <li> `append`: only newly added result rows are emitted (only valid
       *      when the query has no aggregation) </li>
       * <li> `complete`: the entire result table is emitted on every trigger </li>
       * <li> `update`: only rows updated since the last trigger are emitted;
       *      without aggregations it is equivalent to `append` mode </li>
       * </ul>
       */
      .outputMode(OutputMode.Complete())
      //.outputMode(OutputMode.Update())
      // Append mode cannot be used here because the query contains an aggregation.
      //.outputMode(OutputMode.Append())
      .format("console")
      .start()

    // Block until the streaming query terminates (error or external stop).
    query.awaitTermination()
  }
}
