package zy.learn.demo.structuredstreaming.watermark

import java.sql.Timestamp

import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.streaming.OutputMode

/**
 * Sorting a streaming DataFrame/Dataset is only supported on an aggregated
 * result in Complete output mode. Because the OutputMode here is Complete,
 * the watermark has no effect (late rows are never dropped from the state).
 * Spark's error otherwise reads:
 * "Sorting is not supported on streaming DataFrames/Datasets,
 * unless it is on aggregated DataFrame/Dataset in Complete output mode"
 */
object WatermarkSort {
  def main(args: Array[String]): Unit = {
    // Keep shuffle partitions low so the local console demo produces output quickly.
    val sparkConf = new SparkConf().set("spark.sql.shuffle.partitions", "3")
    val spark = SparkSession.builder()
      .master("local[2]")
      .config(sparkConf)
      .appName("Watermark Sort")
      .getOrCreate()

    import spark.implicits._

    // Socket source: start e.g. `nc -lk 9999` on host co7-203 and type records there.
    val lines = spark.readStream
      .format("socket")
      .option("host", "co7-203")
      .option("port", 9999)
      .load
    /* Sample input, one "<timestamp>,<word>" record per line:
    * 2020-10-14 10:55:00,dog     # watermark: 10:55:00 - 2 min = 10:53:00
    * 2020-10-14 11:00:00,cat     # watermark: 11:00:00 - 2 min = 10:58:00
    * 2020-10-14 10:55:00,pig     # watermark: 10:55:00 - 2 min = 10:53:00 < 10:58:00 => stays 10:58:00
    * 2020-10-14 11:05:00,cat     # watermark: 11:05:00 - 2 min = 11:03:00
    * 2020-10-14 10:53:00,dog     # behind the watermark, yet still counted: Complete mode keeps all state
    * 2020-10-14 11:07:00,monkey
    * Resulting output:
+------+---+
|word  |cnt|
+------+---+
|dog   |2  |
|cat   |2  |
|pig   |1  |
|monkey|1  |
+------+---+
    * */
    // Parse each line into (event time, word). Assumes well-formed input
    // ("yyyy-MM-dd HH:mm:ss,word"); a malformed line fails the query — acceptable for a demo.
    val wordsDF = lines.as[String].map(line => {
      val split = line.split(",")
      (Timestamp.valueOf(split(0)), split(1))
    }).toDF("ts", "word")

    // Declare a 2-minute watermark on the event-time column, then expose the
    // stream to Spark SQL. In Complete output mode the watermark does not drop
    // late data — it is kept here only to illustrate that point.
    wordsDF
      .withWatermark("ts", "2 minutes")
      .createOrReplaceTempView("testtable")

    // ORDER BY is legal only because this is an aggregation emitted in Complete mode.
    val wordsSort = spark.sql(
      """
        |select word, count(*) as cnt from testtable group by word order by cnt desc
        |""".stripMargin)

    val query = wordsSort.writeStream
      .format("console")
      .outputMode(OutputMode.Complete())
      .option("truncate", "false")
      .start()

    // Block until the query is stopped externally or fails.
    query.awaitTermination()
  }
}
