package com.archgeek.spark.examples.streaming

import org.apache.calcite.sql.SqlWindow
import org.apache.spark.SparkConf
import org.apache.spark.sql.streaming.Trigger
import org.apache.spark.sql.{Column, SparkSession}
import org.apache.spark.sql.types.{DataTypes, StructType}

/**
 * Structured Streaming demo: reads JSON records from Kafka, parses them with an
 * explicit schema, and shows plain SQL, watermarked aggregation, and
 * stream-stream joins with console output.
 *
 * Created by pizhihui on 2021-03-01
 */
object WordCountDemo {

  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()
    // Use the overload with a default value: the no-default get() throws
    // NoSuchElementException when spark.app.name is not set.
    val appName = conf.get("spark.app.name", "StructuredKafkaWordCount")

    val spark = SparkSession.builder()
      .enableHiveSupport()
      .appName(appName)
      .getOrCreate()
    import spark.implicits._
    import org.apache.spark.sql.functions._

    // ---- Read the raw stream from Kafka ----
    val df = spark.readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", "host1:port1,host2:port2")
      .option("subscribe", "topic1")
      .load()

    // Kafka delivers the value as binary; cast to string before JSON parsing.
    val lines = df.selectExpr("CAST(value as STRING)").as[String]

    // Schema of the JSON payload: { body: {name}, headers: {name} }.
    val sf1 = DataTypes.createStructField("name", DataTypes.StringType, true)
    val st = DataTypes.createStructType(Array(sf1))

    val schema = new StructType()
      .add("body", st)
      .add("headers", st)

    val jsonCol = from_json(col("value"), schema)
    val rawDF2 = lines.select(jsonCol.as("parsed_value")).select("parsed_value.*")
    rawDF2.printSchema()
    rawDF2.createTempView("t_tmp_name")

    // ------ Plain SQL over the registered temp view
    val sqlDf = spark.sql("select name from t_tmp_name limit 1")
    // Streaming deduplication. The watermark delay must carry a time unit
    // ("30" alone is not a valid interval). NOTE(review): the event-time
    // column "time" and dedup key "age" must exist in the parsed schema —
    // confirm against the actual Kafka payload.
    val dedupDf = sqlDf.withWatermark("time", "30 seconds").dropDuplicates(Seq("age"))

    // ------- Windowed aggregation SQL
    val waterMarkDF = rawDF2.withWatermark("time", "30 minutes")
    val whereDF = waterMarkDF.where("age > 3")

    // agg() requires aggregate expressions — bare columns such as expr("age")
    // fail analysis. window() needs interval strings with units and must be
    // applied to the event-time column, not an arbitrary numeric column.
    val aggExprList = Array(expr("max(age)"), expr("count(name)"))
    whereDF.groupBy(window($"time", "32 seconds", "12 seconds"), $"word")
      .agg(aggExprList.head, aggExprList.tail: _*)

    // ------- Stream-stream join
    val waterDF2 = rawDF2.withWatermark("timestamp", "10 minutes")
    // "join" is not a valid join type; the default equi-join type is "inner".
    val joinDF = waterDF2.join(sqlDf, Seq("name"), "inner")

    // Aggregate over the view that was actually registered above — the table
    // "words" referenced originally was never created and would fail with an
    // AnalysisException at query start.
    val wordCountsDataFrame =
      spark.sql("select name, count(*) as total from t_tmp_name group by name")

    // Emit results to the console every second.
    wordCountsDataFrame.writeStream
      .option("truncate", "false")
      .trigger(Trigger.ProcessingTime(1 * 1000))
      .outputMode("update") // alternatives: complete, append
      .format("console")
      .start()

    // Block the driver until any active streaming query terminates.
    spark.streams.awaitAnyTermination()
  }

}
