package com.example.spark.structuredstreaming

import com.example.util.{SparkUtil, YamlUtil}
import org.apache.spark.sql._
import org.apache.spark.sql.streaming.Trigger
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}

import java.util

/**
 * @title: StructuredStreamingOperation
 * @projectName bigdata
 * @description: StructuredStreamingOperation
 *               http://spark.apache.org/docs/latest/structured-streaming-programming-guide.html
 * @author leali
 * @date 2022/5/17 23:09
 */
object StructuredStreamingOperation {

  // Shared session for every demo in this object; created eagerly at object init.
  val spark: SparkSession = SparkUtil.initSimpleSparkSession(appName = "StructuredStreamingOperation")

  /**
   * Streaming word count over a socket source, printed to the console.
   *
   * Reads lines from node01:9999, splits them on spaces, aggregates per-word
   * counts and emits the full result table (complete mode) on every trigger.
   * Blocks until the query terminates, then stops the session.
   */
  def OperationSocket(): Unit = {
    import spark.implicits._

    val df: DataFrame = spark.readStream
      .format("socket")
      .option("host", "node01")
      .option("port", 9999)
      .load()

    df.printSchema()

    /**
     * NOTE: df.show() is illegal on a streaming DataFrame:
     * org.apache.spark.sql.AnalysisException: Queries with streaming sources must be executed with writeStream.start();
     */

    val ds: Dataset[String] = df.as[String]
    val result: Dataset[Row] = ds.flatMap((_: String).split(" "))
      .groupBy('value)
      .count()
      .orderBy('count.desc)

    // Step 3. sink the result to the console
    result.writeStream
      .format("console")
      // complete mode is required: the query contains an aggregation plus a global sort
      .outputMode("complete")
      // Step 4. start the query and block until it terminates
      .start()
      .awaitTermination()

    // Step 5. release resources
    spark.stop()
  }

  /**
   * Demo of the built-in "rate" source, which synthesizes (timestamp, value)
   * rows at a configurable speed — handy for testing without external systems.
   */
  def OperateSourceRate(): Unit = {
    val df: DataFrame = spark.readStream
      .format("rate")
      // number of rows generated per second
      .option("rowsPerSecond", "10")
      // ramp-up period before the source reaches full speed
      .option("rampUpTime", "0s")
      .option("numPartitions", "2")
      .load()

    DealStream(df)

    spark.stop()
  }

  /**
   * Common sink used by the source demos: append each micro-batch to the
   * console without truncating column contents, and block until termination.
   */
  private def DealStream(df: DataFrame): Unit = {
    df.writeStream
      .format("console")
      //Complete output mode not supported when there are no streaming aggregations
      //.outputMode("complete")
      .outputMode("append")
      // do not truncate columns — show full cell contents
      .option("truncate", value = false)
      .start()
      .awaitTermination()
  }

  /**
   * Demo of a file (CSV) streaming source. Unlike batch reads, a streaming
   * read of structured files requires an explicit schema.
   */
  def OperateSourceFile(): Unit = {

    val csvSchema: StructType = StructType(List(
      StructField("name", StringType, nullable = true),
      StructField("age", IntegerType, nullable = true),
      StructField("hobby", StringType, nullable = true)
    ))

    val df: DataFrame = spark.readStream
      .option("sep", ";")
      .option("header", "false")
      // NOTE: streaming reads must declare the schema explicitly, even for structured files
      .schema(csvSchema)
      .format("csv").load("src/data/input/persons")

    DealStream(df)

    spark.stop()
  }

  /**
   * Builds the shared word stream: reads lines from the node01:9999 socket
   * and flat-maps each line into individual space-separated words.
   */
  private def GetDataSetBySocket(): Dataset[String] = {
    import spark.implicits._
    spark.readStream
      .format("socket")
      .option("host", "node01")
      .option("port", 9999)
      .load()
      .as[String]
      .flatMap((_: String).split(" "))
  }

  /**
   * Runs the same word-count aggregation twice — once via the DSL and once
   * via SQL on a temp view — each writing complete-mode output to the console.
   */
  def OperateOperation(): Unit = {

    import spark.implicits._
    //==== DSL
    val wordsDS: Dataset[String] = GetDataSetBySocket()
    val result1: Dataset[Row] = wordsDS
      .groupBy('value)
      .count()
      .orderBy('count.desc)


    // ==== SQL
    wordsDS.createOrReplaceTempView("t_words")
    val sql: String =
      """
        |select value,count(*) as counts
        |from t_words
        |group by value
        |order by counts desc
        |""".stripMargin
    val result2: DataFrame = spark.sql(sql)


    result1.writeStream
      .format("console")
      .outputMode("complete")
      .start()
    //.awaitTermination() // NOTE: intentionally not awaited — more code follows below

    result2.writeStream
      .format("console")
      //OutputMode append --> default
      .outputMode("complete")
      .start()
      .awaitTermination()

    spark.stop()
  }

  /**
   * Demonstrates different sinks for the word-count stream.
   *
   * @param sinkLocation one of "MEMORY" (in-memory table polled via SQL),
   *                     "FOREACH" (foreachBatch writing to console and MySQL),
   *                     "FILE", "KAFKA" (placeholders)
   */
  def OperateSink(sinkLocation: String): Unit = {

    import spark.implicits._
    val result: Dataset[Row] = GetDataSetBySocket().groupBy('value)
      .count()
      .orderBy('count.desc)
    sinkLocation match {
      case "MEMORY" =>
        // Materialize the aggregation into an in-memory table named t_result.
        // BUG FIX: keep the StreamingQuery handle and bound the polling loop on
        // query.isActive — the original `while (true)` made spark.close()
        // below unreachable dead code.
        val query = result.writeStream
          .format("memory")
          .queryName("t_result")
          .outputMode("complete")
          .start()
        // Poll the in-memory table every 3s while the query is running.
        while (query.isActive) {
          spark.sql("select * from t_result").show
          Thread.sleep(3000)
        }
        spark.close()
      case "FOREACH" =>
        result.writeStream
          .foreachBatch((ds: Dataset[Row], batchId: Long) => {
            // custom sink part 1: dump the batch to the console
            println("-------------")
            println(s"batchId-->:$batchId")
            println("-------------")
            ds.show()
            // custom sink part 2: overwrite the batch into MySQL
            val envInfo: util.LinkedHashMap[String, String] = YamlUtil.getEnvInfo("LOCAL_MYSQL")
            ds.coalesce(1)
              .write
              .mode(SaveMode.Overwrite)
              .format("jdbc")
              .option("driver", "com.mysql.cj.jdbc.Driver")
              .option("url", envInfo.get("url"))
              .option("user", envInfo.get("user"))
              .option("password", envInfo.get("password"))
              .option("dbtable", "t_struct_words")
              .save()
          })
          .outputMode("complete")
          .start()
          .awaitTermination()
        spark.close()
      case "FILE" => println("File")
      case "KAFKA" => println("KAFKA")
    }
  }

  /**
   * Demonstrates trigger configuration plus checkpointing for the word-count
   * aggregation, writing complete-mode output to the console.
   */
  def OperateSinkTriggerCheckpoint(): Unit = {
    import spark.implicits._
    val result: Dataset[Row] = GetDataSetBySocket()
//      .coalesce(1)
      .groupBy('value)
      .count()
    result.writeStream
      .format("console")
      .outputMode("complete")
      // Trigger options:
      // 1. default (omit .trigger): run micro-batches as fast as possible
      // 2. Trigger.ProcessingTime("0 seconds") — equivalent to the default
      // 3. Trigger.ProcessingTime("5 seconds") — fixed micro-batch interval
      // 4. Trigger.Once() — process one batch, then stop
      // 5. Trigger.Continuous("1 second") — experimental continuous mode with
      //    the given checkpoint interval. BUG FIX: continuous processing does
      //    not support aggregations, and this query contains groupBy/count, so
      //    the original .trigger(Trigger.Continuous("1 second")) failed at
      //    runtime with "Continuous processing does not support Aggregate
      //    operations." A micro-batch trigger is used instead.
      //.trigger(Trigger.Continuous("1 second"))
      .trigger(Trigger.ProcessingTime("5 seconds"))
      .option("checkpointLocation", "src/data/output/checkpoint" + System.currentTimeMillis())
      .start()
      .awaitTermination()

    spark.close()
  }
}
