package com.example.spark.streaming

import com.example.util.SparkUtil
import org.apache.spark.SparkContext
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
 * Spark Streaming WordCount examples: stateless per-batch counting,
 * stateful counting via `updateStateByKey`, checkpoint-based recovery,
 * and sliding-window counting.
 *
 * Project: bigdata
 *
 * @author leali
 * @since 2022/5/15 21:48
 */
object WordCount {

  val sparkContext: SparkContext = SparkUtil.initSimpleSparkContent("WordCount")
  val checkpointPath: String = "src/data/output/checkpoint"

  // One micro-batch every 5 seconds; shared by all examples below so the
  // interval is defined in exactly one place.
  private val defaultBatchInterval = Seconds(5)

  /**
   * Stateless streaming word count: each 5s batch is reduced independently,
   * no state is carried across batches.
   *
   * @param sparkContext the SparkContext to attach the StreamingContext to
   */
  def StreamingWordCount(sparkContext: SparkContext): Unit = {
    val streamingContext = new StreamingContext(sparkContext, defaultBatchInterval)

    getDStream(streamingContext)
      .reduceByKey(_ + _)
      .print()

    runStreamingContext(streamingContext)
  }

  /**
   * Builds a checkpointed StreamingContext whose word counts accumulate
   * across batches via `updateStateByKey`.
   *
   * @param sparkContext  the SparkContext to attach the StreamingContext to
   * @param checkpointDir directory where streaming state is checkpointed;
   *                      defaults to [[checkpointPath]]
   * @return the configured (not yet started) StreamingContext
   */
  private def getStreamingAndCheckpoint(sparkContext: SparkContext,
                                        checkpointDir: String = checkpointPath): StreamingContext = {
    val streamingContext = new StreamingContext(sparkContext, defaultBatchInterval)

    // updateStateByKey stores its state in the checkpoint directory; without
    // this call Spark fails with "The checkpoint directory has not been set.
    // Please set it by StreamingContext.checkpoint()."
    streamingContext.checkpoint(checkpointDir)

    getDStream(streamingContext)
      .updateStateByKey(
        (currentValues: Seq[Int], historyValue: Option[Int]) => {
          // New values for this key: add them to the accumulated total.
          // No new values: keep the previous state unchanged.
          if (currentValues.nonEmpty) Some(currentValues.sum + historyValue.getOrElse(0)) else historyValue
        }).print()

    streamingContext
  }

  /**
   * Stateful streaming word count: counts accumulate across batches.
   *
   * @param sparkContext the SparkContext to attach the StreamingContext to
   */
  def StreamingWordCountByState(sparkContext: SparkContext): Unit = {
    runStreamingContext(getStreamingAndCheckpoint(sparkContext))
  }

  /**
   * Zero-argument factory for `StreamingContext.getOrCreate`. Must be a
   * stable `def` (not an inline lambda): an inline lambda fails on recovery
   * with "Illegal lambda deserialization".
   */
  def creatingFunc(): StreamingContext = getStreamingAndCheckpoint(sparkContext, checkpointPath)

  /**
   * Stateful word count that recovers accumulated state from the checkpoint
   * directory on restart (or builds a fresh context when no checkpoint exists).
   *
   * Known pitfalls with `getOrCreate`:
   *  - it expects a zero-argument creating function;
   *  - only one SparkContext may run per JVM (SPARK-2243) unless
   *    `spark.driver.allowMultipleContexts = true` is set;
   *  - an inline lambda can raise `java.io.IOException: unexpected exception
   *    type` / "Illegal lambda deserialization" during recovery — hence the
   *    `creatingFunc _` method reference.
   *
   * @param checkpointPath checkpoint directory to recover from
   */
  def StreamingRecoverState(checkpointPath: String = WordCount.checkpointPath): Unit = {
    runStreamingContext(StreamingContext.getOrCreate(checkpointPath, creatingFunc _))
  }

  /**
   * Builds the shared source stream: reads lines from a socket, splits them on
   * spaces and pairs every word with an initial count of 1.
   *
   * @param streamingContext context to create the input stream on
   * @param host             hostname of the socket source (default "node01")
   * @param port             port of the socket source (default 9999)
   * @return a DStream of (word, 1) pairs
   */
  private def getDStream(streamingContext: StreamingContext,
                         host: String = "node01",
                         port: Int = 9999): DStream[(String, Int)] = {
    streamingContext
      .socketTextStream(host, port)
      .flatMap(_.split(" "))
      .map(word => (word, 1))
  }

  /**
   * Starts the streaming context and blocks until it terminates.
   *
   * A streaming application keeps running after `start()` until it is stopped
   * manually or fails, so `awaitTermination()` blocks the calling thread.
   *
   * @param streamingContext the fully-wired context to run
   */
  private def runStreamingContext(streamingContext: StreamingContext): Unit = {
    streamingContext.start()
    try {
      streamingContext.awaitTermination()
    } finally {
      // Graceful shutdown: finish in-flight batches and stop the SparkContext.
      // Placed in `finally` so it also runs when awaitTermination rethrows a
      // streaming failure (in the original code the stop call was skipped then).
      streamingContext.stop(stopSparkContext = true, stopGracefully = true)
    }
  }

  /**
   * Windowed word count (real-world analogue: trending topics / hot searches).
   *
   * windowDuration: how far back the aggregation looks (window size);
   * slideDuration: how often the aggregation is recomputed.
   * Both must be multiples of the batch interval.
   *
   * Here: every 5s (slide) count the words of the last 10s (window).
   * Example of a real requirement: every 10 minutes (slide) update the ad-click
   * counts of the last 24 hours (window) —
   * `.reduceByKeyAndWindow((a: Int, b: Int) => a + b, Minutes(60 * 24), Minutes(10))`.
   *
   * @param sparkContext the SparkContext to attach the StreamingContext to
   */
  def StreamingWindow(sparkContext: SparkContext): Unit = {
    val streamingContext = new StreamingContext(sparkContext, defaultBatchInterval)

    getDStream(streamingContext)
      // reduceByKeyAndWindow(reduceFunc, windowDuration, slideDuration)
      .reduceByKeyAndWindow((a: Int, b: Int) => a + b, Seconds(10), Seconds(5))
      .print()

    runStreamingContext(streamingContext)
  }
}
