package com.sunzm.spark.streaming

import org.apache.commons.lang3.time.DateFormatUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.{DStream, MapWithStateDStream}
import org.apache.spark.streaming.scheduler.{StreamingListener, StreamingListenerBatchCompleted}
import org.apache.spark.streaming.{Durations, State, StateSpec, StreamingContext, Time}

/**
 * Spark Streaming listener demo: a stateful word count over a socket stream,
 * with graceful shutdown driven by a stop-marker file and manual save/restore
 * of the mapWithState state between restarts.
 */
object StreamingListenerDemo {
  // mapWithState checkpoints its state data under this directory.
  private val checkpointDirectory = "data/spark/streaming/ck/20210614001/"

  // Presence of this path signals the job to stop gracefully.
  private val stopFlagDirectory = "data/spark/streaming/stop/20210614001/"

  // Directory where a snapshot of the state is saved/restored as CSV text.
  private val initStateDirectory = "data/spark/streaming/state/20210614001/"

  // Presence of this path tells the per-batch hook to persist the state snapshot.
  private val saveStateFlagDirectory = "data/spark/streaming/savestate/20210614001/"

  def main(args: Array[String]): Unit = {
    // Automatic restart (no code change) recovers the context from the
    // checkpoint; a manual restart relies on the marker-file mechanism.
    val ssc = StreamingContext.getOrCreate(checkpointDirectory, () => functionToCreateContext())

    ssc.start()
    stopByMarkFile(ssc, stopFlagDirectory)
  }

  /**
   * Creates and configures a new StreamingContext; invoked by
   * [[StreamingContext.getOrCreate]] only when no checkpoint exists.
   */
  def functionToCreateContext(): StreamingContext = {
    val conf = new SparkConf()
      .setAppName(this.getClass.getSimpleName.stripSuffix("$"))
      .set("spark.streaming.stopGracefullyOnShutdown", "true")
      .setMaster("local[*]")

    val ssc = new StreamingContext(conf, Durations.seconds(5))

    processFunc(ssc)

    // Required by mapWithState: state is persisted in the checkpoint directory.
    ssc.checkpoint(checkpointDirectory)
    ssc
  }

  /**
   * Builds the streaming graph: stateful word count via mapWithState plus the
   * state save/restore hooks and the batch-completed listener.
   */
  def processFunc(ssc: StreamingContext): Unit = {

    // Clear stale marker files left over from a previous (possibly crashed) run.
    val hadoopConf = new Configuration()
    val stopFlagPath = new Path(stopFlagDirectory)
    val saveStateFlagPath = new Path(saveStateFlagDirectory)
    val initStatePath = new Path(initStateDirectory)
    // NOTE: FileSystem instances are cached per scheme/authority by Hadoop.
    // Never call fs.close() on this handle: later getFileSystem() lookups
    // elsewhere in the job would receive the same — then closed — instance.
    val fs = stopFlagPath.getFileSystem(hadoopConf)

    if (fs.exists(stopFlagPath)) {
      fs.delete(stopFlagPath, true)
    }

    if (fs.exists(saveStateFlagPath)) {
      fs.delete(saveStateFlagPath, true)
    }

    // Initial state for mapWithState, restored from a previously saved
    // snapshot ("word,count" lines) if one exists.
    val initialRDD: RDD[(String, Int)] = if (fs.exists(initStatePath)) {
      ssc.sparkContext.textFile(initStateDirectory)
        .map { line =>
          val fields = line.split(",")
          (fields(0), fields(1).toInt)
        }
    } else {
      ssc.sparkContext.parallelize(Array.empty[(String, Int)])
    }

    val lines = ssc.socketTextStream("82.156.210.70", 9999)

    val pairsDStream: DStream[(String, Int)] = lines.flatMap(_.split(","))
      .map(word => (word, 1))

    // BUG FIX: StateSpec.initialState returns a NEW StateSpec; the original
    // code discarded its result, so the restored state was never applied.
    val baseSpec: StateSpec[String, Int, Int, String] = StateSpec.function(mappingFunc)
    val spec: StateSpec[String, Int, Int, String] =
      if (fs.exists(initStatePath)) baseSpec.initialState(initialRDD) else baseSpec

    val mapWithStateDStream: MapWithStateDStream[String, Int, Int, String] =
      pairsDStream.mapWithState(spec)

    // mapWithStateDStream carries the per-batch mapped results.
    mapWithStateDStream.foreachRDD { rdd =>
      if (!rdd.isEmpty()) {
        rdd.foreach(line => println(s"结果数据: ${line}"))
      } else {
        println(s"结果数据为空!")
      }
    }

    val stateDStream: DStream[(String, Int)] = mapWithStateDStream.stateSnapshots()

    // After each batch, persist the full state snapshot when the save flag is set.
    stateDStream.foreachRDD { rdd =>

      if (!rdd.isEmpty()) {

        val conf = new Configuration()
        val flagPath = new Path(saveStateFlagDirectory)
        val batchFs: FileSystem = flagPath.getFileSystem(conf)

        if (batchFs.exists(flagPath)) {

          println("程序即将退出,正在保存状态数据....")

          val stateRDD: RDD[String] = rdd.map {
            case (word, count) => s"${word},${count}"
          }

          // BUG FIX: the original used fs.deleteOnExit, which only schedules
          // deletion at JVM exit — it would wipe the snapshot just written
          // below. Delete immediately so saveAsTextFile writes to a clean path.
          batchFs.delete(new Path(initStateDirectory), true)

          stateRDD.saveAsTextFile(initStateDirectory)
          println("状态数据保存完成!")
        }
        // Intentionally no batchFs.close(): the instance is shared via
        // Hadoop's FileSystem cache and is reused by every batch.
      }
    }

    ssc.addStreamingListener(new CusStreamingListener(stateDStream))
  }

  /**
   * Polls for the stop-marker file every 10 seconds; when it appears, sets the
   * save-state flag (so the final batches persist their state) and shuts the
   * streaming context down gracefully.
   *
   * @param ssc               the running streaming context
   * @param stopFlagDirectory path whose existence signals "stop now"
   */
  def stopByMarkFile(ssc: StreamingContext, stopFlagDirectory: String): Unit = {
    val intervalMills = Durations.seconds(10).milliseconds // poll period
    var isStop = false
    while (!isStop) {
      // Returns true once the context has terminated (e.g. after stop()).
      isStop = ssc.awaitTerminationOrTimeout(intervalMills)

      val conf = new Configuration()
      val stopFlagPath = new Path(stopFlagDirectory)
      val fs = stopFlagPath.getFileSystem(conf)

      if (!isStop && fs.exists(stopFlagPath)) {

        // Create the save-state marker so the remaining batches persist state.
        // BUG FIX: fs.create returns an FSDataOutputStream that must be
        // closed, otherwise the handle leaks (and on HDFS the marker file may
        // never be materialized).
        fs.create(new Path(saveStateFlagDirectory), true).close()

        println("即将关闭sparkstreaming程序.....")

        ssc.stop(stopSparkContext = true, stopGracefully = true)

        // Clean up both marker files now that the job has stopped.
        fs.delete(stopFlagPath, true)
        fs.delete(new Path(saveStateFlagDirectory), true)
        // Intentionally no fs.close(): Hadoop caches FileSystem instances, and
        // closing here would break the lookup on the next loop iteration.
      } else {
        println("***********未检测到有停止信号*****************")
      }
    }

  }

  // State update function for mapWithState: folds the batch count into the
  // running total and emits a human-readable summary line. `one` is None when
  // the key appears only via a state timeout, hence getOrElse(0).
  val mappingFunc = (word: String, one: Option[Int], state: State[Int]) => {
    val sum = one.getOrElse(0) + state.getOption.getOrElse(0)
    val output = s"单词:${word}, 数量: ${sum}"
    state.update(sum)
    output
  }

  /**
   * Listener logging each completed batch; a natural place to commit Kafka
   * offsets once the whole batch (all output operations) has finished.
   */
  private class CusStreamingListener(stateDStream: DStream[(String, Int)]) extends StreamingListener {
    /**
     * Called after all onOutputOperationCompleted callbacks, i.e. when the
     * entire batch has been processed.
     *
     * @param batchCompleted batch metadata (batch time, delays, record counts)
     */
    override def onBatchCompleted(batchCompleted: StreamingListenerBatchCompleted): Unit = {
      val time: Time = batchCompleted.batchInfo.batchTime
      val milliseconds = time.milliseconds

      val timeStr = DateFormatUtils.format(milliseconds, "yyyy-MM-dd HH:mm:ss")

      println(s"[${timeStr}] onBatchCompleted called, 这里可以保存kafka的偏移量")
    }
  }

}
