package com.sunzm.spark.streaming

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.{DStream, MapWithStateDStream}
import org.apache.spark.streaming.{Durations, State, StateSpec, StreamingContext}

/**
 * State-recovery demo for `mapWithState`.
 *
 * Recovery model: rather than relying on checkpoint-based code recovery, the job
 * persists its state snapshot to `initStateDirectory` on shutdown and re-seeds
 * `mapWithState` from that directory on the next start. Shutdown is coordinated
 * through marker paths on the (possibly HDFS-backed) file system:
 *   - `stopFlagDirectory`      : created externally to request a graceful stop
 *   - `saveStateFlagDirectory` : created by the stop watcher to tell the batch
 *                                job "persist your state snapshot now"
 */
object MapWithStateRecoverDemo {
  // Checkpoint directory required by mapWithState.
  private val checkpointDirectory = "data/spark/streaming/ck/20210612002/"
  // Directory holding the persisted state snapshot used to seed mapWithState.
  private val initStateDirectory = "data/spark/streaming/state/20210612002/"
  // Marker path: its existence requests a graceful shutdown.
  private val stopFlagDirectory = "data/spark/streaming/stop/20210612002/"
  // Marker path: its existence tells the running batches to save their state.
  private val saveStateFlagDirectory = "data/spark/streaming/savestate/20210612002/"

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setAppName(this.getClass.getSimpleName.stripSuffix("$"))
      .set("spark.streaming.stopGracefullyOnShutdown", "true")
      .setMaster("local[*]")

    val ssc = new StreamingContext(conf, Durations.seconds(5))

    // mapWithState refuses to run without a checkpoint directory.
    ssc.checkpoint(checkpointDirectory)

    processFunc(ssc)

    ssc.start()
    // Block here, polling for the external stop marker instead of awaitTermination().
    stopByMarkFile(ssc, stopFlagDirectory)
  }

  /**
   * Polls `stopFlagDirectory` every 10 seconds. When the marker appears:
   * drop the save-state marker, wait one interval so in-flight batches can
   * persist the state snapshot, then stop the context gracefully and remove
   * the stop marker.
   *
   * @param ssc               the running streaming context to stop
   * @param stopFlagDirectory marker path whose existence requests shutdown
   */
  def stopByMarkFile(ssc: StreamingContext, stopFlagDirectory: String): Unit = {
    val intervalMills = Durations.seconds(10).milliseconds // poll period

    // Loop-invariant: build the Configuration/FileSystem once, not per iteration.
    val conf = new Configuration()
    val stopFlagPath = new Path(stopFlagDirectory)
    val fs = stopFlagPath.getFileSystem(conf)

    var isStop = false
    while (!isStop) {
      isStop = ssc.awaitTerminationOrTimeout(intervalMills)

      if (!isStop && fs.exists(stopFlagPath)) {
        // Create the save-state marker. FIX: fs.create returns an output
        // stream that the original never closed (handle leak; the marker
        // might not be flushed to the namenode/disk).
        val marker = fs.create(new Path(saveStateFlagDirectory), true)
        marker.close()

        println("10 秒后开始关闭sparkstreaming程序.....")
        // Give running batches one interval to notice the marker and save state.
        Thread.sleep(intervalMills)
        ssc.stop(stopSparkContext = true, stopGracefully = true)

        // Remove the stop marker so the next run does not immediately shut down.
        fs.delete(stopFlagPath, true)

        // FIX: do not fs.close() here — Hadoop caches FileSystem instances, and
        // closing the cached one breaks every other user of it. Exit the loop
        // explicitly instead of touching the stopped context again.
        isStop = true
      } else {
        println("***********未检测到有停止信号*****************")
      }
    }

  }

  /**
   * Builds the streaming pipeline: clears stale markers, seeds `mapWithState`
   * from the persisted snapshot (if any), wires the socket word-count, and
   * registers a snapshot-saving action triggered by the save-state marker.
   */
  def processFunc(ssc: StreamingContext): Unit = {

    val hadoopConf = new Configuration()
    val stopFlagPath = new Path(stopFlagDirectory)
    val saveStateFlagPath = new Path(saveStateFlagDirectory)
    val fs = stopFlagPath.getFileSystem(hadoopConf)

    // Clear markers possibly left over from a previous (crashed) run.
    if (fs.exists(saveStateFlagPath)) {
      fs.delete(saveStateFlagPath, true)
    }
    if (fs.exists(stopFlagPath)) {
      fs.delete(stopFlagPath, true)
    }

    // Seed mapWithState from the snapshot saved by the previous run, if present.
    // Snapshot lines are "word,count" (see the save logic below).
    val initStatePath = new Path(initStateDirectory)
    val initialRDD: RDD[(String, Int)] = if (fs.exists(initStatePath)) {
      ssc.sparkContext.textFile(initStateDirectory)
        .map { line =>
          val fields = line.split(",")
          (fields(0), fields(1).toInt)
        }
    } else {
      ssc.sparkContext.parallelize(Array.empty[(String, Int)])
    }

    // Drop the snapshot once it has been loaded so a stale copy can never be
    // re-used after state has moved on.
    fs.delete(initStatePath, true)

    // FIX: the original called fs.close() here; the instance is cached and
    // shared (Spark's own I/O uses it), so we deliberately leave it open.

    val lines = ssc.socketTextStream("82.156.210.70", 9999)

    // Comma-separated words -> (word, 1) pairs.
    val pairsRDD: DStream[(String, Int)] = lines.flatMap(_.split(","))
      .map(word => (word, 1))

    val spec: StateSpec[String, Int, Int, String] = StateSpec.function(mappingFunc)
      // Seed the state with the recovered snapshot.
      .initialState(initialRDD)

    val mapWithStateDStream: MapWithStateDStream[String, Int, Int, String] = pairsRDD.mapWithState(spec)

    // mapWithStateDStream carries the per-batch mapped output.
    mapWithStateDStream.foreachRDD { rdd =>
      if (!rdd.isEmpty()) {
        rdd.foreach(line => println(s"结果数据: ${line}"))
      } else {
        println(s"结果数据为空!")
      }
    }

    // stateSnapshots() yields the full (key, state) table each batch.
    val stateDStream: DStream[(String, Int)] = mapWithStateDStream.stateSnapshots()

    stateDStream.foreachRDD { rdd =>

      val conf = new Configuration()
      val flagPath = new Path(saveStateFlagDirectory)
      val fs: FileSystem = flagPath.getFileSystem(conf)

      // Only persist when the stop watcher has raised the save-state marker.
      if (fs.exists(flagPath)) {

        println("程序即将退出,正在保存状态数据....")

        // FIX: the original used fs.deleteOnExit(...) followed by fs.close(),
        // which only worked because close() happens to process delete-on-exit
        // paths — and closing the cached FileSystem is unsafe. Delete the
        // target explicitly so saveAsTextFile cannot fail with
        // FileAlreadyExistsException (e.g. when several batches fire while the
        // marker exists), and leave the shared FileSystem open.
        val initStatePath = new Path(initStateDirectory)
        if (fs.exists(initStatePath)) {
          fs.delete(initStatePath, true)
        }

        rdd.map {
          case (word, count) => s"${word},${count}"
        }.saveAsTextFile(initStateDirectory)

        println("状态数据保存完成!")
      }

    }
  }

  // Accumulates the incoming count into the per-word state and emits a
  // human-readable line for the mapped stream.
  val mappingFunc: (String, Option[Int], State[Int]) => String =
    (word: String, one: Option[Int], state: State[Int]) => {
      val sum = one.getOrElse(0) + state.getOption.getOrElse(0)
      val output = s"单词:${word}, 数量: ${sum}"
      state.update(sum)
      output
    }
}
