package com.sunzm.spark.streaming

import com.sunzm.spark.streaming.listener.kafkaStreamingListener
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.util.ShutdownHookManager
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.{DStream, InputDStream, MapWithStateDStream}
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.spark.streaming.kafka010.KafkaUtils
import org.apache.spark.streaming.kafka010.LocationStrategies
import org.apache.spark.streaming.{Durations, State, StateSpec, StreamingContext}

/**
 * Word-count over a Kafka stream using mapWithState. A Hadoop
 * ShutdownHookManager hook saves the state snapshot to the file system
 * before the program exits, so it can be restored as the initial state
 * on the next run.
 *
 * @author Administrator
 * @version 1.0
 * @date 2021-06-18 14:15
 */
object MapWithStateRecoverShutdownHookDemo {
  // Directory where Spark persists mapWithState checkpoint data.
  private val checkpointDirectory = "data/spark/streaming/ck/20210618001/"
  // Directory holding the saved state snapshot used to seed mapWithState on restart.
  private val initStateDirectory = "data/spark/streaming/state/20210618001/"
  // Marker directory: when it exists, the save-state shutdown hook gets armed.
  private val saveStateFlagDirectory = "data/spark/streaming/savestate/20210618001/"

  // Most recent state-snapshot RDD, refreshed every batch and read by the
  // shutdown hook. @volatile: written by the streaming scheduler thread,
  // read by the JVM shutdown thread.
  @volatile private var latestStateRDD: RDD[(String, Int)] = _
  // Guards hook registration. The original code registered a brand-new hook on
  // every batch while the flag directory existed, leaking hooks and pinning an
  // RDD reference per batch; we register exactly once.
  private val hookRegistered = new java.util.concurrent.atomic.AtomicBoolean(false)

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setAppName(this.getClass.getSimpleName.stripSuffix("$"))
      // Let Spark's own shutdown hook stop the StreamingContext gracefully.
      .set("spark.streaming.stopGracefullyOnShutdown", "true")
      .setMaster("local[*]")

    val ssc: StreamingContext = new StreamingContext(conf, Durations.seconds(5))

    // mapWithState requires a checkpoint directory.
    ssc.checkpoint(checkpointDirectory)

    processFunc(ssc)

    ssc.start()

    //ssc.awaitTermination()

    // Local simulation: run for 30 seconds, then stop gracefully.
    Thread.sleep(30 * 1000L)
    ssc.stop(stopSparkContext = true, stopGracefully = true)
  }

  /**
   * Builds the streaming pipeline: restores the initial state, reads words
   * from Kafka, maintains per-word counts with mapWithState, and wires up the
   * state-saving logic.
   */
  def processFunc(ssc: StreamingContext): Unit = {

    val hadoopConf = new Configuration()
    val initStatePath = new Path(initStateDirectory)
    val fs = initStatePath.getFileSystem(hadoopConf)

    // Seed mapWithState from the previously saved snapshot, if any.
    val initialRDD: RDD[(String, Int)] = if (fs.exists(initStatePath)) {
      println("从HDFS恢复状态数据...")
      val restored = ssc.sparkContext.textFile(initStateDirectory)
        .map { line =>
          val fields = line.split(",")
          (fields(0), fields(1).toInt)
        }
      // BUG FIX: textFile is lazy. The original code registered deleteOnExit
      // and then called fs.close(), which processes deleteOnExit immediately —
      // deleting the snapshot directory before the RDD was ever computed.
      // Materialize (and cache) the data BEFORE the source is removed below.
      restored.cache()
      restored.count()
      restored
    } else {
      ssc.sparkContext.parallelize(Array.empty[(String, Int)])
    }

    // The snapshot has been fully read (or never existed); remove it now so a
    // stale snapshot can never be picked up by a later run.
    fs.delete(initStatePath, true)
    // Clear the save-state flag; an operator re-creates this directory at
    // runtime to arm the shutdown hook.
    val saveStateFlagPath = new Path(saveStateFlagDirectory)
    fs.delete(saveStateFlagPath, true)
    // NOTE: deliberately no fs.close() here. getFileSystem returns the shared
    // cached FileSystem instance; closing it would break Spark's own file
    // system access (e.g. checkpointing) with "Filesystem closed" errors.

    val kafkaParams = Map[String, Object](
      //"bootstrap.servers" -> "localhost:9092,anotherhost:9092",
      "bootstrap.servers" -> "82.156.210.70:9093",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "use_a_separate_group_id_for_each_stream",
      "auto.offset.reset" -> "latest",
      // Offsets are committed by the batch-completion listener below, not
      // auto-committed by the consumer.
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )

    // bin/kafka-console-producer.sh --bootstrap-server 10.0.8.11:9092 --topic my-topic
    val topics = Array("my-topic")
    val kafkaDStream: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](
      ssc,
      LocationStrategies.PreferConsistent,
      Subscribe[String, String](topics, kafkaParams)
    )

    // Listener commits the Kafka offsets once per completed batch.
    ssc.addStreamingListener(new kafkaStreamingListener(kafkaDStream))

    // Each record value is a comma-separated word list; emit (word, 1) pairs.
    val pairsDStream: DStream[(String, Int)] = kafkaDStream
      .map(_.value())
      .flatMap(_.split(","))
      .map((_, 1))

    val spec: StateSpec[String, Int, Int, String] = StateSpec.function(mappingFunc)
      // Seed the state with the restored snapshot.
      .initialState(initialRDD)

    val mapWithStateDStream: MapWithStateDStream[String, Int, Int, String] = pairsDStream.mapWithState(spec)

    // mapWithStateDStream carries the per-batch output of mappingFunc.
    mapWithStateDStream.foreachRDD { rdd =>
      if (!rdd.isEmpty()) {
        rdd.foreach(line => println(s"结果数据: ${line}"))
      } else {
        println(s"结果数据为空!")
      }
    }

    // stateSnapshots emits the full (key, state) table every batch.
    val stateDStream: DStream[(String, Int)] = mapWithStateDStream.stateSnapshots()
    saveStateFunc(stateDStream)
  }

  /**
   * mapWithState update function: adds this batch's count for `word` to the
   * stored running total, writes the total back to the state, and returns a
   * display string for the output stream.
   */
  val mappingFunc = (word: String, one: Option[Int], state: State[Int]) => {
    val sum = one.getOrElse(0) + state.getOption.getOrElse(0)
    val output = s"单词:${word}, 数量: ${sum}"
    state.update(sum)
    output
  }

  /**
   * Tracks the latest state snapshot and, once the flag directory appears,
   * registers a single shutdown hook that persists it on JVM exit.
   */
  def saveStateFunc(stateDStream: DStream[(String, Int)]): Unit = {
    stateDStream.foreachRDD { rdd =>
      // Remember the most recent snapshot so the hook saves up-to-date state.
      latestStateRDD = rdd

      val conf = new Configuration()
      val saveStateFlagPath = new Path(saveStateFlagDirectory)
      val fs: FileSystem = saveStateFlagPath.getFileSystem(conf)

      // BUG FIX: register the hook at most once. The original registered a new
      // hook on every batch, accumulating hooks for the life of the job.
      if (fs.exists(saveStateFlagPath) && hookRegistered.compareAndSet(false, true)) {
        // Hadoop runs hooks highest-priority first. 101 > StreamingContext's 51
        // and SparkContext's 50 (and FileSystem's 10), so the state is written
        // before Spark tears the context down.
        ShutdownHookManager.get().addShutdownHook(new SaveStateRunnable, 101)
      }
    }
  }

  /**
   * Shutdown hook body: writes the latest state snapshot as "word,count" lines
   * to initStateDirectory, where the next run will restore it from.
   */
  private class SaveStateRunnable extends Runnable {
    override def run(): Unit = {
      println("程序退出前的后处理工作....")
      println("开始保存状态数据....")

      val snapshot = latestStateRDD
      if (snapshot != null) {
        // saveAsTextFile fails if the target directory already exists — clear
        // any leftover output first.
        val outPath = new Path(initStateDirectory)
        val fs = outPath.getFileSystem(new Configuration())
        fs.delete(outPath, true)

        snapshot.map {
          case (word, count) => s"${word},${count}"
        }.saveAsTextFile(initStateDirectory)
      }

      println("保存状态数据完成!")
    }
  }

}
