package sparkstreaming.eighteenthday1.lesson3

import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}

/**
  * The updateStateByKey operator requires a checkpoint directory to be set.
  * The directory must live on HDFS and the application must have write access to it.
  */
object UpdateStateByKeyWordCount {

  /**
    * Entry point: builds a 2-second micro-batch StreamingContext, reads text
    * lines from a socket, and maintains a running word count across batches
    * with updateStateByKey (state is persisted via the HDFS checkpoint dir).
    */
  def main(args: Array[String]): Unit = {
    val sparkConf: SparkConf = new SparkConf()
      .setMaster("local[4]")
      .setAppName(s"${this.getClass.getSimpleName}")
    val sparkContext = new SparkContext(sparkConf)
    val streamingContext = new StreamingContext(sparkContext, Seconds(2))

    // updateStateByKey is stateful, so a checkpoint directory is mandatory.
    streamingContext.checkpoint("hdfs://hadoop01:9000/streamingcheckpoint")

    val lines: ReceiverInputDStream[String] = streamingContext.socketTextStream("hadoop01", 9999)

    /**
      * updateFunc: (Seq[V], Option[S]) => Option[S]
      *   batchValues  — the per-occurrence 1s for this word in the current batch, e.g. Seq(1, 1, 1)
      *   runningTotal — the accumulated count from earlier batches (None the first time a word is seen)
      * Returning Some(...) keeps the key's state; the new state is old total + this batch's sum.
      */
    val runningCounts: DStream[(String, Int)] = lines
      .flatMap(line => line.split(" "))
      .map(word => (word, 1))
      .updateStateByKey((batchValues: Seq[Int], runningTotal: Option[Int]) =>
        Some(runningTotal.getOrElse(0) + batchValues.sum)
      )

    runningCounts.print()

    streamingContext.start()
    streamingContext.awaitTermination()
    streamingContext.stop()
  }
}
