package com.shujia.stream

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.streaming.{Durations, StreamingContext}
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}

/**
  * Spark Streaming demo: a stateful word count over a TCP socket source.
  *
  * Unlike `reduceByKey`, which only aggregates within the current micro-batch,
  * `updateStateByKey` maintains a running per-key total across batches. The
  * accumulated state is persisted to the checkpoint directory.
  *
  * Run `nc -lk 8888` on host "master" and type comma-separated words to feed it.
  */
object Demo2UpdataStateByKey {
  def main(args: Array[String]): Unit = {

    // local[2]: at least two threads are required locally — one is consumed
    // by the socket receiver, the other performs the batch processing.
    val conf: SparkConf = new SparkConf()
      .setMaster("local[2]")
      .setAppName("stream")

    val sc = new SparkContext(conf)

    // Micro-batch interval: one batch every 5 seconds.
    val ssc = new StreamingContext(sc, Durations.seconds(5))

    // A checkpoint directory is mandatory for stateful operators such as
    // updateStateByKey: it is where the per-key running state is saved.
    ssc.checkpoint("spark/data/checkpoint")

    // One DStream element per line received on master:8888.
    val linesDS: ReceiverInputDStream[String] = ssc.socketTextStream("master", 8888)

    // Split each comma-separated line into individual words.
    val wordsDS: DStream[String] = linesDS.flatMap(_.split(","))

    // Pair each word with 1 so counts can be aggregated by key.
    val kvDS: DStream[(String, Int)] = wordsDS.map((_, 1))

    /**
      * State-update function passed to `updateStateByKey`.
      *
      * Invoked once per key per batch with the new values for that key and
      * the previously accumulated state.
      *
      * @param seq the 1s attached to each occurrence of the word in the current batch
      * @param opt the previously accumulated count for the word (None the first time it is seen)
      * @return the new accumulated count for the word (returning None would drop the key's state)
      */
    def updateFun(seq: Seq[Int], opt: Option[Int]): Option[Int] = {
      // Number of occurrences of this word in the current batch.
      val currCount: Int = seq.sum

      // Previously accumulated count; 0 if the word has never been seen.
      val lastCount: Int = opt.getOrElse(0)

      // New running total for the word.
      Some(currCount + lastCount)
    }

    val countDS: DStream[(String, Int)] = kvDS.updateStateByKey(updateFun)

    // Print the first elements of each batch's result to stdout.
    countDS.print()

    ssc.start()
    // Blocks until the streaming context is stopped (e.g. externally or on error);
    // stop() below only runs after such a termination.
    ssc.awaitTermination()
    ssc.stop()

  }

}
