package com.shujia.stream

import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.streaming.{Durations, StreamingContext}

/**
  * Stateful streaming word count.
  *
  * Reads comma-separated words from a socket ("master":8888), and maintains a
  * running per-word count across batches using `updateStateByKey`. Requires a
  * checkpoint directory because the accumulated state must be fault-tolerant.
  */
object Demo2UpdateStateByKey {
  def main(args: Array[String]): Unit = {

    /**
      * 1. Create the SparkContext.
      */
    val conf: SparkConf = new SparkConf()
      .setMaster("local[2]") // at least 2 cores: one for the receiver, one for processing
      .setAppName("stream")

    val sc = new SparkContext(conf)

    /**
      * 2. Create the streaming context.
      *
      * The duration is the batch interval: one micro-batch every 5 seconds.
      */
    val ssc = new StreamingContext(sc, Durations.seconds(5))

    // Checkpointing is mandatory for updateStateByKey: the accumulated state
    // from previous batches is persisted here for fault tolerance.
    ssc.checkpoint("data/checkpoint")

    // Read a text stream from the socket; each element is one line.
    val linesDS: ReceiverInputDStream[String] = ssc.socketTextStream("master", 8888)

    // Split each line into words and pair every word with an initial count of 1.
    val wordDS: DStream[String] = linesDS.flatMap(line => line.split(","))

    val kvDS: DStream[(String, Int)] = wordDS.map(word => (word, 1))

    /**
      * reduceByKey would aggregate values per key WITHIN a single batch only.
      *
      * updateStateByKey instead merges each batch's data into the result of
      * all previous batches, producing an up-to-date running total.
      *
      * Update function arguments:
      *   - seq:    all values for this key in the CURRENT batch
      *   - option: the previously computed state for this key (None on first
      *             appearance of the key)
      */
    // Declare Option[Int] (not Some[Int]) as the result type: updateStateByKey
    // expects (Seq[V], Option[S]) => Option[S], and returning None is how a
    // key's state would be dropped.
    val updateFun: (Seq[Int], Option[Int]) => Option[Int] = (seq: Seq[Int], option: Option[Int]) => {
      // 1. Count of this word in the current batch.
      val currCount: Int = seq.sum

      // Count accumulated in earlier batches, 0 if the word is new.
      val lastCount: Int = option.getOrElse(0)

      // New running total becomes the key's updated state.
      Some(currCount + lastCount)
    }

    val countDS: DStream[(String, Int)] = kvDS.updateStateByKey(updateFun)

    // Print the first elements of each batch's result to stdout.
    countDS.print()

    ssc.start()
    // Blocks until the context is stopped externally or a streaming error is
    // rethrown; no code after this call is reachable in normal operation,
    // so the former `ssc.stop()` that followed it was dead code and removed.
    ssc.awaitTermination()
  }

}
