package com.atguigu.bigdata.spark.streaming

import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}

object SprakStreaming07_State1 {

  /**
   * Demonstrates a STATEFUL word count over a socket text stream using
   * `updateStateByKey`. "State" here simply means data persisted across
   * batch intervals; by default SparkStreaming operations are stateless —
   * each result is computed per batch and only valid for that batch.
   */
  def main(args: Array[String]): Unit = {

    // NOTE: appName fixed from the misspelled "tarnsfrom".
    val conf: SparkConf = new SparkConf().setAppName("transform").setMaster("local[*]")
    val ssc = new StreamingContext(conf, Seconds(3))
    // Stateful operators persist their per-key state via checkpointing.
    // Without this, updateStateByKey fails at runtime with:
    // "The checkpoint directory has not been set. Please set it by StreamingContext.checkpoint()."
    ssc.checkpoint("checkpoint")

    // Receive lines of text from a network socket.
    val socketData: ReceiverInputDStream[String] = ssc.socketTextStream("localhost", 9999)
    // Flatten each line into individual words.
    val words: DStream[String] = socketData.flatMap(_.split(" "))
    // Pair every word with an initial count of 1.
    val wordToOne: DStream[(String, Int)] = words.map((_, 1))

    // reduceByKey(_ + _) would be STATELESS: it only aggregates within one batch.
    /*val wordToCount: DStream[(String, Int)] = wordToOne.reduceByKey(_ + _)
    wordToCount.print()
*/
    // A stateful operation is essentially an operation over a saved buffer.
    // updateStateByKey takes a function of type (Seq[V], Option[S]) => Option[S].
    val wordToCount: DStream[(String, Int)] = wordToOne.updateStateByKey(
      // seq:  values for one key collected in the current batch interval
      // buff: previously saved state for that key (Option avoids null handling)
      (seq: Seq[Int], buff: Option[Int]) => {
        // Take the buffered total, add this batch's sum, and store it back.
        Option(seq.sum + buff.getOrElse(0))
      }
    )
    wordToCount.print()

    ssc.start()
    ssc.awaitTermination()
  }
}
