package com.atguigu.bigdata.spark.streaming

import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}

object SprakStreaming10_Chackpoint {
  def main(args: Array[String]): Unit = {
    // The checkpoint directory is used twice: once to look up / recover the
    // context and once to enable state persistence. Keep it in a single val
    // so the two usages can never drift apart.
    val checkpointDir = "cp"

    // getOrCreate: if a checkpoint already exists under `checkpointDir`,
    // rebuild the StreamingContext (and its accumulated state) from it;
    // otherwise run the factory function below to create a fresh one.
    val ssc: StreamingContext = StreamingContext.getOrCreate(checkpointDir, () => {
      val conf: SparkConf = new SparkConf().setAppName("window").setMaster("local[*]")
      // Local name differs from the outer `ssc` to avoid shadowing confusion.
      val newSsc = new StreamingContext(conf, Seconds(3))
      newSsc.checkpoint(checkpointDir)

      // Read lines of text from a network socket.
      val socketData: ReceiverInputDStream[String] = newSsc.socketTextStream("localhost", 9999)
      // Flatten each line into individual words.
      val words: DStream[String] = socketData.flatMap(_.split(" "))
      // Pair every word with an initial count of 1.
      val wordToOne: DStream[(String, Int)] = words.map((_, 1))

      // If the streaming application crashes, a plain restart would start a
      // brand-new collection and all previously accumulated state would be
      // lost. Recovering the context from the checkpoint (via getOrCreate
      // above) avoids that.
      //
      // NOTE: updateStateByKey REQUIRES a checkpoint directory; without
      // newSsc.checkpoint(...) it fails with:
      //   "The checkpoint directory has not been set. Please set it by
      //    StreamingContext.checkpoint()."
      // i.e. Spark Streaming's stateful operations are checkpoint-based.
      val wordToCount: DStream[(String, Int)] = wordToOne.updateStateByKey(
        // seq:  the counts for one key collected during the current batch
        // buff: the running total carried over from previous batches
        //       (Option guards against the key never having been seen — no nulls)
        (seq: Seq[Int], buff: Option[Int]) => {
          // Take the buffered total out, add this batch's sum, put it back.
          Option(seq.sum + buff.getOrElse(0))
        }
      )
      wordToCount.print()
      newSsc
    })

    ssc.start()
    ssc.awaitTermination()
  }
}
