package sparkstreaming.lesson03

import org.apache.spark.streaming.dstream.ReceiverInputDStream
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
  * Stateful streaming word count using `updateStateByKey`.
  *
  * The driver maintains, per word, a running total across batches:
  * each new 2-second micro-batch's counts are merged into the state
  * recovered from the previous batch (persisted via checkpointing).
  */
object UpdateStateBykeyWordCount {
  def main(args: Array[String]): Unit = {
    // local[2]: at least 2 threads are required — one is consumed by the
    // socket receiver, the other actually processes the batches.
    val conf = new SparkConf().setMaster("local[2]").setAppName("NetWordCount")
    val sc = new SparkContext(conf)
    // 2-second batch interval.
    val ssc = new StreamingContext(sc, Seconds(2))
    // updateStateByKey is stateful and REQUIRES a checkpoint directory so the
    // per-key state can survive driver failures.
    ssc.checkpoint("hdfs://hadoop1:9000/streamingcheckpoint")

    /**
      * Input: one line of text per record from a TCP socket source.
      */
    val dstream: ReceiverInputDStream[String] = ssc.socketTextStream("hadoop1",9999)

    /**
      * Processing.
      *
      * updateFunc contract: (Seq[V], Option[S]) => Option[S]
      *   - values: all counts seen for this key in the CURRENT batch,
      *             e.g. input "you,you,jump" groups to you -> Seq(1, 1).
      *   - state:  the running total from previous batches;
      *             None the first time a key appears, Some(n) afterwards.
      * Returning Some(total) keeps the key in the state; returning None
      * would evict it.
      */
    val wordCountDStream = dstream.flatMap(_.split(","))
      .map((_, 1))
      .updateStateByKey((values: Seq[Int], state: Option[Int]) => {
        val currentCount = values.sum      // occurrences of this word in the current batch
        val lastCount = state.getOrElse(0) // running total so far (0 if first sighting)
        Some(currentCount + lastCount)     // new running total kept in state
      })

    /*
      Output: print each batch's counts to stdout.
     */
    wordCountDStream.print()

    ssc.start()
    // Blocks until the context is stopped (externally or by an error), so no
    // explicit ssc.stop() is needed afterwards — it would be dead code.
    ssc.awaitTermination()
  }

}
