package com.dongline.te
import javafx.application.Application
import javafx.stage.Stage
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}

class StreamFullWordCount extends Application {

  /**
   * JavaFX entry point that builds and runs a Spark Streaming job keeping a
   * running (stateful) word count across micro-batches.
   *
   * Motivating use case: many users topping up an account — accumulate the
   * total amount seen per key over the lifetime of the stream.
   *
   * NOTE(review): extending `javafx.application.Application` for a headless
   * Spark job is unusual and `primaryStage` is never used — confirm whether a
   * JavaFX UI is actually intended here.
   *
   * @param primaryStage the JavaFX primary stage (currently unused)
   */
  override def start(primaryStage: Stage): Unit = {

    val conf = new SparkConf().setMaster("local[2]").setAppName("NetworkWordCount")
    // Cap Spark's memory check so local mode starts on a small heap (~450 MB).
    conf.set("spark.testing.memory", "471859200")
    val ssc = new StreamingContext(conf, Seconds(1))
    // updateStateByKey requires a checkpoint directory to persist state.
    ssc.checkpoint("D:/sdf")

    val lines = ssc.socketTextStream("192.168.0.31", 9999)
    // Split each line into words.
    val words = lines.flatMap(_.split(" "))
    // Count each word within the current batch.
    val pairs = words.map(word => (word, 1))
    val wordCounts = pairs.reduceByKey(_ + _)

    // Fold each batch's counts into the running total per key:
    // new state = sum of this batch's counts + previous state (0 if the key
    // has never been seen). Pure expression — no mutable accumulator needed.
    val count = wordCounts.updateStateByKey((batchCounts: Seq[Int], prevTotal: Option[Int]) =>
      Option(batchCounts.sum + prevTotal.getOrElse(0))
    )

    // Print the first ten elements of each RDD generated in this DStream.
    count.print()
    ssc.start()            // Start the computation.
    ssc.awaitTermination() // Wait for the computation to terminate.
  }
}
