package cn.cihon.stream.wordcount

import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkConf
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
  * Created by eeexiu on 16-11-20.
  */
object UpdateByKeyWordCount {

  def main(args: Array[String]): Unit = {

    // Quiet Spark's verbose INFO logging so the word counts stay visible on the console.
    Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
    Logger.getLogger("org.eclipse.jetty.server").setLevel(Level.OFF)

    // Optional CLI overrides; defaults preserve the original hard-coded behavior:
    //   args(0) = host, args(1) = port, args(2) = checkpoint directory
    val host          = if (args.length > 0) args(0) else "localhost"
    val port          = if (args.length > 1) args(1).toInt else 9999
    val checkpointDir = if (args.length > 2) args(2) else "/home/eeexiu/workspace/test_checkpoint"

    // Create the context with a 1 second batch size.
    val sparkConf = new SparkConf().setMaster("local[2]").setAppName("NetworkWordCount")
    val ssc = new StreamingContext(sparkConf, Seconds(1))
    // updateStateByKey is stateful, so Spark requires a checkpoint directory
    // to periodically truncate the state RDD lineage.
    ssc.checkpoint(checkpointDir)

    // Create a socket stream on the target host:port and count the words in the
    // input stream of '\n'-delimited text (e.g. generated by 'nc').
    // Note: a storage level without replication is fine only when running locally;
    // replication is necessary in a distributed scenario for fault tolerance.
    val lines = ssc.socketTextStream(host, port, StorageLevel.MEMORY_AND_DISK_SER)
    val words = lines.flatMap(_.split(" "))
    val wordCounts = words.map(word => (word, 1))

    /** Differences between updateStateByKey and mapWithState:
      *
      * (1) updateStateByKey only operates on the values, while mapWithState may
      *     operate on both keys and values.
      *
      * (2) updateStateByKey re-emits the accumulated result for EVERY key in every
      *     batch — e.g. if only the word "eeexiu" arrives in this batch, it still
      *     recomputes and prints all previously seen words, even though their
      *     counts did not change.
      *
      * (3) mapWithState only touches the keys updated in the current batch — words
      *     that did not change are not recomputed.
      */
    val runningCounts = wordCounts.updateStateByKey[Int](updateFunction _)
    runningCounts.print() // side-effecting 0-arity method: keep the parentheses

    ssc.start()
    ssc.awaitTermination()
  }

  /** State-update function for updateStateByKey.
    *
    * @param newValues    the counts (all 1s) observed for a key in the current batch;
    *                     empty when the key received no new data this batch
    * @param runningCount the accumulated count for the key from previous batches, if any
    * @return the new accumulated count, wrapped in Some so the key's state is retained
    */
  def updateFunction(newValues: Seq[Int], runningCount: Option[Int]): Option[Int] =
    Some(newValues.sum + runningCount.getOrElse(0))
}
