package com.dtkavin.sparkstreaming

import org.apache.spark.{HashPartitioner, SparkConf}
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Milliseconds, StreamingContext}

/**
  * Created by IntelliJ IDEA.
  * Programmer : John Zn
  * Date : 2016/4/20 0020
  * Time : 22:45
  * Description : Aggregates word counts across multiple RDD batches of a
  *               DStream, maintaining a running total per key.
  */
// Empty placeholder class paired with the companion object below; all of the
// application logic lives in the companion object's `main`. Kept so the
// class/object pair compiles as a conventional companion structure.
class StatisticSocketStreaming {

}

object StatisticSocketStreaming {

  /**
    * State-update function for `updateStateByKey`: for each key, adds the
    * counts observed in the current batch to the running total carried over
    * from previous batches (`None` the first time a key is seen).
    *
    * @param iterator triples of (word, counts in this batch, previous total)
    * @return (word, updated running total) pairs
    */
  def updateFun(iterator: Iterator[(String, Seq[Int], Option[Int])]): Iterator[(String, Int)] = {
    iterator.map { case (word, batchCounts, prevTotal) =>
      (word, batchCounts.sum + prevTotal.getOrElse(0))
    }
  }

  /**
    * Entry point. The previously hard-coded endpoints may now be overridden
    * via command-line arguments (defaults preserve the original behavior):
    *   args(0) = socket host       (default "log01")
    *   args(1) = socket port       (default 8888)
    *   args(2) = checkpoint dir    (default "d:/data/checkpoint")
    */
  def main(args: Array[String]): Unit = {
    val host = if (args.length > 0) args(0) else "log01"
    val port = if (args.length > 1) args(1).toInt else 8888
    val checkpointDir = if (args.length > 2) args(2) else "d:/data/checkpoint"

    val conf = new SparkConf().setAppName("StatisticSocketStreaming").setMaster("local[3]")
    // 5000 ms == 5-second micro-batches
    val ssc = new StreamingContext(conf, Milliseconds(5000))
    // updateStateByKey requires a checkpoint directory to persist key state
    ssc.checkpoint(checkpointDir)

    val socketStream = ssc.socketTextStream(host, port, StorageLevel.MEMORY_AND_DISK_SER)
    val res = socketStream
      .flatMap(_.split(" "))
      .map((_, 1))
      // third arg `true` = remember the partitioner across batches
      .updateStateByKey(updateFun _, new HashPartitioner(ssc.sparkContext.defaultParallelism), true)
    res.print()

    ssc.start()
    ssc.awaitTermination()
  }
}
