package Demo3

import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Stateful word count over a socket text stream.
  *
  * `updateStateByKey` maintains a running total per key across batches: Spark
  * checkpoints the previous state and merges each new batch's values into it,
  * so counts accumulate for the lifetime of the streaming job.
  *
  * Usage: [host] [port] — defaults to "hadoop1" 2222 (the original hard-coded
  * endpoint), so existing invocations keep working unchanged.
  */
object Streamin_updateStateByKeyScala {
  def main(args: Array[String]): Unit = {
    // Windows-only workaround so Hadoop native libs resolve locally.
    System.setProperty("hadoop.home.dir", "E://hadoop-liyadong//hadoop-2.7.1")

    // Allow host/port override from the command line; defaults preserve the
    // previous hard-coded behavior.
    val host = if (args.length > 0) args(0) else "hadoop1"
    val port = if (args.length > 1) args(1).toInt else 2222

    // Merge the current batch's counts with the previously saved state for
    // this key. `state` is None the first time a key is seen.
    val updateFunc = (values: Seq[Int], state: Option[Int]) => {
      Some(values.sum + state.getOrElse(0))
    }

    val conf = new SparkConf()
      .setAppName("Streamin_updateStateByKeyScala")
      .setMaster("local[2]")
      .set("spark.testing.memory", "2147480000")
    val sc = new SparkContext(conf)
    val ssc = new StreamingContext(sc, Seconds(5))
    // Checkpointing is mandatory for updateStateByKey: state must survive
    // across micro-batches.
    ssc.checkpoint(".")

    val linesDStream = ssc.socketTextStream(host, port)
    val wordDStream = linesDStream.flatMap(_.split(" ")).map(x => (x, 1))
    val stateDStream = wordDStream.updateStateByKey(updateFunc)
    stateDStream.print()

    ssc.start()
    ssc.awaitTermination()
  }
}
