package scalaKafka

import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{HashPartitioner, SparkConf, SparkContext}

object WordCountByKafkaWithState {
    /**
      * State-update function passed to `updateStateByKey`.
      *
      * Takes an iterator of (word, counts observed in the current batch,
      * previously accumulated total) and returns an iterator of
      * (word, new total) — the batch sum plus the prior total (0 if the
      * word has not been seen before).
      */
    val updateFunc = (iter: Iterator[(String, Seq[Int], Option[Int])]) => {
        iter.map { case (word, batchCounts, prevTotal) =>
            (word, batchCounts.sum + prevTotal.getOrElse(0))
        }
    }

    def main(args: Array[String]): Unit = {
        val conf = new SparkConf().setAppName("UpdateStateByKey").setMaster("local[*]")
        val ssc = new StreamingContext(conf, Seconds(2))

        // updateStateByKey requires checkpointing so the accumulated state
        // can be persisted across batches (and recovered after a restart).
        ssc.checkpoint("./checkpoint")

        val zkQuorum = "node2:2181,node3:2181,node4:2181"
        val groupId = "g66"
        // Map of topic name -> number of receiver threads for that topic.
        val topic = Map[String, Int]("tukkUpdate" -> 1)

        // Receiver-based Kafka DStream; each element is a (key, message) tuple.
        val data = KafkaUtils.createStream(ssc, zkQuorum, groupId, topic)

        // Classic word count over the message payloads.
        val lines = data.map(_._2)
        val words = lines.flatMap(_.split(" "))
        val wordAndOne = words.map((_, 1))
        // Arguments: state-update function, partitioner for the state RDDs,
        // and whether to remember the partitioner across batches.
        val reduced = wordAndOne.updateStateByKey(
            updateFunc,
            new HashPartitioner(ssc.sparkContext.defaultParallelism),
            true)

        reduced.print()

        // Start the computation exactly once and block until termination.
        // NOTE: the original code called start()/awaitTermination() a second
        // time after this pair; start() on an already started (or stopped)
        // StreamingContext throws IllegalStateException, so the duplicate
        // calls were removed.
        ssc.start()
        ssc.awaitTermination()
    }
}
