package com.niit.spark.streaming

import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, InputDStream, ReceiverInputDStream}
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
 * Date: 2025/5/22
 * Author: Ys
 * Description: Stateful word count over a socket text stream using
 *              updateStateByKey, with counts accumulated across batches.
 */
object StreamingUpdataStateByKey {

  /**
   * Entry point: runs a stateful word count over a socket text stream.
   *
   * Reads lines from localhost:9999 in 5-second batches, splits them into
   * words, and maintains a running per-word count across batches via
   * updateStateByKey. The accumulated state is checkpointed to disk so it
   * survives between micro-batches.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[*]").setAppName("StreamingKafka03")
    val ssc = new StreamingContext(conf, Seconds(5))
    ssc.sparkContext.setLogLevel("ERROR")
    // updateStateByKey requires a checkpoint directory: the per-key state
    // must be persisted (written to disk) between batches.
    ssc.checkpoint("BD2")

    val lines: ReceiverInputDStream[String] = ssc.socketTextStream("localhost", 9999)

    // Emit one (word, 1) pair per whitespace-separated token.
    val pairs: DStream[(String, Int)] =
      lines.flatMap(_.split(" ")).map(word => (word, 1))

    /*
     * updateStateByKey merges each batch's values into the stored state per key:
     *  - batchValues: the 1s seen for this key in the current batch
     *  - savedCount:  the count accumulated from previous batches
     *                 (None the first time a key appears)
     * Returning Some keeps the key's state for the next batch.
     */
    val runningCounts: DStream[(String, Int)] =
      pairs.updateStateByKey { (batchValues: Seq[Int], savedCount: Option[Int]) =>
        Option(savedCount.getOrElse(0) + batchValues.sum)
      }

    runningCounts.print()

    ssc.start()
    ssc.awaitTermination()
  }

}
