package org.example

import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.streaming.{Seconds, StreamingContext}

object SparkStreaming_1 {

  /**
   * Stateful streaming word count.
   *
   * Reads lines of text from a TCP socket, splits them into words, and
   * maintains a running total per word across all micro-batches using
   * `updateStateByKey`. Totals are printed to stdout every batch interval.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    // local[2]: one thread for the socket receiver, at least one for processing.
    val sparkConf = new SparkConf().setMaster("local[2]").setAppName("NetworkWordCount")
    // Micro-batch interval: aggregate once every 5 seconds.
    val ssc = new StreamingContext(sparkConf, Seconds(5))

    // Checkpoint directory is mandatory for updateStateByKey — it is where
    // the accumulated per-key state is persisted between batches.
    ssc.checkpoint("checkpoint_dir")

    // Ingest raw lines from the socket source.
    val dStream: ReceiverInputDStream[String] = ssc.socketTextStream("172.16.104.23", 8888)

    // Split each line into words and pair every word with an initial count of 1.
    val wordCounts: DStream[(String, Int)] =
      dStream.flatMap(_.split(" ")).map((_, 1))

    // Merge the current batch's counts for a word with its previously
    // accumulated total (None on the first occurrence of the word).
    val updateFunc = (values: Seq[Int], state: Option[Int]) => {
      val currentCount = values.sum
      val previousCount = state.getOrElse(0)
      Some(currentCount + previousCount)
    }

    // Running totals across all batches since the job started.
    val runningCounts = wordCounts.updateStateByKey(updateFunc)

    // Print the top of the updated state each batch.
    runningCounts.print()

    ssc.start()
    // Block the main thread until the streaming job is stopped or fails.
    ssc.awaitTermination()
  }

}
