package org.example

import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}

object spackstresm_1 {

  /**
   * Stateful network word count with a per-batch top-5 report.
   *
   * Reads whitespace-separated words from a TCP text socket, maintains a
   * cumulative count per word across all batches via `updateStateByKey`,
   * and prints the five most frequent words every batch interval.
   *
   * @param args optional overrides: args(0) = source host, args(1) = source port.
   *             Defaults preserve the original hard-coded values.
   */
  def main(args: Array[String]): Unit = {

    // Socket text source location; overridable from the command line,
    // defaulting to the original hard-coded host/port for backward compatibility.
    val host = if (args.length > 0) args(0) else "172.16.104.38"
    val port = if (args.length > 1) args(1).toInt else 8888

    val sparkConf = new SparkConf().setMaster("local[2]").setAppName("NetworkWordCount")
    // Batch interval: one micro-batch every 5 seconds.
    val ssc = new StreamingContext(sparkConf, Seconds(5))
    // Checkpoint directory is required by updateStateByKey to persist running state.
    ssc.checkpoint("checkpoint_dir")

    val dStream: ReceiverInputDStream[String] = ssc.socketTextStream(host, port)

    // Split each line into words and pair every word with an initial count of 1.
    val words: DStream[String] = dStream.flatMap(_.split(" "))
    val wordCounts: DStream[(String, Int)] = words.map(word => (word, 1))

    // State update function: add this batch's occurrences to the running total.
    // NOTE(review): this always returns Some, so every key is retained forever;
    // for a long-running job consider returning None for idle keys (or mapWithState)
    // to bound state size.
    val updateFunc = (values: Seq[Int], state: Option[Int]) => {
      val currentCount = values.sum
      val previousCount = state.getOrElse(0)
      Some(currentCount + previousCount)
    }

    // Cumulative word counts across all batches seen so far.
    val runningCounts: DStream[(String, Int)] = wordCounts.updateStateByKey(updateFunc)

    // Top-5 words by cumulative count, recomputed each batch: swap to
    // (count, word) so sortByKey orders by count, sort descending, take 5,
    // then swap back to (word, count).
    val top5 = runningCounts.transform { rdd =>
      val byCountDesc = rdd
        .map { case (word, count) => (count, word) }
        .sortByKey(ascending = false)
      val top5Array = byCountDesc.take(5)
      rdd.sparkContext.parallelize(top5Array.map { case (count, word) => (word, count) })
    }

    // Print the current top-5 running counts every batch.
    top5.print()

    ssc.start()
    ssc.awaitTermination()
  }
}
