package org.example

import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
 * Stateful streaming word count.
 *
 * Watches a directory for newly created text files, maintains a running
 * per-word count across batches via `updateStateByKey`, and prints the
 * five most frequent words every batch interval.
 */
object sapckst2 {

  def main(args: Array[String]): Unit = {

    // local[2]: one core for the receiver-less file source scan, one for processing.
    val sparkConf = new SparkConf().setMaster("local[2]").setAppName("NetworkWordCount")
    val ssc = new StreamingContext(sparkConf, Seconds(5))
    // Checkpointing is mandatory for stateful operations like updateStateByKey.
    ssc.checkpoint("checkpoint_dir")

    // BUG FIX: textFileStream monitors a *directory* for files created after the
    // job starts; pointing it at a single pre-existing file yields an empty stream.
    // Drop new text files into this directory while the job runs.
    val dStream = ssc.textFileStream("src/main/resources")

    // Split each line into words (renamed from the misleading `lines`).
    val words: DStream[String] = dStream.flatMap(_.split(" "))
    val wordCounts = words.map(word => (word, 1))

    // Merge this batch's counts for a word with its accumulated total.
    // `values` holds the new counts seen in the current batch; `state` is the
    // previous running total (None on first sight of the word).
    val updateFunc = (values: Seq[Int], state: Option[Int]) => {
      val currentCount = values.sum
      val previousCount = state.getOrElse(0)
      Some(currentCount + previousCount)
    }

    val runningCounts: DStream[(String, Int)] = wordCounts.updateStateByKey(updateFunc)

    // Top 5 words by running count. RDD.top with an Ordering on the count avoids
    // the full shuffle-sort that sortByKey(false).take(5) would perform.
    val top5 = runningCounts.transform { rdd =>
      val top5Array = rdd.top(5)(Ordering.by { case (_, count) => count })
      rdd.sparkContext.parallelize(top5Array)
    }

    // Print the top-5 result each batch.
    top5.print()

    ssc.start()
    ssc.awaitTermination()
  }
}