package org.example

import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}

object SparkStreData1_1 {

  /** Streaming word count with stop-word filtering.
    *
    * Reads whitespace-separated text from a TCP socket, keeps only purely
    * alphabetic tokens, lower-cases them, drops common stop words, and prints
    * per-batch counts every 5 seconds.
    *
    * Usage: SparkStreData1_1 [host] [port]
    * Defaults to localhost:9999 when no arguments are given, so existing
    * invocations keep working unchanged.
    *
    * Earlier experiments are kept below as commented-out reference code:
    * (1) basic per-batch word count, (2) running totals via updateStateByKey.
    */
  def main(args: Array[String]): Unit = {
    // --- Experiment 1: basic per-batch word count (kept for reference) ---
//    val sparkConf = new SparkConf().setMaster("local[2]").setAppName("NetworkWordCount")
//      // one count every 5 seconds
//    val ssc = new StreamingContext(sparkConf, Seconds(5)) // listen and read data (line by line)
////    val lines = ssc.socketTextStream("192.168.64.129", 9999)
//// data processing
//    val dStream: ReceiverInputDStream[String] =
//      ssc.socketTextStream("172.16.104.23",9999)
//    val lines: DStream[String] = dStream.flatMap(_.split(","))
//    val wordDStream: DStream[(String,Int)] = lines.map((_,1))
//
//    val result = wordDStream.reduceByKey(_+_)
//
//
//    // output the results
//    result.print()
//    ssc.start()
//    ssc.awaitTermination()
//    ssc.stop()

    // --- Experiment 2: cumulative word counts across batches (kept for reference) ---

//    val sparkConf = new SparkConf().setMaster("local[2]").setAppName("NetworkWordCount")
//    // one count every 5 seconds
//
//    val ssc = new StreamingContext(sparkConf, Seconds(5))
//    // set a checkpoint directory to persist the running state
//    ssc.checkpoint("checkpoint_dir")
//    // data processing
//    val dStream: ReceiverInputDStream[String] =
//      ssc.socketTextStream("172.16.104.23",9999)
//    // word counting
//    val lines: DStream[String] = dStream.flatMap(_.split(" "))
//    // define a function to accumulate each word's occurrence count
//    // count each word's occurrences (current batch)
//    val wordCounts = lines.map(word => (word, 1))
//    // accumulator function: add this batch's count to the saved state
//    val updateFunc = (values: Seq[Int], state: Option[Int]) => {
//    val currentCount = values.sum
//    val previousCount = state.getOrElse(0)
//     Some(currentCount + previousCount)
//    }
//    // use updateStateByKey to maintain the running totals
//     val runningCounts = wordCounts.updateStateByKey(updateFunc)
//    // print the results
//    runningCounts.print()
//    ssc.start()
//    ssc.awaitTermination()

    // --- Experiment 3 (active): word count with stop-word filtering ---

    // Socket source is configurable via args; defaults preserve the old
    // hard-coded localhost:9999 behavior.
    val host = if (args.length > 0) args(0) else "localhost"
    val port = if (args.length > 1) args(1).toInt else 9999

    val conf = new SparkConf().setAppName("FilteredWordCount").setMaster("local[2]")
    // 5-second micro-batches
    val ssc = new StreamingContext(conf, Seconds(5))
    val lines = ssc.socketTextStream(host, port)

    // Words to exclude from the count
    val stopWords = Set("a", "an", "the", "this", "that")

    val words = lines.flatMap(_.split("\\s+"))
      .filter(_.matches("[a-zA-Z]+"))  // keep purely alphabetic tokens only
      .map(_.toLowerCase)              // normalize case before counting
      .filter(!stopWords.contains(_))  // drop stop words

    val wordCounts = words.map(word => (word, 1)).reduceByKey(_ + _)
    wordCounts.print()

    ssc.start()
    ssc.awaitTermination()
  }

}
