package com.atguigu0.streaming

import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
 * @description: Spark Streaming stateful word-count example: each 3s micro-batch's
 *               counts are merged into running per-word totals via updateStateByKey.
 * @time: 2020/6/15 20:54
 * @author: baojinlong
 **/
object WordCountStreaming3 {

  def main(args: Array[String]): Unit = {
    // State update function for updateStateByKey:
    //   `values` — this word's counts from the current batch,
    //   `state`  — the accumulated count from all previous batches.
    // Declared as Option[Int] (not Some[Int]) to match the updateStateByKey
    // contract, which also allows returning None to drop a key's state.
    val updateFunc: (Seq[Int], Option[Int]) => Option[Int] = (values: Seq[Int], state: Option[Int]) => {
      // Total occurrences of this word in the current batch
      val currentCount: Int = values.sum
      // Previously accumulated count (0 when the word is new)
      val previousCount: Int = state.getOrElse(0)
      // Carry the running total forward as the new state
      Some(currentCount + previousCount)
    }
    // SparkConf: local mode using all available cores
    val sparkConf: SparkConf = new SparkConf().setMaster("local[*]").setAppName("wordCount")
    // StreamingContext with a 3-second batch interval
    val ssc = new StreamingContext(sparkConf, Seconds(3))
    // updateStateByKey requires a checkpoint directory to persist state
    ssc.checkpoint("E:/test-data/input/ck32")

    // Create a DStream from a TCP socket source
    val line: ReceiverInputDStream[String] = ssc.socketTextStream("localhost", 9999)
    // Split each line into words
    val word: DStream[String] = line.flatMap(_.split(" "))
    // Pair each word with an initial count of 1
    val wordAndOne: DStream[(String, Int)] = word.map((_, 1))
    // Merge each batch's counts into the running per-key state
    val wordAndCount: DStream[(String, Int)] = wordAndOne.updateStateByKey(updateFunc)
    // Print a sample of each batch's results to stdout
    // (parentheses kept: print() is a side-effecting call)
    wordAndCount.print()
    // Start the streaming computation
    ssc.start()
    // Block until the computation is stopped or fails
    ssc.awaitTermination()
  }
}
