package com.shujia.streaming

import org.apache.spark.streaming.{Durations, StreamingContext}
import org.apache.spark.streaming.dstream.ReceiverInputDStream
import org.apache.spark.{SparkConf, SparkContext}

object Code04Window {
  /**
   * Windowed word count over a socket text stream.
   *
   * Reads comma-separated words from master:8888, counts them over a
   * 10-second window sliding every 5 seconds, and prints each window's
   * counts to stdout.
   */
  def main(args: Array[String]): Unit = {
    // local[2]: at least 2 threads are needed locally — one for the socket
    // receiver, one for batch processing.
    val sc = new SparkContext(new SparkConf().setMaster("local[2]").setAppName("wordCountStreaming"))

    // batchDuration is the micro-batch interval. By default a window is a
    // "tumbling" window (window size == slide interval).
    val streamingContext = new StreamingContext(sc, Durations.seconds(5))

    // The incremental (invReduceFunc) variant of reduceByKeyAndWindow below
    // requires DStream checkpointing, which is only enabled through
    // StreamingContext.checkpoint(...). Calling sc.setCheckpointDir alone
    // configures RDD checkpointing and is NOT sufficient — the job would fail
    // at runtime with "requires checkpoint directory".
    // (This call also sets the SparkContext checkpoint dir internally.)
    streamingContext.checkpoint("spark_code/data/stream/checkpoint")

    val stream: ReceiverInputDStream[String] = streamingContext.socketTextStream("master", 8888)

    // Form batches with a window size of 10s and a slide interval of 5s.
    stream
      .flatMap((line: String) => line.split(","))
      .map((word: String) => (word, 1))
      /*
        Plain form:
        def reduceByKeyAndWindow(
          reduceFunc: (V, V) => V,  // the reduceByKey combine function
          windowDuration: Duration, // window size
          slideDuration: Duration   // slide interval
        )
       */
      //      .reduceByKeyAndWindow(
      //        (x: Int, y: Int) => {
      //          x + y
      //        }
      //        , Durations.seconds(10)
      //        , Durations.seconds(5)
      //      ).print()
      /**
       * Optimized (incremental) form — reuses the previous window's result:
       * new window = old window + entering batch (reduceFunc)
       *                         - leaving batch  (invReduceFunc)
       * def reduceByKeyAndWindow(
       *   reduceFunc: (V, V) => V,
       *   invReduceFunc: (V, V) => V,
       *   windowDuration: Duration,
       *   slideDuration: Duration = self.slideDuration,
       *   numPartitions: Int = ssc.sc.defaultParallelism,
       *   filterFunc: ((K, V)) => Boolean = null
       * )
       */
      .reduceByKeyAndWindow(
        // Prefer the explicit (non-shorthand) anonymous-function form inside
        // operators for readability.
        (x: Int, y: Int) => {
          x + y
        }
        // Inverse function: subtracts counts of the batch sliding out of the window.
        , (z: Int, x: Int) => {
          z - x
        }
        , Durations.seconds(10)
        , Durations.seconds(5)
      ).print()

    streamingContext.start()
    streamingContext.awaitTermination()
  }
}
