package com.atguigu.stream

import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
 * SparkStreaming 窗口函数
 */
/**
 * Spark Streaming window example.
 *
 * Reads lines from a socket source ("linux1":9999), applies a sliding
 * window (window length 6s, slide interval 3s — both multiples of the 3s
 * batch interval, as required by Spark), and prints a word count over
 * each window.
 */
object TestSparkStreaming_Window {

  def main(args: Array[String]): Unit = {

    // Spark configuration; appName now matches this example's purpose
    // (was a copy-paste leftover: "TestSparkStreaming_WordCount").
    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("TestSparkStreaming_Window")

    // Streaming context with a 3-second batch (collection) interval.
    val streamingContext = new StreamingContext(sparkConf, Seconds(3))
    //streamingContext.sparkContext.setCheckpointDir("cp")

    // Receiver-based socket source: one String per line of input.
    val socketDStream: ReceiverInputDStream[String] = streamingContext.socketTextStream("linux1", 9999)

    // Sliding window over the stream.
    // First argument: window duration (must be a multiple of the batch interval).
    // Second argument: slide duration (must be a multiple of the batch interval).
    val windowDStream: DStream[String] = socketDStream.window(Seconds(6), Seconds(3))

    // Classic word count over each window's contents.
    val wordDStream: DStream[String] = windowDStream.flatMap(_.split(" "))

    val wordToCountDStream: DStream[(String, Int)] = wordDStream.map((_, 1))

    val reduceDStream: DStream[(String, Int)] = wordToCountDStream.reduceByKey(_ + _)

    // Print the first elements of each windowed RDD to the driver's stdout.
    reduceDStream.print()

    // Start the receiver and computation.
    streamingContext.start()

    // Keep the driver alive until the streaming computation is stopped
    // (externally or by error); do not use wait/sleep for this.
    streamingContext.awaitTermination()
  }
}
