package com.atguigu.bigdata.spark.streaming

import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}

// NOTE(review): object name has a typo ("Sprak" -> "Spark") but is kept as-is,
// since renaming the entry point would break any launcher/config referencing it.
object SprakStreaming09_Window1 {

  /**
   * Demonstrates an incremental sliding-window word count using
   * `reduceByKeyAndWindow` with an inverse-reduce function.
   *
   * Window semantics:
   *  - Batch (collection) interval: 3 seconds.
   *  - Window length: 6 seconds (an integer multiple of the batch interval).
   *  - Slide interval: 3 seconds — computation is triggered per slide, not per batch.
   *
   * When the slide interval is smaller than the window length, consecutive
   * windows overlap. The inverse-reduce variant exploits this: instead of
   * re-aggregating the whole window, it adds the newly entered batch and
   * subtracts the batch that just left. This requires checkpointing.
   */
  def main(args: Array[String]): Unit = {

    val conf: SparkConf = new SparkConf().setAppName("window").setMaster("local[*]")
    val ssc = new StreamingContext(conf, Seconds(3))
    // Checkpointing is mandatory for the inverse-reduce window, because Spark
    // must remember intermediate window state across batches.
    ssc.checkpoint("cp")

    // Read lines from a socket source (run e.g. `nc -lk 9999` to feed it).
    val socketData: ReceiverInputDStream[String] = ssc.socketTextStream("localhost", 9999)
    // Split each line into words.
    val words: DStream[String] = socketData.flatMap(_.split(" "))
    // Pair each word with an initial count of 1.
    val wordToOne: DStream[(String, Int)] = words.map((_, 1))

    // Incremental window aggregation:
    //  - reduce func: merges counts for batches entering the window,
    //  - inverse func: removes counts for batches leaving the window.
    // FIX: without `filterFunc`, a key whose count drops to 0 is never evicted
    // from the window state — it lingers forever and keeps being emitted as
    // (word, 0). Filtering out non-positive counts releases that state.
    val wordCount: DStream[(String, Int)] = wordToOne.reduceByKeyAndWindow(
      (x: Int, y: Int) => {
        println(x + "+" + y)
        x + y
      },
      (x: Int, y: Int) => {
        println(x + "-" + y)
        x - y
      },
      Seconds(6), Seconds(3),
      filterFunc = _._2 > 0
    )
    wordCount.print()

    ssc.start()
    ssc.awaitTermination()
  }
}
