package com.study.bigdata.spark.streaming

import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
 * Demonstrates Spark Streaming's window operation.
 *
 * A window groups several batch (collection) intervals together and treats
 * them as one unit of computation. The window duration must be an integer
 * multiple of the batch interval. By default a window slides by one batch
 * interval, which makes consecutive windows overlap and count the same
 * records more than once; setting the slide duration equal to the window
 * duration (as done here: 6s window, 6s slide) yields tumbling,
 * non-overlapping windows.
 */
object SparkStreaming06_State_Window {

  def main(args: Array[String]): Unit = {
    // Local mode with all cores; 3-second batch (collection) interval.
    val conf = new SparkConf().setMaster("local[*]").setAppName("SparkStreaming")
    val ssc  = new StreamingContext(conf, Seconds(3))

    // Read lines from a socket (e.g. `nc -lk 9999`) and pair each line with 1.
    val pairs = ssc.socketTextStream("localhost", 9999).map(line => (line, 1))

    // Tumbling window: 6s wide, sliding every 6s, so no record is counted twice.
    // Within each window, sum the counts per key and print the result.
    pairs
      .window(Seconds(6), Seconds(6))
      .reduceByKey((a, b) => a + b)
      .print()

    ssc.start()            // begin consuming the stream
    ssc.awaitTermination() // block until the streaming job is stopped
  }
}
