package com.shujia.stream

import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.streaming.{Durations, StreamingContext}

object Demo5Window {

  def main(args: Array[String]): Unit = {

    // Build the Spark environment: local mode with 2 threads
    // (at least one thread for the socket receiver, one for processing).
    val sparkConf: SparkConf = new SparkConf()
      .setMaster("local[2]")
      .setAppName("wc")

    val sparkContext = new SparkContext(sparkConf)

    // Streaming context with a 5-second batch interval:
    // a micro-batch is collected and computed every 5 seconds.
    val streamingContext = new StreamingContext(sparkContext, Durations.seconds(5))

    // Checkpointing is required by the incremental (inverse-reduce)
    // window operation used below.
    streamingContext.checkpoint("Spark/data/checkpoint")

    // Receive lines of text from a socket on host "master", port 8888.
    val lines: ReceiverInputDStream[String] = streamingContext.socketTextStream("master", 8888)

    // Split each line on "," and pair every word with a count of 1.
    val wordPairs: DStream[(String, Int)] = lines.flatMap(_.split(",")).map((_, 1))

    /*
     * Count words over the last 15 seconds, recomputed every 5 seconds.
     * The window length and the slide interval must both be whole
     * multiples of the batch interval.
     *
     * Naive form (re-aggregates the entire window on every slide):
     *
     * val windowedCounts: DStream[(String, Int)] = wordPairs.reduceByKeyAndWindow(
     *   (x: Int, y: Int) => x + y,   // aggregate function
     *   Durations.seconds(15),       // window length
     *   Durations.seconds(5)         // slide interval
     * )
     */

    /*
     * When consecutive windows overlap, the naive form recomputes the
     * overlapping data on every slide. The incremental form below
     * optimizes this: it adds the batch that just entered the window
     * and subtracts the batch that just left it.
     */
    val windowedCounts: DStream[(String, Int)] = wordPairs.reduceByKeyAndWindow(
      (x: Int, y: Int) => x + y, // add counts from the batch entering the window
      (i: Int, j: Int) => i - j, // subtract counts from the batch leaving the window
      Durations.seconds(15), // window length
      Durations.seconds(5) // slide interval
    )

    // The inverse-reduce form can retain keys whose count has dropped
    // to 0; filter those out before printing.
    windowedCounts
      .filter(_._2 != 0)
      .print()

    streamingContext.start()
    streamingContext.awaitTermination()
    streamingContext.stop()

  }

}
