package sparkstreaming.nineteenthday2.lesson1

import org.apache.spark.streaming.dstream.ReceiverInputDStream
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{HashPartitioner, SparkConf, SparkContext}

/**
  * Sliding-window word count: every 4 seconds, prints the word counts
  * observed over the most recent 6 seconds of socket input.
  *
  * NOTE: the window duration (6s) and slide duration (4s) must each be an
  * integer multiple of the batch interval (2s); Spark validates this at
  * runtime when the window operation is created (not at compile time).
  */
object WindowOperatorTest {
  def main(args: Array[String]): Unit = {
    // getSimpleName on a Scala object ends with '$' (companion class name);
    // strip it so the Spark UI shows a clean app name.
    val conf = new SparkConf()
      .setAppName(this.getClass.getSimpleName.stripSuffix("$"))
      .setMaster("local[4]")
    val sc = new SparkContext(conf)
    // Batch interval: one micro-batch every 2 seconds.
    val ssc: StreamingContext = new StreamingContext(sc, Seconds(2))

    // Lines of text pushed over a raw TCP socket
    // (e.g. started with `nc -lk 9999` on host hadoop01).
    val dstream: ReceiverInputDStream[String] = ssc.socketTextStream("hadoop01", 9999)

    /**
      * reduceByKeyAndWindow parameters:
      *   reduceFunc     — associative function merging counts per key
      *   windowDuration — window size (6s)
      *   slideDuration  — how often the window result is computed (4s)
      *   partitioner    — partitioning of the shuffled keys
      */
    val wordcountDStream = dstream
      .flatMap(_.split(","))  // input words are comma-separated on each line
      .map((_, 1))
      .reduceByKeyAndWindow(_ + _, Seconds(6), Seconds(4), new HashPartitioner(4))

    wordcountDStream.print()

    ssc.start()
    // Blocks until the streaming context is stopped externally (or errors out);
    // a trailing ssc.stop() here would be unreachable in normal flow, so it
    // has been removed.
    ssc.awaitTermination()
  }

}
