package com.atguigu.sparkstreaming.demos

import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
 * Created by Smexy on 2022/8/22
 *

 *
 *
 *    5秒1批，每间隔10s提交一次job，计算过去15秒的数据(会造成重复算)
 *
 *        默认情况下，不指定window,和slide，window和slide都等于batchDuration
 *
 *
 *   刚才:   ds.map(record => record.value())
 *            .window(Seconds(5),Seconds(10))
 *
 *   猜测:   在进行window，对 window大小进行了检查，可能认为刚才的时间是一个非法时间。进行强制更新。
 *          还需要进一步验证。
 */
object WindowDemo {

  def main(args: Array[String]): Unit = {

    // 5-second batch interval. Window and slide durations passed to window()
    // must be integer multiples of this value, or Spark throws at validation time.
    val streamingContext = new StreamingContext("local[*]", "wordcount", Seconds(5))

    // Kafka consumer configuration for the direct stream.
    // NOTE(review): enable.auto.commit = true means offsets are committed by the
    // consumer on its own schedule, independent of batch completion — fine for a
    // demo, but gives at-most-once semantics on failure.
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "hadoop102:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "sz220409test",
      "auto.offset.reset" -> "latest",
      "enable.auto.commit" -> "true"
    )

    val topics = Array("topicD")

    // Direct (receiver-less) Kafka stream; PreferConsistent spreads partitions
    // evenly across available executors.
    val ds: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](
      streamingContext,
      PreferConsistent,
      Subscribe[String, String](topics, kafkaParams)
    )

    // Intent (per the file header): every 10 s, submit a job computing the last
    // 15 s of data, so consecutive windows overlap by 5 s (deliberate
    // re-computation).
    // BUG FIX: the original call was window(Seconds(5), Seconds(10)) — a 5 s
    // window sliding every 10 s, which silently SKIPS 5 s of data on every slide
    // instead of overlapping. The signature is window(windowDuration,
    // slideDuration); 15/10 matches the documented intent and both values are
    // multiples of the 5 s batch interval, as Spark requires.
    ds.map(record => record.value())
      .window(Seconds(15), Seconds(10))
      // window() may be called anywhere in the chain before the output operation.
      .print(10000)

    streamingContext.start()

    // Block the main thread until the streaming context is stopped.
    streamingContext.awaitTermination()
  }

}
