import java.lang

import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.{SparkConf, TaskContext}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.kafka010.{HasOffsetRanges, KafkaUtils, OffsetRange}
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import com.typesafe.config.ConfigFactory

/**
  * Demo: applying a window function changes the partitioning of the Spark RDDs.
  * Roughly, each pre-window partition set maps to
  * window_duration / batch_interval post-window partition sets.
  */
object WindowApp {

  def main(args: Array[String]): Unit = {

    // External config (kafka.brokers, kafka.topics) from application.conf.
    val conf = ConfigFactory.load()

    // Kryo is needed because Kafka's ConsumerRecord is not Java-serializable.
    val sparkConf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("windows")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .set("spark.default.parallelism", "10")

    // One micro-batch every 5 seconds.
    val ssc = new StreamingContext(sparkConf, Seconds(5))

    val kafkaParams: Map[String, Object] = Map[String, Object](
      "bootstrap.servers" -> conf.getString("kafka.brokers"),
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "transactional-example",
      // Offsets are managed manually in this demo, so auto-commit stays off.
      "enable.auto.commit" -> (false: lang.Boolean),
      "auto.offset.reset" -> "earliest"
    )

    val topics = conf.getString("kafka.topics").split(",").toSet

    val stream = KafkaUtils.createDirectStream(
      ssc, PreferConsistent, Subscribe[String, String](topics, kafkaParams)
    )

    // Driver-side mutable holder, overwritten once per batch in transform below.
    // NOTE(review): the copy each executor task reads is whatever snapshot was
    // serialized with that task's closure; under windowing it does NOT cover
    // every batch in the window — this demo exists to illustrate exactly that.
    var offsetRanges: Array[OffsetRange] = Array.empty[OffsetRange]

    stream.transform { rdd =>
      // transform's body runs on the driver each batch, so mutating the var
      // here is safe; only KafkaRDDs (pre-window) expose HasOffsetRanges.
      offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
      println("got offset ranges on the driver:\n" + offsetRanges.mkString("\n"))
      println(s"number of kafka partitions before windowing : ${offsetRanges.length}")
      println(s"number of spark partition before windowing:  ${rdd.partitions.length}")
      rdd
    }.window(Seconds(20), Seconds(5)).foreachRDD { rdd =>
      // A 20s window over 5s batches unions several batch RDDs, so this RDD
      // has the partitions of every batch in the window, not just the latest.
      println(s"number of spark partitions after window: ${rdd.partitions.length}")

      rdd.foreachPartition { iter =>
        // Runs on executors; offsetRanges here is the serialized driver snapshot.
        println("read offset ranges on the executor \n" + offsetRanges.mkString("\n"))
        println(s"this partition id ${TaskContext.getPartitionId()}")
        iter.foreach(println)
      }
    }

    ssc.start()
    ssc.awaitTermination()
  }

}
