package cn.lagou.test

import cn.lagou.test.MyKafkaUtils.getKafkaConsumerParams
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.log4j.{Level, Logger}
import org.apache.spark.{SparkConf, TaskContext}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{CanCommitOffsets, ConsumerStrategies, HasOffsetRanges, KafkaUtils, LocationStrategies, OffsetRange}

object KafkaStream {
  /**
   * Direct-stream Kafka demo: every 2 seconds reads the new records from topic
   * "topicB", prints the offset range handled by each partition/task, logs the
   * end offsets per topic/partition, and then commits the consumed offsets back
   * to Kafka — giving at-least-once semantics across restarts.
   */
  def main(args: Array[String]): Unit = {
    // Silence Spark's verbose INFO logging; keep only errors.
    Logger.getLogger("org").setLevel(Level.ERROR)
    val conf: SparkConf = new SparkConf()
      .setMaster("local[2]")
      .setAppName(this.getClass.getCanonicalName)
    val ssc = new StreamingContext(conf, Seconds(2))

    val kafkaParams: Map[String, Object] = getKafkaConsumerParams()
    val topics: Array[String] = Array("topicB")

    // PreferBrokers: only if your executors are on the same nodes as the Kafka brokers.
    // PreferConsistent: the usual choice — distributes partitions evenly across executors.
    // PreferFixed: pin particular TopicPartitions to particular hosts for uneven loads.
    val dstream: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream[String, String](ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](topics, kafkaParams))
    // Subscribe / SubscribePattern: subscribe to topics with automatic partition discovery
    // (partitions added while the stream runs are picked up).
    // Assign: a fixed partition set, no partition discovery — rarely needed.
    dstream.foreachRDD { (rdd, time) =>
      if (!rdd.isEmpty()) {
        // One OffsetRange per Kafka partition; the array index equals the RDD partition id.
        val offsetRanges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

        // Each task prints the offset range it is responsible for. TaskContext.partitionId
        // maps 1:1 onto the offsetRanges index (per the Spark Kafka integration guide).
        rdd.foreachPartition { _ =>
          val o: OffsetRange = offsetRanges(TaskContext.get.partitionId)
          println(s"${o.topic} ${o.partition} ${o.fromOffset} ${o.untilOffset}")
        }

        // Regroup the ranges as topic -> (partition -> untilOffset) for logging.
        // (Was previously built via an intermediate tuple array whose name shadowed
        // an inner `offsets` val; build it directly from the ranges instead.)
        val offsetsByTopic: Map[String, Map[Int, Long]] = offsetRanges
          .groupBy(_.topic)
          .map { case (topic, ranges) =>
            topic -> ranges.map(r => r.partition -> r.untilOffset).toMap
          }
        for ((_, partitionAndOffset) <- offsetsByTopic) {
          // Same output shape as before: (partition, untilOffset) as a String pair.
          partitionAndOffset.foreach { case (partition, offset) =>
            println((partition.toString, offset.toString))
          }
        }
        println(s"****** rdd.count = ${rdd.count()}; time = $time ***********")

        // Commit the processed offsets back to Kafka *after* the batch's work is done.
        // Without this, nothing persists progress and a restart replays from
        // auto.offset.reset. Assumes enable.auto.commit=false in kafkaParams —
        // TODO confirm in getKafkaConsumerParams.
        dstream.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
      }
    }

    ssc.start()
    ssc.awaitTermination()
  }
}
