package com.xiaoniu.kafkastreaming

import kafka.common.TopicAndPartition
import kafka.serializer.StringDecoder
import org.apache.spark.streaming.kafka.{HasOffsetRanges, KafkaCluster, KafkaUtils, OffsetRange}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
  *
  */
/**
  * Direct (receiver-less) Kafka stream example: word-counts each 5-second
  * batch and manually commits the consumed offsets back to the Kafka
  * cluster under `groupID` after each batch.
  *
  * Offsets are recovered by casting each batch RDD to HasOffsetRanges
  * (KafkaRDD extends HasOffsetRanges); each OffsetRange describes the
  * consumed range [fromOffset, untilOffset) for one TopicAndPartition.
  */
object BdKafkaDirectStream {

  val groupID = "sparkstreaming"
  val kafkaParams = Map[String, String](
    "metadata.broker.list" -> "192.168.11.128:9092",
    "group.id" -> groupID)

  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setAppName("BdKafkaDirectStream").setMaster("local[2]")
    val sc = new SparkContext(sparkConf)
    sc.setLogLevel("WARN")
    val ssc = new StreamingContext(sc, Seconds(5))
    val topicsSet = Set("xnsparkstreaming")

    val messages = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, topicsSet)

    // Word count over each batch's message values (tuple._2 is the payload).
    messages.flatMap(_._2.split(" ")).map(word => (word, 1)).reduceByKey(_ + _).print()

    // foreachRDD's body runs on the driver, so KafkaCluster can be created
    // once here instead of once per batch.
    val kc = new KafkaCluster(kafkaParams)

    messages.foreachRDD(rdd => {
      // KafkaRDD extends HasOffsetRanges; offsetRanges is built from the
      // (TopicAndPartition -> fromOffset/untilOffset) map of this batch.
      val offsetsList: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
      for (offsets <- offsetsList) {
        // Use the topic recorded in the OffsetRange itself rather than a
        // hard-coded name, so the commit stays correct if topicsSet grows.
        val topicAndPartition = TopicAndPartition(offsets.topic, offsets.partition)
        val o = kc.setConsumerOffsets(groupID, Map(topicAndPartition -> offsets.untilOffset))
        // Pattern match instead of the partial o.left.get.
        o match {
          case Left(err) => println(s"Error updating the offset to Kafka cluster: $err")
          case Right(_)  => // offsets committed successfully
        }
      }
    })

    ssc.start()
    // awaitTermination blocks until the context is stopped, so an extra
    // ssc.stop() afterwards is redundant.
    ssc.awaitTermination()
  }

}
