package streaming

import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord, OffsetAndMetadata, OffsetCommitCallback}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010.{CanCommitOffsets, ConsumerStrategies, HasOffsetRanges, KafkaUtils, LocationStrategies, LocationStrategy, OffsetRange}
import org.apache.spark.streaming.{Duration, StreamingContext}

import java.util

/**
 * Demo: Spark Streaming direct Kafka consumer with manual offset commits.
 *
 * Reads topic "qihong" with auto-commit disabled, counts each micro-batch,
 * and commits offsets back to Kafka asynchronously after each batch.
 * Entry point only — requires a reachable Kafka broker at node1:9092.
 */
object lesson05_spark_kafka_consumer {

  def main(args: Array[String]): Unit = {

    val conf: SparkConf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("kafka_test")

    // Enable backpressure so the ingestion rate adapts to processing speed.
    conf.set("spark.streaming.backpressure.enabled", "true")
    // Max records read per second from each partition.
    conf.set("spark.streaming.kafka.maxRatePerPartition", "10")
    // Number of records pulled by the very first batch at startup.
    conf.set("spark.streaming.backpressure.initialRate", "2")
    // When set, the stream shuts down gracefully on JVM exit.
    conf.set("spark.streaming.stopGracefullyOnShutdown", "true")
    // Minimum per-partition rate (default is 1).
    conf.set("spark.streaming.kafka.minRatePerPartition", "1")
    // Max consecutive retries (default is 1).
    conf.set("spark.streaming.kafka.maxRetries", "1")

    // 1-second micro-batches.
    val ssc = new StreamingContext(conf, Duration(1000))
    ssc.sparkContext.setLogLevel("ERROR")

    // Auto-commit is disabled: offsets are committed manually after each
    // batch in the foreachRDD below, so data is not marked consumed before
    // the batch has been processed.
    val kafkaConfig = Map(
      (ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "node1:9092"),
      (ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false"),
      (ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"),
      (ConsumerConfig.GROUP_ID_CONFIG, "group3"),
      (ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer]),
      (ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer])
      // (ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "1")  has no effect here:
      // the direct stream controls batch size via the rate settings above.
    )

    val kafkaStream: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream(
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](List("qihong"), kafkaConfig)
    )

    // Extract record values; log partition/offset for partition 1 as a
    // debugging aid while the job runs.
    val resStream: DStream[String] = kafkaStream.map(record => {
      if (record.partition() == 1) {
        println(s"${record.partition()}   ${record.offset()}")
      }
      record.value()
    })

    resStream.foreachRDD(rdd => {
      println("rdd count:" + rdd.count())
    })

    // Commit offsets for each batch. Note: commit must be driven from the
    // original kafkaStream (it implements CanCommitOffsets), not resStream.
    kafkaStream.foreachRDD(
      rdd => {
        val ranges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
        kafkaStream.asInstanceOf[CanCommitOffsets].commitAsync(ranges, new OffsetCommitCallback {
          override def onComplete(offsets: util.Map[TopicPartition, OffsetAndMetadata], exception: Exception): Unit = {
            // BUGFIX: the branches were previously inverted — the success
            // path called exception.getMessage on a null exception (NPE on
            // every successful commit), and the failure path never printed
            // the exception at all.
            if (exception != null) {
              // Commit failed: report the error and the ranges we tried to commit.
              println("offset commit failed: " + exception.getMessage)
              ranges.foreach(println)
            } else {
              // Commit succeeded: report what was committed per partition.
              println("--------------")
              val iter: util.Iterator[TopicPartition] = offsets.keySet().iterator()
              while (iter.hasNext) {
                val tp: TopicPartition = iter.next()
                val om: OffsetAndMetadata = offsets.get(tp)
                println(s"${tp.partition()}...${om.offset()}")
              }
            }
          }
        })
      }
    )

    ssc.start()
    ssc.awaitTermination()
  }

}