package com.peng.sparktest.sparkstreaming

import java.util

import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord, OffsetAndMetadata, OffsetCommitCallback}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.{Duration, StreamingContext}

object StreamTest04_StreamByKafka {
  /**
   * Demo of consuming Kafka with Spark Streaming's direct-stream API
   * (`KafkaUtils.createDirectStream`) using manual offset management:
   * auto-commit is disabled and offsets are committed back to Kafka
   * asynchronously from the driver after each micro-batch, via
   * `CanCommitOffsets.commitAsync`. Also sketches (in comments) the
   * alternative of persisting data + offsets to a third-party store
   * inside one transaction for exactly-once semantics.
   */
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf()
      .setAppName("stream_on_kafka")
      .setMaster("local[2]")

    // Backpressure: if a batch takes too long to process, Spark adapts the
    // number of records pulled for subsequent batches.
    conf.set("spark.streaming.backpressure.enabled", "true")
    // Upper bound on records pulled per Kafka partition (per second).
    conf.set("spark.streaming.kafka.maxRatePerPartition", "1")
    //        conf.set("spark.streaming.backpressure.initialRate","1")
    //    conf.set("spark.streaming.stopGracefullyOnShutdown","true")

    // 1-second micro-batches.
    val context = new StreamingContext(conf, Duration(1000))

    context.sparkContext.setLogLevel("ERROR")

    val map: Map[String, Object] = Map(
      (ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "dream01:9092,dream02:9092,dream03:9092"),
      (ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer]),
      (ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer]),
      (ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"),
      // Offsets must be maintained manually: either (1) committed back to
      // Kafka, or (2) persisted in a third-party store.
      (ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false"),
      (ConsumerConfig.GROUP_ID_CONFIG, "group666")
      // MAX_POLL_RECORDS has no effect for consumers driven by Spark;
      // use conf.set("spark.streaming.kafka.maxRatePerPartition","1") instead.
      //      (ConsumerConfig.MAX_POLL_RECORDS_CONFIG,"1")
    )


    /**
     * If offset persistence is delegated to a third-party store (e.g. MySQL),
     * previously persisted offsets can be restored like this — one entry per
     * topic-partition, mapping to the offset to resume from.
     */
    val mapsql = Map[TopicPartition, Long](
      (new TopicPartition("from mysql topic", 0), 33),
      (new TopicPartition("from mysql topic", 1), 32)
    )


    val stream: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream(
      context,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe(Array("topic02"), map)
      //      ConsumerStrategies.Subscribe(Array("topic02"), map,mapsql) // seek each topic-partition to the offsets restored in mapsql
    )


    //    stream.print() // cannot call print() directly on the KafkaUtils stream; it must be transformed first

    // Project each ConsumerRecord into plain tuples so downstream output
    // operations (print) can serialize them.
    val resStream: DStream[(String, (String, Long, String, Int))] = stream.map(record => {
      val key: String = record.key()
      val offset: Long = record.offset()
      val topic: String = record.topic()
      val value: String = record.value()
      val partition: Int = record.partition()

      (key, (value, offset, topic, partition))
    })

    // Even without committing, offsets are tracked in memory for the lifetime
    // of this run. Persisting offsets matters for the restart case: when the
    // application (driver) restarts, consumption resumes from the last
    // persisted offsets rather than from AUTO_OFFSET_RESET.


    // How does Spark commit Kafka offsets? Two questions: how to obtain the
    // offsets of a batch, and how to commit them.
    stream.foreachRDD((rdd: RDD[ConsumerRecord[String, String]]) => {
      /**
       * Runs on the driver once per micro-batch job, so a commit decision can
       * be made based on whether the job's tasks completed successfully.
       */

      /**
       * Strategy 1: commit only the offsets; leave data processing to the tasks.
       * Risk: if some records were already consumed but a failure prevents the
       * commit, those records may be consumed again on restart (at-least-once).
       */
      // Offset ranges covered by this batch's RDD, one per Kafka partition.
      val offsetRanges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

      // Commit the offsets back to Kafka.
      stream.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges, new OffsetCommitCallback {
        // Asynchronous — the commit lands with some delay.
        override def onComplete(offsets: util.Map[TopicPartition, OffsetAndMetadata], exception: Exception): Unit = {
          if (offsets != null) {
            println(s"offsets:${offsets}")
          }
        }
      })

      /**
       * Strategy 2: pull the batch's data back to the driver.
       */
      // NOTE: localData is intentionally unused here — it is a placeholder
      // for the transactional pattern sketched below.
      val localData: Array[ConsumerRecord[String, String]] = rdd.collect()
      // From here one could:
      //   begin transaction
      //   write the data
      //   write the offsets
      //   commit transaction
      // This gives the strongest consistency and avoids duplicate processing;
      // the same pattern persists data + offsets to a third-party store.

    })

    resStream.print()


    context.start()
    context.awaitTermination()

  }


}
