package com.atguigu.stream.test

import com.atguigu.cm.constan.KafkaCons
import com.atguigu.stream.util.MySparkStreamContextUtil
import kafka.common.TopicAndPartition
import kafka.message.MessageAndMetadata
import kafka.serializer.StringDecoder
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka.KafkaCluster.Err
import org.apache.spark.streaming.kafka.{HasOffsetRanges, KafkaCluster, KafkaUtils, OffsetRange}

/**
 * description ：低阶API读取kafka数据，手动维护offset
 * author      ：剧情再美终是戏
 * mail        : 13286520398@163.com
 * date        ：Created in 2020/1/15 15:09
 * modified By ：
 * version:    : 1.0
 */
/**
 * Reads Kafka data with the low-level (direct) 0.8 API and manually maintains
 * consumer offsets in ZooKeeper via [[KafkaCluster]].
 *
 * Offsets are loaded on startup ([[fromOffsets]]) and committed after each
 * batch ([[saveOffsets]]), giving at-least-once semantics: if a commit fails,
 * the batch is re-consumed on restart.
 */
object LowKafkaSourceStreamLessOne {

  // Consumer group id used both for reading committed offsets and for committing new ones.
  val groupId = "0830"

  // Topics this job subscribes to.
  val topics: Set[String] = Set[String]("sparkstream")

  def main(args: Array[String]): Unit = {

    // Obtain the StreamingContext from the project utility.
    val ssc: StreamingContext = MySparkStreamContextUtil.get(args)

    // Kafka consumer configuration (group id + bootstrap servers).
    val kafkaParams = Map[String, String](
      ConsumerConfig.GROUP_ID_CONFIG -> groupId,
      ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> KafkaCons.BOOTSTRAP_SERVERS_CONFIG
    )

    // KafkaCluster instance used to read and commit offsets against ZooKeeper.
    val cluster: KafkaCluster = new KafkaCluster(kafkaParams)

    // Create the direct stream, starting from the offsets previously committed
    // for this consumer group (or 0 on first run — see fromOffsets).
    val source: InputDStream[(String, String)] = KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder, (String, String)](
      ssc,
      kafkaParams,
      fromOffsets(cluster, topics, groupId),
      (message: MessageAndMetadata[String, String]) => (message.key, message.message)
    )

    // Print each batch to the driver's stdout.
    source.print()

    // Offsets are stored in ZooKeeper; inspect with:
    //   sh kafka-consumer-groups.sh --zookeeper hadoop102:2181 --list
    // Commit the consumed offsets after each batch.
    saveOffsets(cluster, groupId, source)

    // Start the StreamingContext.
    ssc.start()

    // Block the driver thread so the streaming job keeps running.
    ssc.awaitTermination()
  }

  /**
   * Commits the end offset of every partition consumed in each batch.
   *
   * @param cluster Kafka cluster accessor used to commit offsets
   * @param groupId consumer group id to commit the offsets under
   * @param source  the direct stream whose RDDs carry the offset ranges
   */
  def saveOffsets(cluster: KafkaCluster, groupId: String, source: InputDStream[(String, String)]): Unit = {
    source.foreachRDD { rdd =>
      // RDDs produced by the direct stream implement HasOffsetRanges,
      // exposing the [fromOffset, untilOffset) range of each partition.
      val ranges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

      // Build the topic-partition -> end-offset map without mutable state.
      val offsets: Map[TopicAndPartition, Long] =
        ranges.map(or => or.topicAndPartition() -> or.untilOffset).toMap

      // Fix: the original discarded the Either returned by setConsumerOffsets,
      // silently swallowing commit failures. Surface them so a lost commit
      // (which causes re-consumption on restart) is at least visible.
      cluster.setConsumerOffsets(groupId, offsets) match {
        case Left(err) => println(s"WARN: failed to commit offsets for group $groupId: $err")
        case Right(_)  => // committed successfully
      }
    }
  }

  /**
   * Returns every partition of the given topics together with the offset the
   * given consumer group should start from.
   *
   * @param cluster Kafka cluster accessor used to query metadata and offsets
   * @param topics  topics to resolve partitions for
   * @param groupId consumer group id whose committed offsets are looked up
   * @return partition -> start-offset map; empty when partition metadata is unavailable
   */
  def fromOffsets(cluster: KafkaCluster, topics: Set[String], groupId: String): Map[TopicAndPartition, Long] = {
    cluster.getPartitions(topics) match {
      // Partition metadata unavailable: return no explicit start offsets.
      case Left(_) => Map.empty[TopicAndPartition, Long]
      case Right(topicAndPartitions) =>
        cluster.getConsumerOffsets(groupId, topicAndPartitions) match {
          // Offsets previously committed for this group: resume from them.
          case Right(committed) => committed
          // No committed offsets yet (first run for this group): start every
          // partition at 0. NOTE(review): offset 0 may be out of range once
          // log retention has purged old segments; consider
          // cluster.getEarliestLeaderOffsets instead — confirm requirements.
          case Left(_) => topicAndPartitions.map(tp => tp -> 0L).toMap
        }
    }
  }
}
