package com.gitee.dufafei.spark.streaming.offset

import kafka.api.{OffsetRequest, PartitionOffsetRequestInfo}
import kafka.common.TopicAndPartition
import kafka.consumer.SimpleConsumer
import kafka.utils.{ZKGroupTopicDirs, ZkUtils}
import org.I0Itec.zkclient.ZkClient
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.protocol.SecurityProtocol
import org.apache.spark.streaming.kafka010.OffsetRange

import scala.collection.mutable
import scala.util.Try

class ZkOffset(zkServer: String) {

  // Lazy so no ZooKeeper connection is opened until the first offset operation.
  lazy val zkClient: ZkClient =  ZkUtils.createZkClient(zkServer, 30000, 30000)
  lazy val zkUtils: ZkUtils = ZkUtils.apply(zkClient, isZkSecurityEnabled = false)

  /**
   * Resolves the starting offset for every partition of the given topics.
   *
   * For each partition, the offset stored in ZooKeeper under the consumer
   * group's offset directory is compared against the earliest offset Kafka
   * still retains. If the stored offset is missing, unparsable, or has
   * expired (fell below Kafka's retention lower bound), the Kafka earliest
   * offset is used instead.
   *
   * @param topics  topics whose partitions should be resolved
   * @param groupId consumer group whose saved offsets are read from ZooKeeper
   * @return map from partition to the offset consumption should begin at
   */
  def getBeginningOffset(topics: Seq[String], groupId: String): mutable.HashMap[TopicPartition, Long] = {
    val fromOffsets = mutable.HashMap.empty[TopicPartition,Long]
    val partitionMap = zkUtils.getPartitionsForTopics(topics)
    partitionMap.foreach { case (topic, partitions) =>
      val topicDirs = new ZKGroupTopicDirs(groupId, topic)
      partitions.foreach { partition =>
        val tp = new TopicPartition(topic, partition)
        // Earliest offset still retained by Kafka (log-retention lower bound).
        val kafkaOffset = getOffset2Kafka(zkUtils, tp)
        val zkPath = s"${topicDirs.consumerOffsetDir}/$partition"
        zkUtils.makeSurePersistentPathExists(zkPath)
        // A freshly created path may hold null or an empty string; parse
        // defensively so a bad payload does not abort the whole resolution
        // with a NumberFormatException.
        val zkOffset = Option(zkUtils.readData(zkPath)._1)
          .flatMap(raw => Try(raw.trim.toLong).toOption)
        val begin = zkOffset match {
          case Some(offset) if offset >= kafkaOffset => offset
          case _ => kafkaOffset // missing, unparsable, or expired
        }
        fromOffsets += tp -> begin
      }
    }
    fromOffsets
  }

  /**
   * Persists the end offset of each processed range to ZooKeeper so the next
   * run can resume where this batch finished.
   *
   * @param offsetRanges ranges consumed by the finished batch
   * @param groupId      consumer group the offsets are stored under
   */
  def setEndOffset(offsetRanges:Array[OffsetRange], groupId:String): Unit = {
    offsetRanges.foreach{ offsetRange =>
      val topicDirs = new ZKGroupTopicDirs(groupId, offsetRange.topic)
      val zkPath = s"${topicDirs.consumerOffsetDir}/${offsetRange.partition}"
      // untilOffset is exclusive: it is the first offset of the next batch.
      zkUtils.updatePersistentPath(zkPath, offsetRange.untilOffset.toString)
    }
  }

  /**
   * Queries the partition's leader broker directly for an offset.
   *
   * @param zkUtils    ZooKeeper utilities used to locate the partition leader
   * @param tp         topic-partition to query
   * @param time       target time; defaults to `OffsetRequest.EarliestTime`,
   *                   i.e. the oldest offset still retained by the broker
   * @param soTimeout  socket timeout for the SimpleConsumer, in milliseconds
   * @param bufferSize SimpleConsumer receive buffer size, in bytes
   * @return the single offset returned by the broker for the request
   * @throws IllegalStateException if no leader or broker metadata is found
   */
  def getOffset2Kafka(zkUtils: ZkUtils, tp: TopicPartition,
                      time: Long = OffsetRequest.EarliestTime, soTimeout: Int = 10000, bufferSize: Int = 100000): Long = {
    // Fail with a descriptive message instead of a bare NoSuchElementException
    // from Option.get when leader/broker metadata is absent.
    val brokerId = zkUtils.getLeaderForPartition(tp.topic, tp.partition)
      .getOrElse(throw new IllegalStateException(s"No leader found for partition $tp"))
    val broker = zkUtils.getBrokerInfo(brokerId)
      .getOrElse(throw new IllegalStateException(s"No broker info for broker id $brokerId"))
    val endpoint = broker.getBrokerEndPoint(SecurityProtocol.PLAINTEXT)
    val consumer = new SimpleConsumer(endpoint.host, endpoint.port, soTimeout, bufferSize, "getOffset")
    try {
      val tnp = TopicAndPartition(tp.topic, tp.partition)
      val request = OffsetRequest(Map(tnp -> PartitionOffsetRequestInfo(time, 1)))
      consumer.getOffsetsBefore(request).partitionErrorAndOffsets(tnp).offsets.head
    } finally {
      // The original leaked one TCP connection per call; always close,
      // even when the offset request itself fails.
      consumer.close()
    }
  }
}

object ZkOffset {

  /** Factory shorthand: `ZkOffset("host:2181")` instead of `new ZkOffset(...)`. */
  def apply(zkServer: String): ZkOffset = {
    new ZkOffset(zkServer)
  }
}