package cn.edu360.streaming.utils

import kafka.common.TopicAndPartition
import kafka.utils.ZkUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.kafka.{HasOffsetRanges, KafkaManager, OffsetRange}

/**
  * ZooKeeper工具类
  * wzxjava@126.com
  * Created by wangzhixuan on 2017/05/18 19:26
  */
object ZooKeeperOffsetUtil {

  /**
    * Reads previously committed consumer offsets for `topic` from ZooKeeper.
    *
    * Each child node of `zkPath` is expected to be named after a partition id
    * and to contain that partition's last saved offset as a string.
    *
    * Saved offsets are clamped against Kafka's earliest available leader
    * offsets: if a saved offset has already been purged by Kafka's retention
    * policy, the earliest valid offset is used instead, which prevents an
    * OffsetOutOfRangeException on restart.
    *
    * @param zkUtils     ZooKeeper helper used to read the persisted offsets
    * @param zkPath      base ZooKeeper path under which per-partition offsets are stored
    * @param kafkaParams Kafka consumer configuration, passed to [[KafkaManager]]
    * @param topic       topic whose offsets should be restored
    * @return Some(partition -> offset) map when offsets were previously saved,
    *         None when no offsets exist yet (first run)
    */
  def readOffsets(zkUtils: ZkUtils, zkPath: String, kafkaParams: Map[String, String], topic: String): Option[Map[TopicAndPartition, Long]] = {
    val children = zkUtils.zkClient.countChildren(zkPath)
    if (children <= 0) {
      // Nothing saved yet: let the caller start from its configured default.
      None
    } else {
      // Only query Kafka when there is actually something to validate against.
      val kafkaManager = new KafkaManager(kafkaParams)
      val earliestOffsets: Map[Int, Long] = kafkaManager.getEarliestLeaderOffsets(topic)
      val fromOffsets: Map[TopicAndPartition, Long] =
        (0 until children).map { partition =>
          val savedOffset = zkUtils.zkClient.readData[String](s"${zkPath}/${partition}").toLong
          // If Kafka no longer has the saved offset (log retention expired it),
          // resume from the earliest offset Kafka still holds. A partition
          // missing from the leader-offsets map falls back to the saved value
          // instead of throwing (the original `offsets.get(i).get` would NPE-style
          // fail with NoSuchElementException here).
          val resumeOffset = math.max(earliestOffsets.getOrElse(partition, savedOffset), savedOffset)
          TopicAndPartition(topic, partition) -> resumeOffset
        }.toMap
      Some(fromOffsets)
    }
  }

  /**
    * Persists the offsets of a processed Kafka-backed RDD to ZooKeeper.
    *
    * Writes one node per partition under `zkPath`, containing the partition's
    * `untilOffset` — the exclusive end of the range just processed — so that a
    * restarted job resumes AFTER the processed data.
    *
    * BUG FIX: the previous implementation saved `fromOffset` (the start of the
    * batch), which caused the last processed batch to be re-consumed on every
    * restart.
    *
    * @param zkUtils ZooKeeper helper used to write the offsets
    * @param zkPath  base ZooKeeper path under which per-partition offsets are stored
    * @param rdd     an RDD produced by the Kafka direct stream (must implement HasOffsetRanges)
    */
  def saveOffsets(zkUtils: ZkUtils, zkPath: String, rdd: RDD[_]): Unit = {
    val offsetRanges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
    for (range <- offsetRanges) {
      val partitionPath = s"${zkPath}/${range.partition}"
      zkUtils.updatePersistentPath(partitionPath, range.untilOffset.toString)
    }
  }

}
