package main.scala.utils

import java.util

import org.I0Itec.zkclient.{IZkChildListener, ZkClient}
import org.apache.kafka.common.TopicPartition
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.kafka010.OffsetRange

import scala.collection.{ mutable}


class ZkUtil(servers:String) {
  private val zkClient:ZkClient = new ZkClient(servers)

  // Release the Zookeeper session cleanly when the JVM shuts down.
  sys.addShutdownHook{
    zkClient.close()
  }

  /**
   * Reads the stored offset of every partition of a single topic from Zookeeper.
   *
   * @param Path          root path under which consumer-group offsets are stored
   * @param consumerGroup consumer group name
   * @param kafkaTopic    topic name
   * @return list of (TopicPartition, offset) pairs; empty when nothing is stored yet
   */
  def getFromOffset(Path:String,consumerGroup:String,kafkaTopic:String):List[(TopicPartition,Long)]={
    val zkTopicPath:String = s"$Path/$consumerGroup/$kafkaTopic"
    if (!zkClient.exists(zkTopicPath)){
      List()
    }else{
      import scala.collection.JavaConverters._
      val childrens: util.List[String] = zkClient.getChildren(zkTopicPath)
      childrens.asScala.map { p =>
        // Each child node is named after the partition id and stores that partition's offset.
        val offset: Long = zkClient.readData[Long](s"$zkTopicPath/$p")
        (new TopicPartition(kafkaTopic, Integer.parseInt(p)), offset)
      }.toList
    }
  }

  /**
   * Reads the stored offsets of every partition of each of the given topics.
   *
   * @param offsetPath root path under which consumer-group offsets are stored
   * @param group      consumer group name
   * @param topics     topics to look up
   * @return map from TopicPartition to stored offset; topics with no stored
   *         offsets contribute no entries
   */
  def getFromOffsets(offsetPath:String,group:String,topics:Array[String]):Map[TopicPartition,Long]={
    // Map ++ keeps the later binding on duplicate keys, matching the
    // behavior of the previous imperative merge loop.
    topics.foldLeft(Map[TopicPartition, Long]()) { (acc, topic) =>
      acc ++ getFromOffset(offsetPath, group, topic)
    }
  }

  /**
   * Reads the offsets of a topic for a consumer group as plain tuples.
   *
   * @param Path          root path under which consumer-group offsets are stored
   * @param consumerGroup consumer group name
   * @param kafkaTopic    topic name
   * @return list of (topicName, partition, offset); empty when nothing is stored yet
   */
  def getFromOffsetTuple(Path:String,consumerGroup:String,kafkaTopic:String):List[(String,Int,Long)]={
    val zkTopicPath : String = s"""$Path/$consumerGroup/$kafkaTopic"""
    if(!zkClient.exists(zkTopicPath)){
      List()
    }else{
      import scala.collection.JavaConverters._
      val childrens: util.List[String] = zkClient.getChildren(zkTopicPath)
      childrens.asScala.map { ch =>
        val offset: Long = zkClient.readData[Long](s"""$zkTopicPath/$ch""")
        (kafkaTopic, Integer.parseInt(ch), offset)
      }.toList
    }
  }

  /**
   * Persists the end ("until") offset of each OffsetRange to Zookeeper under
   * $Path/$consumerGroup/$topic/$partition.
   *
   * @param Path          root path for offset storage
   * @param consumerGroup consumer group name
   * @param offsetRanges  ranges read from Kafka, carrying topic, partition,
   *                      fromOffset and untilOffset
   */
  def storeOffsets(Path:String,consumerGroup:String,offsetRanges:Array[OffsetRange]):Unit={
    val zkTopicPath:String = s"$Path/$consumerGroup"
    for (oo <- offsetRanges){
      val offsetPath = s"$zkTopicPath/${oo.topic}/${oo.partition}"
      if (!zkClient.exists(offsetPath)){
        // Create the node (and any missing parents) directly. The previous
        // createNode(offsetPath, "") call built "$offsetPath/" — a path with a
        // trailing slash, which Zookeeper rejects as invalid.
        zkClient.createPersistent(offsetPath, true)
      }
      zkClient.writeData(offsetPath, oo.untilOffset)
    }
  }

  /**
   * Creates a persistent node at $parentPath/$childPath, creating any missing
   * parent nodes along the way.
   *
   * @param parentPath parent node path
   * @param childPath  child node path
   */
  def createNode(parentPath:String,childPath:String):Any={
    zkClient.createPersistent(s"$parentPath/$childPath",true)
  }

  /** Exposes the underlying ZkClient for callers needing raw access. */
  def getZkClient():ZkClient= zkClient

  /** Returns true when the given path exists in Zookeeper. */
  def isExists(path:String):Boolean= zkClient.exists(path)

  /**
   * Stores a schema (e.g. the strings from df.dtypes) in Zookeeper as a single
   * tab-separated string, creating the node (with parents) if necessary.
   *
   * @param path  node path to write to
   * @param dtype schema fields to persist
   */
  def storeSchema(path:String,dtype:Array[String]):Unit={
    if (!zkClient.exists(path)){
      zkClient.createPersistent(path,true)
    }
    zkClient.writeData(path, dtype.mkString("\t"))
  }

  /**
   * Reads back a schema previously written by [[storeSchema]].
   *
   * @param path node path to read from
   * @return the tab-separated schema fields; empty array when the node is absent
   */
  def getFromSchema(path:String):Array[String]={
    // readData(path, true) returns null instead of throwing when the node is
    // missing; guard so callers get an empty array rather than an NPE.
    Option(zkClient.readData[String](path, true))
      .fold(Array.empty[String])(_.split("\t"))
  }

  /**
   * Installs a Zookeeper-based kill switch: once a child change is observed
   * under `path`, the watch node is removed recursively, the Zookeeper
   * connection is closed and the StreamingContext is stopped.
   *
   * NOTE(review): handleChildChange fires on ANY child change (creation or
   * deletion); confirm the intended trigger is "a marker child was created".
   *
   * @param path Zookeeper node to watch
   * @param ssc  streaming context to stop when the trigger fires
   */
  def stopSparkStreaming(path:String,ssc:StreamingContext): Unit ={
    if(!zkClient.exists(path)){
      zkClient.createPersistent(path,true)
    }
    zkClient.subscribeChildChanges(path,new IZkChildListener {
      override def handleChildChange(parentPath: String, currentChilds: util.List[String]): Unit = {
        zkClient.deleteRecursive(parentPath)
        zkClient.close()
        ssc.stop()
      }
    })
  }
}
