package kafkaScala


import java.util.Properties

import kafka.api.OffsetRequest
import kafka.api.PartitionOffsetRequestInfo
import kafka.common.KafkaException
import kafka.common.TopicAndPartition
import kafka.consumer.ConsumerConfig
import kafka.consumer.SimpleConsumer
import kafka.tools.UpdateOffsetsInZK
import kafka.utils.Utils
import kafka.utils.ZKGroupTopicDirs
import kafka.utils.ZKStringSerializer
import kafka.utils.ZkUtils
import org.I0Itec.zkclient.ZkClient
import kafka.consumer.{SimpleConsumer, ConsumerConfig}
import kafka.api.{PartitionOffsetRequestInfo, OffsetRequest}
import kafka.common.{TopicAndPartition, KafkaException}
import kafka.utils.{ZKGroupTopicDirs, ZkUtils, ZKStringSerializer, Utils}


/**
 *  A utility that updates the offset of every broker partition to the offset of earliest or latest log segment file, in ZK.
 */
object UpdateOffsetsInZK {
  val Earliest = "earliest"
  val Latest = "latest"

  /**
   * Builds a hard-coded ConsumerConfig pointing at the "s60:2181" ZooKeeper
   * ensemble with consumer group "1". Used instead of loading a properties
   * file from the command line.
   */
  def createConsumerConfig: ConsumerConfig = {
    val props = new Properties()
    props.put("zookeeper.connect", "s60:2181")
    props.put("group.id", "1")
    props.put("auto.offset.reset", "smallest")
    props.put("zookeeper.session.timeout.ms", "400")
    props.put("zookeeper.sync.time.ms", "200")
    props.put("auto.commit.interval.ms", "10")
    new ConsumerConfig(props)
  }

  /**
   * Entry point. Command-line args are currently ignored: the config, topic
   * and offset mode are hard-coded below.
   */
  def main(args: Array[String]) {
    val config = createConsumerConfig
    val topic = "testkafka-topic"
    val Off = "earliest"
    val zkClient = new ZkClient(config.zkConnect, config.zkSessionTimeoutMs,
      config.zkConnectionTimeoutMs, ZKStringSerializer)
    try {
      Off match {
        // OffsetRequest.LatestTime == -1L, OffsetRequest.EarliestTime == -2L
        case Earliest => getAndSetOffsets(zkClient, OffsetRequest.EarliestTime, config, topic)
        case Latest => getAndSetOffsets(zkClient, OffsetRequest.LatestTime, config, topic)
        // Any other positive value is treated as a raw timestamp (ms) by getOffsetsBefore.
        case "off" => getAndSetOffsets(zkClient, 250L, config, topic)
        case _ => usage()
      }
    } finally {
      // The original leaked the ZooKeeper connection; close it on all paths.
      zkClient.close()
    }
  }

  /**
   * For each partition of `topic`, asks the partition's leader broker — via
   * the low-level SimpleConsumer API — for the offset matching
   * `offsetOption` (OffsetRequest.EarliestTime, OffsetRequest.LatestTime, or
   * a raw timestamp) and persists it under the consumer group's offset path
   * in ZooKeeper.
   *
   * Fix: the previous version ignored `offsetOption` entirely — the offset
   * lookup was commented out and the literal 250 was written to ZK for every
   * partition. The lookup is restored so the parameter is honored, and the
   * SimpleConsumer socket is now closed.
   *
   * @throws RuntimeException if the topic does not exist in ZK
   * @throws KafkaException   if a partition has no leader or broker info is missing
   */
  private def getAndSetOffsets(zkClient: ZkClient, offsetOption: Long, config: ConsumerConfig, topic: String): Unit = {
    val partitionsPerTopicMap = ZkUtils.getPartitionsForTopics(zkClient, List(topic))
    val partitions: Seq[Int] = partitionsPerTopicMap.get(topic) match {
      case Some(l) => l.sortWith((s, t) => s < t)
      case _ => throw new RuntimeException("Can't find topic " + topic)
    }

    var numParts = 0
    for (partition <- partitions) {
      // Resolve the broker currently leading this partition.
      val brokerHostingPartition = ZkUtils.getLeaderForPartition(zkClient, topic, partition)

      val broker = brokerHostingPartition match {
        case Some(b) => b
        case None => throw new KafkaException("Broker " + brokerHostingPartition + " is unavailable. Cannot issue " +
          "getOffsetsBefore request")
      }

      ZkUtils.getBrokerInfo(zkClient, broker) match {
        // Use the low-level API to fetch the offset from the leader, then write it to ZK.
        case Some(brokerInfo) =>
          val kafkaConsumer = new SimpleConsumer(brokerInfo.host, brokerInfo.port, 10000, 100 * 1024, "UpdateOffsetsInZk")
          try {
            val topicAndPartition = TopicAndPartition(topic, partition)
            val request = OffsetRequest(Map(topicAndPartition -> PartitionOffsetRequestInfo(offsetOption, 1)))
            // We asked for at most 1 offset, so take the first entry returned.
            val offset = kafkaConsumer.getOffsetsBefore(request).partitionErrorAndOffsets(topicAndPartition).offsets.head
            val topicDirs = new ZKGroupTopicDirs(config.groupId, topic)
            println("updating partition " + partition + " with new offset: " + offset)
            // Persist under /consumers/<group>/offsets/<topic>/<partition>.
            ZkUtils.updatePersistentPath(zkClient, topicDirs.consumerOffsetDir + "/" + partition, offset.toString)
            numParts += 1
          } finally {
            // SimpleConsumer holds a broker socket; the original never closed it.
            kafkaConsumer.close()
          }
        case None => throw new KafkaException("Broker information for broker id %d does not exist in ZK".format(broker))
      }
    }
    println("updated the offset for " + numParts + " partitions")
  }

  /** Prints usage and terminates the JVM with a non-zero status. */
  private def usage() = {
    println("USAGE: " + UpdateOffsetsInZK.getClass.getName + " [earliest | latest] kafkaConsumer.properties topic")
    System.exit(1)
  }
}
