package cn.wanda.kafka

import cn.wanda.projects.kafkamanager.KafkaOffsetManager

import kafka.common.TopicAndPartition
import kafka.message.MessageAndMetadata
import kafka.serializer.StringDecoder
import org.I0Itec.zkclient.ZkClient
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka.{KafkaCluster, KafkaUtils}

/**
 * Helper for creating Kafka direct streams whose consumption offsets are
 * checkpointed in ZooKeeper (via [[cn.wanda.projects.kafkamanager.KafkaOffsetManager]]).
 */
object KafkaHelper {
  lazy val log = org.apache.log4j.LogManager.getLogger("KafkaHelper")

  /**
   * Creates a Kafka direct `InputDStream` of `(key, message)` pairs, resuming
   * from offsets previously persisted in ZooKeeper when they exist, or from
   * the broker's latest offsets on a first start.
   *
   * @param ssc          the streaming context the stream is attached to
   * @param topics       Kafka topics to subscribe to
   * @param zkClient     ZooKeeper client used to read the saved offsets
   * @param zkOffsetPath ZooKeeper path under which offsets are stored
   * @param kafkaParams  Kafka consumer configuration (brokers, group, ...)
   * @return a direct stream emitting `(messageKey, messageBody)` tuples
   */
  def loadTopicAndMessageFromKafka(ssc: StreamingContext,
                                   topics: Set[String],
                                   zkClient: ZkClient,
                                   zkOffsetPath: String,
                                   kafkaParams: Map[String, String]
                                  ): InputDStream[(String, String)] = {
    // NOTE(review): offsets are looked up for `topics.last` only, yet the
    // fresh-start branch below subscribes to ALL topics. If more than one
    // topic is ever passed, saved offsets for the others are ignored —
    // confirm callers always pass a single topic, or extend
    // KafkaOffsetManager.readOffsets to cover the whole set.
    val zkOffsetData = KafkaOffsetManager.readOffsets(zkClient, zkOffsetPath, topics.last)
    zkOffsetData match {
      case None =>
        // No offsets in ZooKeeper: first startup — consume from the latest offsets.
        log.info("系统第一次启动，没有读取到偏移量，默认就最新的offset开始消费")
        KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, topics)
      case Some(lastStopOffset) =>
        // Offsets found: resume exactly where the previous run stopped.
        log.info("从zk中读取到偏移量，从上次的偏移量开始消费数据......" + lastStopOffset)
        // Emit (key, message), matching the element shape of the fresh-start branch.
        val messageHandler = (mmd: MessageAndMetadata[String, String]) => (mmd.key, mmd.message)
        KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder, (String, String)](
          ssc, kafkaParams, lastStopOffset, messageHandler)
    }
  }

  /**
   * Resolves starting offsets for `topics` directly from the Kafka cluster
   * leaders, honouring `auto.offset.reset`: `"smallest"` selects the earliest
   * available offsets, anything else (including absent) selects the latest.
   *
   * NOTE(review): currently unreferenced within this object (its only caller
   * was a removed bootstrap path); kept as it is the broker-side fallback for
   * bootstrapping without ZooKeeper state.
   *
   * @throws org.apache.spark.SparkException via `KafkaCluster.checkErrors`
   *         when partition or leader-offset lookup fails
   */
  private def getFromOffsets(kc: KafkaCluster,
                             kafkaParams: Map[String, String],
                             topics: Set[String]): Map[TopicAndPartition, Long] = {
    val reset = kafkaParams.get("auto.offset.reset").map(_.toLowerCase)
    val result = for {
      topicPartitions <- kc.getPartitions(topics).right
      // "smallest" -> earliest leader offsets; otherwise latest.
      leaderOffsets <- (if (reset == Some("smallest")) {
        kc.getEarliestLeaderOffsets(topicPartitions)
      } else {
        kc.getLatestLeaderOffsets(topicPartitions)
      }).right
    } yield {
      leaderOffsets.map { case (tp, lo) => (tp, lo.offset) }
    }
    // Unwraps the Either, throwing on accumulated cluster errors.
    KafkaCluster.checkErrors(result)
  }
}
