package org.apache.spark.streaming.kafka

import com.gizwits.util.Constants
import kafka.common.TopicAndPartition
import kafka.serializer.StringDecoder
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.kafka._

import  com.gizwits.util.CaseClasssDomain._
import  com.gizwits.util._
object KafkaOffset {

  /** Spark Streaming job that consumes a Kafka topic via the direct-stream API
    * and, after each batch, commits the batch's end offsets back to the Kafka
    * cluster (ZooKeeper) for the consumer group named by the first CLI argument.
    *
    * Usage: KafkaOffset <consumer-group>
    */
  def main(args: Array[String]): Unit = {
    // Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException
    // when the consumer group argument is missing.
    require(args.nonEmpty, "usage: KafkaOffset <consumer-group>")
    val consumerGroup = args(0)

    val sparkConf = new SparkConf().setAppName("KafkaActor")
    val sc = new SparkContext(sparkConf)
    val ssc = new StreamingContext(sc, Seconds(1))
    ssc.checkpoint(".")

    val kafkaParams = Map(
      "zookeeper.connect" -> Constants.ZKQUORUM,
      "group.id" -> Constants.GROUP,
      "metadata.broker.list" -> Constants.BROKERS,
      // Start from the earliest available offset when no committed offset exists.
      "auto.offset.reset" -> "smallest")

    val topicsSet = Set(Constants.KafkaTopic)

    val messages = KafkaUtils.createDirectStream[String, User, StringDecoder, MsgCommandDecoder[User]](
      ssc, kafkaParams, topicsSet)

    // KafkaCluster depends only on the (constant) kafkaParams, so build it once
    // instead of once per batch inside foreachRDD.
    val kc = new KafkaCluster(kafkaParams)

    messages.foreachRDD { rdd =>
      // Direct-stream RDDs carry the offset range of each Kafka partition in
      // this batch; use them to commit progress after the batch is produced.
      val offsetsList = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
      for (offsets <- offsetsList) {
        println(s"log...........offsets.partition .${offsets.partition}")
        println(s"log...........offsets.untilOffset .${offsets.untilOffset}")
        // Use the range's own topic rather than the hard-coded Constants.KafkaTopic,
        // so the commit stays correct if more topics are ever subscribed.
        val topicAndPartition = TopicAndPartition(offsets.topic, offsets.partition)
        val o = kc.setConsumerOffsets(consumerGroup, Map(topicAndPartition -> offsets.untilOffset))
        // setConsumerOffsets returns an Either; Left carries the error(s).
        o.left.foreach(err => println(s"Error updating the offset to Kafka cluster: $err"))
      }
    }

    messages.print()

    ssc.start()
    // Blocks until the streaming context is stopped (externally or by error);
    // an explicit ssc.stop() afterwards would be dead code.
    ssc.awaitTermination()
  }
}
