package com.ibm.cps.spark.streaming

import java.util.Properties
import java.util.concurrent.Executors

import kafka.consumer.{KafkaStream, Consumer, ConsumerConfig, ConsumerConnector}
import kafka.serializer.Decoder
import kafka.utils.{ZKStringSerializer, VerifiableProperties}
import org.I0Itec.zkclient.ZkClient
import org.apache.spark.Logging
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.ReceiverInputDStream
import org.apache.spark.streaming.kafka.KafkaInputDStream
import org.apache.spark.streaming.receiver.Receiver

import scala.collection.Map
import scala.reflect._

/**
 * Created by telekinesis on 4/3/15.
 */
/**
 * Factory for topic-labeled Kafka input streams.
 */
object KafkaStreamConnector {

  /**
   * Creates a receiver-based input stream that pulls messages from Kafka.
   *
   * @param ssc          the StreamingContext to attach the stream to
   * @param kafkaParams  Kafka consumer configuration (e.g. group.id, zookeeper.connect)
   * @param topics       map of topic name to the number of consumer threads for it
   * @param storageLevel storage level for the received blocks
   * @tparam K key type
   * @tparam V value type
   * @tparam U key decoder type
   * @tparam T value decoder type
   * @return a ReceiverInputDStream of (key, value) pairs
   */
  def createStream[K: ClassTag, V: ClassTag, U <: Decoder[_]: ClassTag, T <: Decoder[_]: ClassTag](
        ssc: StreamingContext,
        kafkaParams: Map[String, String],
        topics: Map[String, Int],
        storageLevel: StorageLevel
        ): ReceiverInputDStream[(K, V)] = {
    val stream = new TopicLabeledKafkaStream[K, V, U, T](ssc, kafkaParams, topics, storageLevel)
    stream
  }
}
/**
 * A ReceiverInputDStream whose receiver stores each Kafka record together with
 * the topic it was read from (see TopicLabeledKafkaReader, which stores
 * (topic, message) tuples).
 *
 * @param ssc_         the StreamingContext this stream belongs to (transient: not shipped to workers)
 * @param kafkaParams  Kafka consumer configuration (must include group.id and zookeeper.connect)
 * @param topics       map of topic name to the number of consumer threads for it
 * @param storageLevel storage level for received blocks
 */
class TopicLabeledKafkaStream[
  K: ClassTag,
  V: ClassTag,
  U <: Decoder[_]: ClassTag,
  T <: Decoder[_]: ClassTag](
    @transient ssc_ : StreamingContext,
    kafkaParams: Map[String, String],
    topics: Map[String, Int],
    storageLevel: StorageLevel
    ) extends ReceiverInputDStream[(K, V)](ssc_) with Logging {

  /** Builds the receiver that runs on a worker and consumes from Kafka. */
  def getReceiver(): Receiver[(K, V)] = {
    // Fix: use the mixed-in Logging trait instead of println/System.err debug
    // output (the original printed the same message twice, to stdout and stderr).
    logInfo("Initiating Kafka receiver with params: " + kafkaParams + ", topics: " + topics)
    // The reader is declared as Receiver[Any] because it stores (topic, message)
    // tuples rather than plain (K, V) pairs; the cast restores the typed
    // interface this DStream advertises to callers.
    new TopicLabeledKafkaReader[K, V, U, T](kafkaParams, topics, storageLevel)
      .asInstanceOf[Receiver[(K, V)]]
  }
}

private[streaming]
/**
 * Receiver that consumes from Kafka via the high-level consumer API and stores
 * each record as a (topic, message) tuple, which is why it extends Receiver[Any].
 *
 * @param kafkaParams  Kafka consumer configuration (must include group.id and zookeeper.connect)
 * @param topics       map of topic name to the number of consumer threads for it
 * @param storageLevel storage level for received blocks
 */
class TopicLabeledKafkaReader[
  K: ClassTag,
  V: ClassTag,
  U <: Decoder[_]: ClassTag,
  T <: Decoder[_]: ClassTag](
    kafkaParams: Map[String, String],
    topics: Map[String, Int],
    storageLevel: StorageLevel
    ) extends Receiver[Any](storageLevel) with Logging {

  import scala.util.control.NonFatal

  // Connection to Kafka; created in onStart, shut down in onStop.
  var consumerConnector : ConsumerConnector = null

  def onStop() {
    if (consumerConnector != null) {
      consumerConnector.shutdown()
    }
  }

  def onStart() {

    logInfo("Starting Kafka Consumer Stream with group: " + kafkaParams("group.id"))

    // Copy the Kafka params into a Properties object for ConsumerConfig.
    val props = new Properties()
    kafkaParams.foreach { case (key, value) => props.put(key, value) }

    val zkConnect = kafkaParams("zookeeper.connect")
    // Create the connection to the cluster
    logInfo("Connecting to Zookeeper: " + zkConnect)
    val consumerConfig = new ConsumerConfig(props)
    consumerConnector = Consumer.create(consumerConfig)
    logInfo("Connected to " + zkConnect)

    // When auto.offset.reset is defined, it is our responsibility to try and whack the
    // consumer group zk node.
    if (kafkaParams.contains("auto.offset.reset")) {
      tryZookeeperConsumerGroupCleanup(zkConnect, kafkaParams("group.id"))
    }

    // Instantiate the key/value decoders reflectively; every Kafka Decoder
    // implementation exposes a (VerifiableProperties) constructor.
    val keyDecoder = classTag[U].runtimeClass.getConstructor(classOf[VerifiableProperties])
      .newInstance(consumerConfig.props)
      .asInstanceOf[Decoder[K]]
    val valueDecoder = classTag[T].runtimeClass.getConstructor(classOf[VerifiableProperties])
      .newInstance(consumerConfig.props)
      .asInstanceOf[Decoder[V]]

    // Create Threads for each Topic/Message Stream we are listening
    val topicMessageStreams = consumerConnector.createMessageStreams(
      topics, keyDecoder, valueDecoder)

    // One thread per requested stream across all topics.
    val executorPool = Executors.newFixedThreadPool(topics.values.sum)
    try {
      // Start the messages handler for each partition stream.
      // Fix: removed a dead, empty foreach over topicMessageStreams.keys that
      // the original contained here.
      topicMessageStreams.values.foreach { streams =>
        streams.foreach { stream => executorPool.submit(new MessageHandler(stream)) }
      }
    } finally {
      executorPool.shutdown() // Just causes threads to terminate after work is done
    }
  }

  // Handles Kafka Messages: drains one KafkaStream and stores each record as a
  // (topic, message) tuple.
  // Fix: dropped the [K: ClassTag, V: ClassTag] type parameters that shadowed
  // the enclosing class's K and V — the outer parameters are used directly.
  private class MessageHandler(stream: KafkaStream[K, V])
    extends Runnable {
    def run() {
      logInfo("Starting MessageHandler.")
      try {
        for (msgAndMetadata <- stream) {
          // Fix: per-record logging demoted from logInfo to logDebug to avoid
          // flooding the logs on high-throughput topics.
          logDebug("received data from topic: " + msgAndMetadata.topic + ", data: " + msgAndMetadata.message())
          store((msgAndMetadata.topic, msgAndMetadata.message))
        }
      } catch {
        // Fix: catch only NonFatal errors so fatal JVM errors (OOM, interrupts)
        // are not swallowed.
        case NonFatal(e) => logError("Error handling message; exiting", e)
      }
    }
  }

  // It is our responsibility to delete the consumer group when specifying auto.offset.reset. This
  // is because Kafka 0.7.2 only honors this param when the group is not in zookeeper.
  //
  // The kafka high level consumer doesn't expose setting offsets currently, this is a trick copied
  // from Kafka's ConsoleConsumer. See code related to 'auto.offset.reset' when it is set to
  // 'smallest'/'largest':
  // scalastyle:off
  // https://github.com/apache/kafka/blob/0.7.2/core/src/main/scala/kafka/consumer/ConsoleConsumer.scala
  // scalastyle:on
  private def tryZookeeperConsumerGroupCleanup(zkUrl: String, groupId: String) {
    val dir = "/consumers/" + groupId
    logInfo("Cleaning up temporary Zookeeper data under " + dir + ".")
    val zk = new ZkClient(zkUrl, 30*1000, 30*1000, ZKStringSerializer)
    try {
      zk.deleteRecursive(dir)
    } catch {
      // Fix: NonFatal instead of Throwable — cleanup is best-effort, but fatal
      // errors should still propagate.
      case NonFatal(e) => logWarning("Error cleaning up temporary Zookeeper data", e)
    } finally {
      zk.close()
    }
  }
}