package kafka.examples.manualoffset

import java.util.Properties
import java.util.concurrent.CountDownLatch
import java.{lang, util}

import grizzled.slf4j.Logger
import kafka.examples.manualoffset.OffsetStrategy.OffsetStrategy
import org.apache.kafka.clients.consumer._
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.errors.WakeupException

import scala.collection.JavaConverters._

/**
  * A demo consumer, using the broker auto partition assignment.
  *
  * If you use the value of the last argument equal to 0,
  * the consumer will assume that you want to start from the beginning,
  * so it will call a kafkaConsumer.seekToBeginning() method for each of its partitions.
  * If you pass a value of -1 it will assume that you want to ignore the existing messages
  * and only consume messages published after the consumer has been restarted.
  * In this case it will call kafkaConsumer.seekToEnd() on each of the partitions.
  *
  * Finally, if you specify any value other than 0 or -1, the consumer assumes that value is the explicit offset to start from;
  * for example, if you pass the third value as 5, then on restart the consumer will consume messages starting at offset 5 (inclusive).
  * For this it would call kafkaConsumer.seek(<topicname>, <startingoffset>).
  *
  */
// Marker class with no members; exists only so the companion object's logger
// can be keyed on classOf[ManualConsumer].
class ManualConsumer

object ManualConsumer {
  private val logger: Logger = Logger(classOf[ManualConsumer])

  /**
    * Entry point: starts a single [[ConsumerThread]] and blocks on a latch
    * until a JVM shutdown hook wakes the consumer up and joins the thread.
    */
  def main(args: Array[String]): Unit = {
    val topicName = "heartbeatgo"
    val groupId = "java-consumer"

    // Alternative starting strategies:
    //   new ConsumerThread(topicName, groupId, OffsetStrategy.beginning)
    //   new ConsumerThread(topicName, groupId, OffsetStrategy.ending)
    //   new ConsumerThread(topicName, groupId)  // defaults to OffsetStrategy.stay
    val consumerThread: ConsumerThread = new ConsumerThread(topicName, groupId, OffsetStrategy.anyplace, 100)
    consumerThread.start()

    val countDownLatch: CountDownLatch = new CountDownLatch(1)
    Runtime.getRuntime.addShutdownHook(new Thread(() => {
      logger.info("Stopping consumer ....")
      // The KafkaConsumer is created inside run(); if shutdown fires before the
      // thread got that far, the accessor still returns null — guard the wakeup
      // so the hook cannot die with an NPE and leave the latch un-counted.
      Option(consumerThread.kafkaConsumer).foreach(_.wakeup())
      consumerThread.join()
      countDownLatch.countDown()
    }))
    countDownLatch.await()
  }
}

/**
  * Where the consumer should position itself on each newly assigned partition:
  * the beginning, the current committed position (stay), the end, or an
  * explicit offset supplied by the caller (anyplace).
  */
object OffsetStrategy extends Enumeration {
  type OffsetStrategy = Value
  val beginning: Value = Value
  val stay: Value = Value
  val ending: Value = Value
  val anyplace: Value = Value
}

/**
  * Consumer worker thread: subscribes to `topicName` in group `groupId`,
  * repositions its starting offset per `strategy` whenever partitions are
  * assigned, and commits offsets manually — asynchronously after each poll,
  * synchronously on rebalance and on shutdown.
  *
  * @param topicName topic to subscribe to
  * @param groupId   Kafka consumer group id
  * @param strategy  starting-position policy applied on partition assignment
  * @param offset    explicit starting offset; only consulted when `strategy`
  *                  is none of `stay` / `beginning` / `ending`
  */
class ConsumerThread(topicName: String, groupId: String, strategy: OffsetStrategy = OffsetStrategy.stay, offset: Long = 0) extends Thread with OffsetCommitCallback {
  private val logger: Logger = Logger(classOf[ConsumerThread])
  // @volatile: written by this thread in run(), read by the JVM shutdown hook
  // (via kafkaConsumer) from another thread — without it the hook may see null
  // or a stale reference.
  @volatile private var _kafkaConsumer: KafkaConsumer[String, String] = _

  override def run(): Unit = {
    val configProperties: Properties = new Properties()
    configProperties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "10.100.189.30:9092")
    configProperties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer")
    configProperties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer")
    configProperties.put(ConsumerConfig.GROUP_ID_CONFIG, groupId)
    configProperties.put(ConsumerConfig.CLIENT_ID_CONFIG, "manualOffsetCommitter")
    // Offsets are committed manually below, so auto-commit must stay off.
    // (lang.Boolean.FALSE instead of the deprecated `new lang.Boolean(false)`.)
    configProperties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, lang.Boolean.FALSE)
    configProperties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
    logger.info(s"using clientId=manualOffsetCommitter,groupId=$groupId,consume topic=${topicName}")
    _kafkaConsumer = new KafkaConsumer[String, String](configProperties)
    _kafkaConsumer.subscribe(util.Arrays.asList(topicName), new ConsumerRebalanceListener {
      override def onPartitionsRevoked(partitions: util.Collection[TopicPartition]): Unit = {
        logger.info(s"${util.Arrays.toString(partitions.toArray)} topic-partitions are revoked from this consumer,commit the offset right now\n")
        // Commit synchronously before losing the partitions, so the next owner
        // starts from our last processed position.
        _kafkaConsumer.commitSync()
        logger.info("commit offset successfully")
      }

      override def onPartitionsAssigned(partitions: util.Collection[TopicPartition]): Unit = {
        logger.info(s"${util.Arrays.toString(partitions.toArray)} topic-partitions are assigned to this consumer\n")
        // NOTE(review): applies the same strategy to every assigned partition;
        // could be extended to a per-partition policy easily.
        val topicPartitionIterator = partitions.iterator
        while (topicPartitionIterator.hasNext) {
          val topicPartition = topicPartitionIterator.next
          println("Current offset is " + _kafkaConsumer.position(topicPartition) + " committed offset is ->" + _kafkaConsumer.committed(topicPartition))
          strategy match {
            case OffsetStrategy.stay =>
              println("Leaving it alone")
            case OffsetStrategy.beginning =>
              println("Setting offset to beginning")
              _kafkaConsumer.seekToBeginning(util.Arrays.asList(topicPartition))
            case OffsetStrategy.ending =>
              println("Setting it to the end ")
              _kafkaConsumer.seekToEnd(util.Arrays.asList(topicPartition))
            case _ =>
              // anyplace: start at the caller-supplied offset (inclusive)
              println("Resetting offset to " + offset)
              _kafkaConsumer.seek(topicPartition, offset)
          }
        }
      }
    })

    try {
      while (true) {
        val records: ConsumerRecords[String, String] = _kafkaConsumer.poll(100)
        for (record <- records.asScala) {
          logger.info(s"${record.timestampType()}/${record.timestamp()} | ${record.offset()} | ${record.key()}=>${record.value()}")
        }
        // Fire-and-forget commit of the just-processed batch; results are
        // reported through onComplete below.
        _kafkaConsumer.commitAsync(this)
      }
    } catch {
      case e: WakeupException =>
        // Expected path: the shutdown hook called wakeup() — this is a normal
        // shutdown signal, not an error.
        logger.info("wakeup received, shutting down consumer")
    } finally {
      // Commit whatever is outstanding, then close — close() must run even if
      // the final commit fails, otherwise the consumer (and its group slot) leaks.
      try {
        logger.info("commitSync begin")
        val start = System.currentTimeMillis()
        _kafkaConsumer.commitSync()
        logger.info(s"commitSync done,begin close using=${System.currentTimeMillis() - start} mills")
      } finally {
        val start = System.currentTimeMillis()
        _kafkaConsumer.close()
        logger.info(s"Close consumer successfully,using=${System.currentTimeMillis() - start} mills")
      }
    }

  } //end of run

  /** The underlying consumer; null until run() has created it. */
  def kafkaConsumer: KafkaConsumer[String, String] = this._kafkaConsumer

  /* OffsetCommitCallback: invoked on the consumer thread with the result of
   * each commitAsync issued in the poll loop. */
  override def onComplete(offsets: util.Map[TopicPartition, OffsetAndMetadata], exception: Exception): Unit = {
    if (exception != null) {
      // Log with the throwable so the stack trace is preserved.
      logger.error("offset async commit failed", exception)
    } else {
      if (offsets != null && !offsets.isEmpty) {
        offsets.asScala.foreach { case (key, value) => logger.info(s"offset async commit ok:${key.toString}-->${value.offset()}") }
      }
    }
  }
}
