/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.genitus.drum.service

import java.util.Properties
import java.util.concurrent.CountDownLatch
import java.util.concurrent.atomic.AtomicBoolean

import com.codahale.metrics.{Meter, MetricRegistry}
import com.google.common.util.concurrent.AbstractIdleService
import com.google.inject.Inject
import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
import org.genitus.drum.conf.KafkaConf
import org.slf4j.LoggerFactory

import scala.collection.JavaConversions._
import scala.collection.JavaConverters._

/**
  * Kafka consumer service.
  *
  * @author gwjiang (gwjiang@iflytek.com), 2017/3/2.
  */
class KafkaService @Inject() (
  val consumer: KafkaClient
) extends AbstractIdleService {
  /** Logger for service lifecycle events. */
  private[this] val logger = LoggerFactory.getLogger(getClass.getName)

  /**
    * Starts the wrapped Kafka consumer thread.
    *
    * Invoked once by the Guava service framework when this service
    * transitions to RUNNING.
    */
  override def startUp(): Unit = {
    logger.info("# -------- gwjiang: start Kafka service ...")
    consumer.start()
  }

  /**
    * Stops the wrapped Kafka consumer thread and waits for it to exit.
    *
    * Invoked once by the Guava service framework when this service
    * transitions to TERMINATED.
    */
  override def shutDown(): Unit = {
    logger.info("# -------- gwjiang: stop Kafka service ...")
    consumer.shutdown()
  }
}

/**
  * Shutdownable thread for kafka consumer.
  *
  * @param name thread name.
  * @param isInterruptible interruptible.
  *
  * @author gwjiang (gwjiang@iflytek.com), 2017/3/2.
  */
abstract class ShutdownableThread(
  val name: String,
  val isInterruptible: Boolean = true
) extends Thread(name) {
  /** logger. */
  private[this] val log = LoggerFactory.getLogger(getClass.getName)

  // Run as a user (non-daemon) thread so the JVM waits for a clean shutdown.
  this.setDaemon(false)

  /** running tag: true while the work loop should keep iterating. */
  val isRunning: AtomicBoolean = new AtomicBoolean(true)

  /** latch released exactly once when the work loop has fully stopped. */
  private val shutdownLatch = new CountDownLatch(1)

  /**
    * Initiates shutdown and blocks until the work loop has stopped.
    */
  def shutdown(): Unit = {
    initiateShutdown()
    awaitShutdown()
  }

  /**
    * Requests the work loop to stop.
    *
    * @return true if this call initiated the shutdown, false if a
    *         shutdown had already been initiated.
    */
  def initiateShutdown(): Boolean = {
    // compareAndSet both checks and flips the flag atomically; the original
    // redundant isRunning.set(false) that followed it has been removed.
    if (isRunning.compareAndSet(true, false)) {
      log.info("Shutting down")
      if (isInterruptible) {
        interrupt()
      }
      true
    } else
      false
  }

  /**
    * After calling initiateShutdown(), use this to wait until the shutdown is complete.
    */
  def awaitShutdown(): Unit = {
    shutdownLatch.await()
    log.info("Shutdown completed")
  }

  /**
    * This method is repeatedly invoked until the thread shuts down
    * or this method throws an exception.
    */
  def doWork(): Unit

  /**
    * Runs doWork() in a loop until a shutdown is requested or doWork()
    * throws; always releases awaitShutdown() waiters on exit.
    */
  override def run(): Unit = {
    log.info("Starting ")
    try {
      while (isRunning.get()) {
        doWork()
      }
    } catch {
      case e: Throwable =>
        // An exception thrown while shutting down is part of normal
        // termination; only log failures that happen mid-run. Pass the
        // throwable so the stack trace is not lost.
        if (isRunning.get()) {
          log.error(s"Error due to ${e.getMessage}", e)
        }
    } finally {
      // Release waiters even if the catch block itself failed; otherwise
      // awaitShutdown() would block forever.
      shutdownLatch.countDown()
      log.info("Stopped ")
    }
  }
}

/**
  * Kafka consumer.
  *
  * @param conf kafka conf.
  * @param buffService buffer service.
  *
  * @author gwjiang (gwjiang@iflytek.com), 2017/3/2.
  */
class KafkaClient @Inject() (
  val conf: KafkaConf,
  val buffService: BuffService,
  val metricRegistry: MetricRegistry
) extends ShutdownableThread(
  name = "kafka-consumer",
  isInterruptible = false) {
  /** Meter counting events accepted from Kafka. */
  val acceptEventMeter: Meter = metricRegistry.meter("kafka.event-accept-meter")

  /** Consumer configuration assembled from the injected KafkaConf. */
  val props: Properties = new Properties
  props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, conf.kafkaBootstrapServers())
  props.put(ConsumerConfig.GROUP_ID_CONFIG, conf.kafkaConsumerGroup())
  props.put(ConsumerConfig.CLIENT_ID_CONFIG, conf.kafkaClientId())
  props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true")
  props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000")
  props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000")
  props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer")
  props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArrayDeserializer")

  /** Underlying Kafka consumer; closed when the work loop exits (see run()). */
  val consumer = new KafkaConsumer[String, Array[Byte]](props)
  // Explicit .asJava instead of the deprecated JavaConversions implicits.
  consumer.subscribe(List(conf.kafkaTopic()).asJava)

  /**
    * Polls one batch of records, hands them to the buffer service and
    * marks the accept meter. Invoked repeatedly by ShutdownableThread.run().
    */
  override def doWork(): Unit = {
    // Finite 1s timeout so the isRunning flag is re-checked regularly,
    // allowing shutdown() to complete even when the topic is idle.
    val records = consumer.poll(1000)
    if (!records.isEmpty) {
      val messages = records.asScala.map(r => (r.key(), r.value())).toList
      buffService.append(messages)
      acceptEventMeter.mark(messages.length)
    }
  }

  /**
    * @inheritdoc
    *
    * Wraps the parent work loop so the consumer's network and socket
    * resources are released however the loop terminates (fixes a leak:
    * the consumer was never closed).
    */
  override def run(): Unit = {
    try super.run()
    finally consumer.close()
  }
}
