package org.app

import java.lang.Long
import java.util.concurrent.{ConcurrentHashMap, Executors, TimeUnit}

import com.codahale.metrics.{ConsoleReporter, Gauge, MetricFilter, MetricRegistry}
import com.typesafe.config.ConfigFactory
import metrics_influxdb.{HttpInfluxdbProtocol, InfluxdbReporter}
import org.slf4j.LoggerFactory

import scala.concurrent.ExecutionContext
import scala.util.{Failure, Success}

object App {
  // `classOf[App]` would resolve to the scala.App trait (this object declares
  // no companion type), naming the logger "scala.App"; use this object's
  // own class for the correct logger category.
  val logger = LoggerFactory.getLogger(App.getClass)

  /**
   * Entry point: polls Kafka consumer-group offsets on a fixed interval and
   * publishes per-partition lag / log-end-offset gauges through a shared
   * MetricRegistry, reported both to the console (every 30s) and to InfluxDB.
   * Runs forever; the InfluxDB reporter is closed via a JVM shutdown hook.
   */
  def main(args: Array[String]): Unit = {
    implicit val mr = new MetricRegistry()
    val influxSettings = InfluxSettings(ConfigFactory.load())

    // Console reporter: dumps all metrics every 30 seconds (durations in ms).
    val consoleReporter = ConsoleReporter.forRegistry(mr)
      .convertDurationsTo(TimeUnit.MILLISECONDS).filter(MetricFilter.ALL)
    consoleReporter.build().start(30, TimeUnit.SECONDS)

    // InfluxDB reporter: ships the same metrics to the "flume" database at the
    // configured interval; idle metrics are still reported (skipIdleMetrics(false)).
    val reporter = InfluxdbReporter.forRegistry(mr)
      .protocol(new HttpInfluxdbProtocol(influxSettings.host, influxSettings.port, "flume"))
      .convertRatesTo(TimeUnit.SECONDS).convertDurationsTo(TimeUnit.MILLISECONDS)
      .filter(MetricFilter.ALL).skipIdleMetrics(false)
      //.tag("cluster", "flume-log")
      .tag("server", "default").build
    reporter.start(influxSettings.interval, TimeUnit.SECONDS)
    logger.info("---logger---start-----")

    // Flush and close the InfluxDB reporter on JVM shutdown.
    Runtime.getRuntime.addShutdownHook(new Thread(new Runnable() {
      override def run(): Unit = reporter.close()
    }))

    // Dedicated 2-thread pool for the async describeConsumerGroup callbacks;
    // deliberately not ExecutionContext.Implicits.global so these calls cannot
    // starve (or be starved by) the global pool.
    implicit val ec = ExecutionContext.fromExecutor(Executors.newFixedThreadPool(2))

    val kafkaSettings = KafkaSettings(ConfigFactory.load())
    val service = new KafkaConsumerGroupService(kafkaSettings)

    // Latest observed value per metric key. ConcurrentHashMap because the
    // onComplete callbacks (pool threads) write while Gauge.getValue (reporter
    // threads) reads. Note: `Long` here is java.lang.Long (see top-of-file
    // import), so a Gauge read of an absent key yields null rather than 0.
    // Never reassigned, hence `val` (the original declared an implicit `var`).
    implicit val counters = new ConcurrentHashMap[String, Long]()

    // Register a gauge for `key` exactly once; the gauge reads the latest
    // value for that key from the shared counters map on every report.
    def registerGauge(key: String)(implicit map: ConcurrentHashMap[String, Long],
                                   metricRegistry: MetricRegistry): Unit = {
      if (!metricRegistry.getMetrics.containsKey(key)) {
        metricRegistry.register(key, new Gauge[Long]() {
          def getValue = map.get(key)
        })
      }
    }

    // Poll each configured group every 30s; counters and gauges are updated
    // asynchronously when each describe call completes.
    while (true) {
      kafkaSettings.groups.foreach { group =>
        var total = 0L // confined to this callback closure; no cross-thread mutation
        service.describeConsumerGroup(group) onComplete {
          case Success(summaries) =>
            for (consum <- summaries) {
              val lag = group + "_" + consum.partition + "_lag"
              val logEndOffset = group + "_" + consum.partition + "_logEndOffset"
              // NOTE(review): `.get` assumes lag/logEndOffset are always defined
              // for a described partition — confirm against KafkaConsumerGroupService.
              counters.put(lag, consum.lag.get)
              counters.put(logEndOffset, consum.logEndOffset.get)
              total = total + consum.lag.get
              Array(lag, logEndOffset).foreach(registerGauge)
            }
            // NOTE(review): "_toal" looks like a typo for "_total", but it is a
            // published metric key — renaming it would break existing dashboards,
            // so it is preserved as-is.
            val key = group + "_toal"
            counters.put(key, total)
            registerGauge(key)
          case Failure(t) =>
            // Route failures through the logger (with stack trace) instead of
            // the original bare println to stdout.
            logger.error("An error has occured: " + t.getMessage, t)
        }
      }
      Thread.sleep(30000)
    }
  }
}
