package com.zz.common.service

import akka.Done
import akka.actor.ActorSystem
import akka.kafka.ConsumerMessage.{CommittableMessage, CommittableOffsetBatch}
import akka.kafka.scaladsl.Consumer
import akka.kafka.scaladsl.Consumer.DrainingControl
import akka.kafka.{ConsumerSettings, Subscriptions}
import akka.stream.scaladsl.{Keep, RunnableGraph, Sink}
import akka.stream.{ActorAttributes, Materializer, Supervision}
import cats.effect.IO
import com.zz.dsp.idl.DspEvent
import com.zz.common.model.Config
import com.zz.common.service.IdService.Zidservice
import org.apache.kafka.common.serialization.{ByteArrayDeserializer, StringDeserializer}
import scala.collection.immutable
import scala.concurrent.{Await, ExecutionContext}
import scala.concurrent.duration.Duration
import scala.util.Try
import scala.util.control.NonFatal

import com.zz.common.{BatchOffsetsB, BroadcastFlow, GlobalContext, PassThrough}

/**
  * @author huanggh
  *         2019-08-16 11:27 AM
  */
object IdBundleConsumer {

  /** Factory for [[IdBundleConsumer]]; simply forwards the configuration,
    * the id service, and the implicit runtime dependencies to the constructor. */
  def apply(config: => Config, zidService: Zidservice[IO])
           (implicit ctx: GlobalContext[IO], ec: ExecutionContext, mat: Materializer, system: ActorSystem): IdBundleConsumer = {
    new IdBundleConsumer(config, zidService)
  }
}


class IdBundleConsumer(config: => Config, zidService: Zidservice[IO])
                      (implicit ctx: GlobalContext[IO]
                       , ec: ExecutionContext
                       , mat: Materializer
                       , system: ActorSystem) {

  private[this] val logger = com.typesafe.scalalogging.Logger(getClass)

  /** Supervision strategy for the consuming stream: log any non-fatal failure
    * and resume, so a single bad batch does not tear the consumer down.
    * Fatal errors (OOM, etc.) are not matched and still propagate. */
  protected val decider: Supervision.Decider = {
    case NonFatal(e) =>
      logger.error("consuming flow error! will resume", e)
      Supervision.Resume
  }

  // Kafka consumer settings: string keys, raw byte-array values.
  // Every instance gets a fresh, upper-cased UUID as client id; the group id
  // comes from configuration. NOTE(review): `config` is by-name, so each
  // field access re-evaluates it — presumably cheap; confirm with the caller.
  private[this] lazy val consumerConfig: ConsumerSettings[String, Array[Byte]] = {
    val uniqueClientId = java.util.UUID.randomUUID().toString.toUpperCase
    ConsumerSettings(system, new StringDeserializer, new ByteArrayDeserializer)
      .withGroupId(config.groupId)
      .withClientId(uniqueClientId)
  }

  /** Folds the committable offsets of a batch of messages into a single
    * committable offset batch, so the whole group can be committed at once. */
  private[this] def mergeOffsets(messages: immutable.Seq[CommittableMessage[String, Array[Byte]]]): CommittableOffsetBatch =
    messages.foldLeft(CommittableOffsetBatch.empty) { (batch, msg) =>
      batch.updated(msg.committableOffset)
    }

  /**
    * Blueprint of the consuming pipeline:
    *  1. consume all partitions of the configured input topic, merged into one stream;
    *  2. group messages by size/time window into batches;
    *  3. decode each batch into entities (invalid records silently dropped) and
    *     carry the merged offsets alongside as a pass-through;
    *  4. hand the entities to the downstream flow, then commit the offsets
    *     that come back out (at most 6 commits in flight);
    *  5. non-fatal stream errors are resumed per [[decider]].
    */
  private[this] val consumingStream: RunnableGraph[DrainingControl[Done]] =
    Consumer
      .committablePartitionedSource(consumerConfig, Subscriptions.topics(config.topicIn))
      .flatMapMerge(config.maxPartitions, _._2)
      .groupedWithin(config.batchSize, config.batchTimeWindow)
      .map { batch =>
        val entities = convertFromProtocolBuffersToEntities(batch.map(_.record.value))
        PassThrough(entities, BatchOffsetsB(mergeOffsets(batch)))
      }
      .via(BroadcastFlow(zidService))
      .collect { case BatchOffsetsB(offsets) => offsets }
      .mapAsync(6)(_.commitScaladsl())
      .withAttributes(ActorAttributes.supervisionStrategy(decider))
      .toMat(Sink.ignore)(Keep.both)
      .mapMaterializedValue(DrainingControl.apply)

  /**
    * convert sequence of byte streams to entities, omitting any invalid ones
    * @param buffs sequence of serialized protobuf payloads
    * @return successfully parsed entities, in input order
    */
  private[this] def convertFromProtocolBuffersToEntities(buffs: Iterable[Array[Byte]]) =
    buffs.flatMap(convertFromProtocolBufferToEntity(_)).toSeq

  /**
    * convert a single byte stream to an entity
    * @param buff serialized protobuf payload
    * @return the parsed entity, or None if parsing failed
    */
  private[this] def convertFromProtocolBufferToEntity(buff: Array[Byte]): Option[DspEvent] =
    Try(DspEvent.parseFrom(buff)).toOption

  /** Materializes the consuming stream inside IO and registers a shutdown hook
    * that drains in-flight batches before the JVM exits.
    * NOTE(review): the hook waits without a timeout — shutdown can block
    * indefinitely if draining never completes.
    * @return the draining control handle for the running stream
    */
  def run(): IO[DrainingControl[Done]] = IO {
    val control = consumingStream.run()
    scala.sys.addShutdownHook {
      Await.result(control.drainAndShutdown(), Duration.Inf)
    }
    control
  }

}
