package io.gatling.extensions.kafka.action

import java.time.Duration
import java.util
import java.util.concurrent.Executors

import com.typesafe.scalalogging.StrictLogging
import io.gatling.commons.stats.OK
import io.gatling.commons.util.Clock
import io.gatling.core.CoreComponents
import io.gatling.core.action.{Action, ExitableAction}
import io.gatling.core.session.Session
import io.gatling.core.stats.StatsEngine
import io.gatling.core.util.NameGen
import io.gatling.extensions.kafka.protocol.kafkaConsumerComponents
import org.apache.kafka.clients.consumer.ConsumerRecords

import scala.concurrent.{ExecutionContext, ExecutionContextExecutor, Future}

case class KafkaConsumerAction(
                                name: String,
                                topics: util.ArrayList[String],
                                kafkaConsumerComponents: kafkaConsumerComponents,
                                coreComponents: CoreComponents,
                                throttled: Boolean,
                                next: Action,
                              ) extends ExitableAction with NameGen with StrictLogging {

  // Unbounded cached thread pool: each consumer loop blocks inside poll(), so every
  // consumer needs its own dedicated thread. In practice the number of consumers is
  // bounded by the number of Kafka partitions, so the pool stays small.
  implicit val threads: ExecutionContextExecutor = ExecutionContext.fromExecutor(Executors.newCachedThreadPool())

  override def statsEngine: StatsEngine = coreComponents.statsEngine

  override def clock: Clock = coreComponents.clock

  /**
   * Starts a background polling loop on this virtual user's consumer and logs one
   * OK response to the stats engine per record received.
   *
   * NOTE(review): the loop never terminates and `next ! session` is never invoked,
   * so the scenario chain ends at this action — confirm this is intentional.
   */
  override def execute(session: Session): Unit = {
    Future {
      // Look the consumer up once instead of on every poll iteration.
      val consumer = kafkaConsumerComponents.consumerMap.get(session.userId)
      consumer.subscribe(topics)
      while (true) {
        val start: Long = clock.nowMillis
        val records: ConsumerRecords[String, String] = consumer.poll(Duration.ofMillis(100))
        // One OK response per consumed record; all records from a poll share the
        // same start timestamp (the moment the poll was issued).
        for (_ <- 1 to records.count()) {
          statsEngine.logResponse(session.scenario, session.groups, name, start, clock.nowMillis, OK, None, None)
        }
      }
    }.failed.foreach { e =>
      // The Future's failure was previously dropped silently, so a subscribe/poll
      // exception would kill the consumer loop without any trace. Surface it.
      logger.error(s"Kafka consumer loop failed for user ${session.userId}", e)
    }
  }
}
