package com.open.source.kafka.delay;

import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.DelayQueue;
import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.LockSupport;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.springframework.beans.BeansException;
import org.springframework.beans.factory.DisposableBean;
import org.springframework.boot.ApplicationArguments;
import org.springframework.boot.ApplicationRunner;
import org.springframework.boot.autoconfigure.kafka.KafkaProperties;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.kafka.core.KafkaTemplate;

/**
 * Abstract base class for Kafka delayed-message consumption.
 *
 * <p>Concrete listeners supply the delay topic and per-partition delay times through
 * {@link DelayPartitionStrategy}. Partitions whose head record has not yet satisfied its delay
 * are paused and offered to a shared {@link DelayQueue}; a single background thread moves expired
 * entries into a shared per-topic map from which every consumer loop resumes its own partitions.
 *
 * @author ZonLen since on 2022/8/9 3:12 PM
 */
@Slf4j
public abstract class AbstractKafkaDelayConsumerListener implements DelayPartitionStrategy,
    ApplicationContextAware, ApplicationRunner, DisposableBean {

  private ApplicationContext applicationContext;

  /**
   * Thread pool running one long-lived consumer loop per concrete listener.
   */
  private static final ThreadPoolExecutor CONSUMER_THREAD_POOL = new ThreadPoolExecutor(2, 16, 60,
      TimeUnit.SECONDS, new SynchronousQueue<>(),
      (ThreadFactory) Thread::new);
  /*
   * Currently paused partitions per topic; exists to avoid blocking multi-partition consumption.
   * Example: a consumer owns partitions 1 and 2; partition 2 has records whose delay has expired
   * while partition 1 only has records that are still delayed. The DelayQueue is drained on a
   * separate thread (draining it inline would block the consumer's poll loop), so this map is the
   * shared hand-off point from which each consumer loop picks up the partitions it must resume.
   */
  private static final Map<String, List<TopicPartition>> CUR_CONSUMER_PAUSED_TOPIC_PARTITION = new ConcurrentHashMap<>();
  /**
   * Delay queue of paused topic partitions, shared by all delay consumers.
   */
  private static final DelayQueue<DelayTopicPartition> PAUSED_TOPIC_PARTITION_DELAY_QUEUE = new DelayQueue<>();

  // NOTE(review): this flag is shared by every listener instance, so destroy() on any one
  // listener stops ALL delay consumer loops — confirm that is the intended shutdown semantics.
  private static volatile boolean exit = false;

  /*
   * Starts the single background thread shared by all delay consumers. Flow:
   * 1. Consumer loops offer paused delayed partitions into PAUSED_TOPIC_PARTITION_DELAY_QUEUE.
   * 2. This thread takes expired entries and publishes them into the shared per-topic map.
   * 3. Each consumer loop removes its topic's entry and resumes those partitions before polling.
   * (If a rebalance moves a delayed partition to another consumer while it is paused, resuming it
   * on the old owner may fail; that case is not handled yet.)
   */
  static {
    final Thread expiredPartitionMover = new Thread(() -> {
      while (!exit && !Thread.currentThread().isInterrupted()) {
        try {
          final DelayTopicPartition delayTopicPartition = PAUSED_TOPIC_PARTITION_DELAY_QUEUE.take();
          final String topic = delayTopicPartition.getTopic();
          final TopicPartition partition =
              new TopicPartition(topic, delayTopicPartition.getPartition());
          // merge() runs atomically on ConcurrentHashMap, so this publication cannot race with
          // the remove() in delayConsumer() and lose a partition. The previous
          // computeIfAbsent(...).add(...) could add to a list a consumer had already removed,
          // leaving that partition paused forever.
          CUR_CONSUMER_PAUSED_TOPIC_PARTITION.merge(topic,
              new ArrayList<>(Collections.singletonList(partition)),
              (existing, addition) -> {
                existing.addAll(addition);
                return existing;
              });
        } catch (InterruptedException e) {
          // Restore the interrupt status and stop, instead of swallowing the interruption
          // and looping straight back into take().
          Thread.currentThread().interrupt();
        }
      }
    }, "kafka-delay-partition-mover");
    expiredPartitionMover.setDaemon(true);
    expiredPartitionMover.start();
  }

  private KafkaProperties kafkaProperties;

  /**
   * Starts the delay consumer loop once the application is ready. If no {@link KafkaProperties}
   * bean exists, the listener is considered disabled and nothing is started.
   */
  @Override
  public void run(ApplicationArguments args) {
    try {
      kafkaProperties = applicationContext.getBean(KafkaProperties.class);
    } catch (BeansException e) {
      log.info("Kafka delay topic<{}> custom listener<{}> Is not enabled", delayTopic(),
          getClass().getName());
      return;
    }
    CONSUMER_THREAD_POOL.execute(this::delayConsumer);
  }

  /**
   * Signals the consumer loops to exit and release their resources.
   */
  @Override
  public void destroy() {
    exit = true;
    log.info("kafka delay consumer topic <{}> listener stop", delayTopic());
  }

  /**
   * Long-running delayed consumption loop for this listener's topic: resume expired partitions,
   * poll, process each partition's batch, repeat until {@link #exit} is set.
   */
  private void delayConsumer() {
    final String topic = delayTopic();
    final KafkaConsumer<String, String> consumer = kafkaConsumerInstance();
    // NOTE(review): subscribe() assigns partitions lazily, so assignment() is normally empty
    // before the first poll; this resume() is then a no-op and is kept only as a defensive reset.
    consumer.resume(consumer.assignment());
    try {
      do {
        // Fetch and clear the partitions whose delay has expired. (Multiple consumers in the same
        // group are still problematic: a rebalance can make this resume() target partitions that
        // are no longer assigned to this consumer.)
        final List<TopicPartition> expiredPartitions = CUR_CONSUMER_PAUSED_TOPIC_PARTITION
            .remove(topic);
        if (CollectionUtils.isNotEmpty(expiredPartitions)) {
          // Wake up the paused partitions that now satisfy their delay condition.
          consumer.resume(expiredPartitions);
        }
        // Block at most 10 seconds waiting for records.
        final ConsumerRecords<String, String> consumerRecords = consumer
            .poll(Duration.ofMillis(10000));
        if (consumerRecords.isEmpty()) {
          // No data on any delay partition: park this thread for 3s. This can add up to 3s of
          // extra latency, e.g. a 10s delay may actually take 10-13s.
          LockSupport.parkUntil(System.currentTimeMillis() + 3000L);
          continue;
        }
        // Group the fetched records by partition and process each partition in offset order.
        final Map<Integer, List<ConsumerRecord<String, String>>> partitionRecordMap = new HashMap<>();
        consumerRecords.forEach(record -> partitionRecordMap
            .computeIfAbsent(record.partition(), partition -> new ArrayList<>()).add(record));
        partitionRecordMap.forEach((curPartition, partitionRecords) ->
            consumePartition(consumer, topic, curPartition, partitionRecords));
      } while (!exit);
    } finally {
      // Always release the consumer, even if the loop dies with an exception.
      consumer.close();
    }
  }

  /**
   * Processes one partition's batch in offset order. Records whose delay has expired are handled
   * and committed; on failure they escalate to the next delay partition (or the last-chance
   * handler on the final partition). The first still-delayed record pauses the partition, queues
   * it for later resumption, and rewinds the offset so it is re-fetched after resuming.
   */
  private void consumePartition(KafkaConsumer<String, String> consumer, String topic,
      int curPartition, List<ConsumerRecord<String, String>> partitionRecords) {
    for (ConsumerRecord<String, String> record : partitionRecords) {
      final long delayTime = record.timestamp() +
          curPartitionDelayTime(curPartition) - System.currentTimeMillis();
      if (delayTime <= 0) {
        // Kafka commit semantics: the committed offset is the NEXT offset to read, so commit
        // offset + 1. Committing record.offset() would re-deliver this record after a
        // restart or rebalance.
        final Map<TopicPartition, OffsetAndMetadata> partitionOffset = new HashMap<>();
        partitionOffset.put(new TopicPartition(topic, curPartition),
            new OffsetAndMetadata(record.offset() + 1));
        try {
          delayDealWith(record);
          // Success: commit the offset synchronously.
          consumer.commitSync(partitionOffset);
        } catch (Exception e) {
          // Failure: escalate to the next delay level. Keep the exception for the stack trace.
          log.error("延迟消费失败,topic:{},partition:{},失败消息数据:{}", topic, curPartition, record, e);
          if (curPartition == maxPartition()) {
            // The highest delay level also failed: invoke the last-chance handler, then drop.
            log.error("延迟消费最终失败,topic:{},partition:{},失败消息内容:{}", topic, curPartition, record);
            tryLastTime(record);
            consumer.commitSync(partitionOffset);
            continue;
          }
          // Re-publish the record to the next delay partition with a fresh timestamp.
          final ProducerRecord<String, String> producerRecord = new ProducerRecord<>(
              topic, curPartition + 1, System.currentTimeMillis(),
              record.key(), record.value(), record.headers());
          applicationContext.getBean(KafkaTemplate.class).send(producerRecord);
          // Commit this partition's offset so the failed record is not re-consumed here.
          consumer.commitSync(partitionOffset);
        }
      } else {
        final TopicPartition partition = new TopicPartition(topic, curPartition);
        // Still delayed: queue the partition for resumption once the remaining delay elapses.
        PAUSED_TOPIC_PARTITION_DELAY_QUEUE
            .offer(new DelayTopicPartition(topic, curPartition, delayTime));
        // Pause the partition so subsequent polls skip it...
        consumer.pause(Collections.singletonList(partition));
        // ...and rewind so the next poll after resumption starts from this record.
        consumer.seek(partition, record.offset());
        break;
      }
    }
  }

  /**
   * Handles a record whose delay has elapsed. Throwing any exception escalates the record to the
   * next delay partition (or to {@link #tryLastTime} on the last one).
   */
  protected abstract void delayDealWith(ConsumerRecord<String, String> record);

  /**
   * Last-chance handler invoked when every delay level has failed; afterwards the record's offset
   * is committed and the record is dropped.
   */
  protected abstract void tryLastTime(ConsumerRecord<String, String> record);

  /**
   * Builds and subscribes a manual-commit consumer for the delay topic.
   */
  private KafkaConsumer<String, String> kafkaConsumerInstance() {
    final Map<String, Object> consumerProperties = kafkaProperties.buildConsumerProperties();
    // Disable auto-commit; offsets are committed manually after successful processing.
    consumerProperties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
    // Cap the number of records returned per poll() to bound the work per loop iteration.
    consumerProperties.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 100);
    // Session timeout: without a heartbeat within this window the broker rebalances the group.
    consumerProperties.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 60000);

    // Default consumer group for delay topics, unless one is configured explicitly.
    if (!consumerProperties.containsKey(ConsumerConfig.GROUP_ID_CONFIG)) {
      consumerProperties
          .put(ConsumerConfig.GROUP_ID_CONFIG, "TAX_CLOUD_DEFAULT_DELAY_CONSUMER_GROUP");
    }
    final KafkaConsumer<String, String> consumer = new KafkaConsumer<>(
        consumerProperties);
    // Subscribe to the delay topic; the actual partition assignment happens on the first poll.
    consumer.subscribe(Collections.singletonList(delayTopic()));
    return consumer;
  }

  @Override
  public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
    this.applicationContext = applicationContext;
  }
}
