package com.sunday.common.mq.kafka.study.spring.e7_Pull_Messages;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.springframework.beans.BeansException;
import org.springframework.beans.factory.InitializingBean;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;
import org.springframework.kafka.core.DefaultKafkaConsumerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.SendResult;
import org.springframework.kafka.support.TopicPartitionOffset;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

import java.time.Duration;
import java.time.Instant;
import java.util.Arrays;
import java.util.Collection;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;

/**
 * Demo REST endpoints exercising the {@link KafkaTemplate} send and on-demand
 * receive ("pull") APIs.
 *
 * <p>The template itself is provided by
 * {@link org.springframework.boot.autoconfigure.kafka.KafkaAutoConfiguration};
 * this class additionally wires a consumer factory into it so the
 * {@code receive(...)} endpoints can pull records outside a listener container.
 */
@Slf4j
@RestController
@RequestMapping("/test")
public class MQApi implements ApplicationContextAware, InitializingBean {

    private ApplicationContext applicationContext;

    @Autowired
    private KafkaTemplate<Object, Object> kafkaTemplate;

    @Override
    public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
        this.applicationContext = applicationContext;
    }

    /**
     * Attaches a consumer factory (with a dedicated group id) to the template.
     * {@link KafkaTemplate#receive} requires a consumer factory; without this
     * step the pull endpoint below would fail.
     */
    @Override
    @SuppressWarnings("unchecked") // getBean returns the raw factory type; the context bean is <Object, Object>
    public void afterPropertiesSet() {
        DefaultKafkaConsumerFactory<Object, Object> consumerFactory =
                applicationContext.getBean(DefaultKafkaConsumerFactory.class);
        consumerFactory.updateConfigs(Map.of(ConsumerConfig.GROUP_ID_CONFIG, "myId_5"));
        kafkaTemplate.setConsumerFactory(consumerFactory);
    }

    /**
     * Sends a single record and blocks until the broker acknowledges it.
     *
     * <p>Fix: the original mapping was {@code /send/{topic}} while the method
     * also declared {@code @PathVariable String key} — Spring would throw a
     * {@code MissingPathVariableException} on every request because {@code key}
     * had no matching URI template variable.
     *
     * @param topic destination topic
     * @param key   used as both the record key and value
     * @throws ExecutionException   if the send fails
     * @throws InterruptedException if interrupted while waiting for the ack
     */
    @GetMapping("/send/{topic}/{key}")
    public void send(@PathVariable String topic, @PathVariable String key) throws ExecutionException, InterruptedException {
        CompletableFuture<SendResult<Object, Object>> future;
        future = kafkaTemplate.send(topic, key, key);
        // Block for the result so the caller sees send failures immediately (demo only).
        log.info("{}", future.get());
    }

    /**
     * Sends 10 records asynchronously; each result (or failure) is logged from
     * the completion callback rather than blocking the request thread.
     *
     * @param topic destination topic
     */
    @GetMapping("/batch/{topic}")
    public void batch(@PathVariable String topic) {
        CompletableFuture<SendResult<Object, Object>> future;
        for (int i = 0; i < 10; i++) {
            future = kafkaTemplate.send(topic, i + "", i + "");
            future.whenComplete((result, throwable) -> log.info("[{}] result : {}, throwable : {}", topic, result, throwable));
        }
    }

    /**
     * Pulls records on demand via {@link KafkaTemplate#receive} instead of a
     * listener container: first a single record by (partition, offset), then a
     * batch across several partitions.
     *
     * <p>Fix: {@code receive(...)} returns {@code null} when no record is
     * available within the timeout (or the offset does not exist); the original
     * code dereferenced the result unconditionally and could NPE. Null results
     * are now logged and skipped.
     *
     * @param topic topic to read from
     */
    @GetMapping("/pull/{topic}")
    public void pull(@PathVariable String topic) {

        ConsumerRecord<Object, Object> consumerRecord;

        /*
         * Receive a single message via KafkaTemplate. Parameters:
         *   topic                  - the topic to read from
         *   1                      - the partition number to read from
         *   0                      - the offset to start reading at
         *   Duration.ofSeconds(1)  - maximum time to wait for the record
         * I.e. fetch the record at offset 0 of partition 1, waiting at most 1 second.
         */
        consumerRecord = kafkaTemplate.receive(topic, 1, 0, Duration.ofSeconds(1));
        if (consumerRecord == null) {
            log.info("========= no record at {} partition 1 offset 0 within timeout", topic);
        } else {
            Instant instant = Instant.ofEpochMilli(consumerRecord.timestamp());
            log.info("{}", instant);
            log.info("========= {}", consumerRecord);
        }

        /*
         * If the requested offset does not exist in the partition, behavior
         * depends on the broker version: older brokers (<= 0.11) report an
         * error for the unknown offset, while newer ones may fall back to the
         * earliest valid offset and return records from the start of the
         * partition. Callers must therefore handle the missing-offset case
         * (here: a null record) explicitly.
         */
        consumerRecord = kafkaTemplate.receive(topic, 1, 100, Duration.ofSeconds(2));
        log.info("========= {}", consumerRecord);

        ConsumerRecords<Object, Object> consumerRecords;

        // Batch receive: one (topic, partition, offset) request per partition 0..2.
        Collection<TopicPartitionOffset> requested = Arrays.asList(
                new TopicPartitionOffset(topic, 0, 0L),
                new TopicPartitionOffset(topic, 1, 0L),
                new TopicPartitionOffset(topic, 2, 0L)
        );
        consumerRecords = kafkaTemplate.receive(requested);
        consumerRecords.forEach(record -> log.info("========= {}", record));
    }

}
