package tjs.study.notes.queue.consumeA.kafka.initial;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.internals.ConsumerCoordinator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.listener.KafkaMessageListenerContainer;
import org.springframework.stereotype.Component;
import org.springframework.util.StopWatch;

import java.util.List;
import java.util.UUID;

@Component
public class InitialKafkaConsumer {
    private static final Logger log = LoggerFactory.getLogger(InitialKafkaConsumer.class);

    /**
     * Receives single messages and simulates 1s of processing per record.
     *
     * Scanning &amp; wrapping beans that hold {@code @KafkaListener}:
     *      {@link org.springframework.kafka.annotation.KafkaListenerAnnotationBeanPostProcessor#postProcessAfterInitialization}
     *
     *
     * poll:
     * A scheduled thread periodically tries to pull messages:
     *      {@link  KafkaMessageListenerContainer.ListenerConsumer#run()}
     *          {@link KafkaMessageListenerContainer.ListenerConsumer#pollAndInvoke()}
     *              {@link KafkaMessageListenerContainer.ListenerConsumer#doPoll()}
     * Invoking the KafkaListener:
     *      {@link KafkaMessageListenerContainer.ListenerConsumer#doInvokeOnMessage(org.apache.kafka.clients.consumer.ConsumerRecord)}
     *          {@link org.springframework.messaging.handler.invocation.InvocableHandlerMethod#doInvoke}
     * Committing offsets:
     *      {@link KafkaConsumer#poll(java.time.Duration)}
     *          {@link ConsumerCoordinator#maybeAutoCommitOffsetsSync(org.apache.kafka.common.utils.Timer)}
     * <p>
     * <p>
     * auto-commit-interval is evaluated during the consumer's poll, at {@link ConsumerCoordinator#maybeAutoCommitOffsetsAsync(long)}
     * To see the log line reporting how many records the current poll returned: {@link KafkaMessageListenerContainer.ListenerConsumer#debugRecords(org.apache.kafka.clients.consumer.ConsumerRecords)}
     *
     * @param message the record value delivered by the listener container
     */
    @KafkaListener(groupId = "testGroup4", topics = {"tjs_test_cousumer6"}/*, concurrency = "1"*/)
    public void receiveMessage(String message) {
        System.out.println("initial-start: " + message);
        try {
            // Simulate 1 second of per-record processing time.
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            // Restore the interrupt flag so the listener container can shut down cleanly;
            // swallowing the interruption would hide a stop request.
            Thread.currentThread().interrupt();
            log.warn("Interrupted while processing message", e);
        }
        System.out.println("initial-end: " + message);
    }

    @Autowired
    private KafkaListenerContainerFactory<?> factory;

    // With this approach, CLIENT-ID and CONSUMER-ID are empty.
//    @KafkaListener(groupId = "testGroup",
//            containerFactory = "batchFactory",// explicitly use the batch-capable factory
//            topicPartitions = {
//                    @TopicPartition(topic = "tjs_test_cousumer", partitions = {"0", "1"}),
////                    @TopicPartition(topic = "tjs_test_cousumer",
////                            partitionOffsets = {@PartitionOffset(partition = "0", initialOffset = "0"), @PartitionOffset(partition = "1", initialOffset = "0"), @PartitionOffset(partition = "2", initialOffset = "0")})
//            }, concurrency = "2")// concurrency = number of consumers in the same group (parallel consumers); keep it <= the total partition count

    /**
     * Batch consumption example: logs each record and times the whole batch.
     * Listener annotation is commented out above; kept for study notes.
     *
     * @param records the batch of records returned by one poll
     * @throws InterruptedException if the simulated processing sleep is interrupted;
     *         propagated so the container sees the interruption
     */
    public void consumerBatch(List<ConsumerRecord<String, String>> records) throws InterruptedException {
        String batchId = UUID.randomUUID().toString();
        log.info("批次{},数量{},开始。。。", batchId, records.size());
        StopWatch stopWatch = new StopWatch(batchId + "批量消费开始，数量" + records.size());
        stopWatch.start();
        for (ConsumerRecord<?, ?> record : records) {
            log.info("接收消息：{}", record.value().toString());
            // Simulate 1 second of per-record processing time.
            Thread.sleep(1000);
        }
        stopWatch.stop();

        log.info("批次{},数量{},结束。。。\n{}", batchId, records.size(), stopWatch.prettyPrint());
    }

    // Batch messages with manual acknowledgment (kept as a study-note example).
//    @KafkaListener(groupId = "nickyTest", topics = {"nickyTest"})
//    public void consumerBatch(List<ConsumerRecord<?, ?>> records, Acknowledgment ack) {
//        log.info("接收到消息数量：{}", records.size());
//        for (ConsumerRecord<?, ?> message : records) {
//            System.out.println("batch initial: " + message);
//        }
//        // manual commit
//        ack.acknowledge();
//    }
}