package com.example.springbootkafkalearning.mq;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.annotation.PartitionOffset;
import org.springframework.kafka.annotation.TopicPartition;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.messaging.MessageHeaders;
import org.springframework.messaging.support.MessageHeaderAccessor;
import org.springframework.stereotype.Component;

import java.time.Instant;
import java.util.Date;

import static com.example.springbootkafkalearning.controller.KafkaController.TOPIC_NAME;

/**
 * @author 罗俊华
 * @date 2021/12/17 - 4:10 下午
 */
@Slf4j
@Component
public class KafkaConsumer {

    /**
     * Basic listener on {@code TOPIC_NAME} (currently disabled — the annotation is
     * commented out). Also demonstrates the extra argument types a Spring Kafka
     * listener method may declare (message headers and a header accessor).
     *
     * @param consumerRecord        the record delivered by the listener container
     * @param ack                   handle for manual offset acknowledgment
     * @param messageHeaders        Spring Messaging view of the Kafka record headers
     * @param messageHeaderAccessor mutable accessor over the same headers
     */
    //    @KafkaListener(topics = TOPIC_NAME, groupId = "spring-kafka-consumer-group")
    public void listener1(ConsumerRecord<String, String> consumerRecord, Acknowledgment ack, MessageHeaders messageHeaders, MessageHeaderAccessor messageHeaderAccessor) {
        consume(consumerRecord, ack);
    }


    /**
     * Consumes explicit partitions of {@code multiple-partition-replication-topic},
     * pinning each partition to a fixed starting offset via {@link PartitionOffset}.
     *
     * <p>Topic setup used for this demo:
     * <pre>
     * luo@ubuntu:~/kafka_2.13-2.4.1/bin$ ./kafka-topics.sh  --bootstrap-server 192.168.50.205:9090 --topic multiple-partition-replication-topic --create --partitions 3
     * luo@ubuntu:~/kafka_2.13-2.4.1/bin$ ./kafka-topics.sh  --bootstrap-server 192.168.50.205:9090 --topic multiple-partition-replication-topic --describe
     * Topic: multiple-partition-replication-topic	PartitionCount: 3	ReplicationFactor: 1	Configs: segment.bytes=1073741824
     * 	Topic: multiple-partition-replication-topic	Partition: 0	Leader: 0	Replicas: 0	Isr: 0
     * 	Topic: multiple-partition-replication-topic	Partition: 1	Leader: 2	Replicas: 2	Isr: 2
     * 	Topic: multiple-partition-replication-topic	Partition: 2	Leader: 1	Replicas: 1	Isr: 1
     * </pre>
     *
     * @param consumerRecord the record delivered by the listener container
     * @param ack            handle for manual offset acknowledgment
     */
    @KafkaListener(groupId = "spring-group", topicPartitions = {
            @TopicPartition(topic = "multiple-partition-replication-topic", /*partitions = {"0", "1", "2"},*/ partitionOffsets = {
                    @PartitionOffset(partition = "0", initialOffset = "5"), // partition 0: start consuming at offset 5
                    @PartitionOffset(partition = "1", initialOffset = "50"), // partition 1: start consuming at offset 50
                    @PartitionOffset(partition = "2", initialOffset = "25") // partition 2: start consuming at offset 25
            })
    })
    public void consumeByOffset(ConsumerRecord<String, String> consumerRecord, Acknowledgment ack) {
        consume(consumerRecord, ack);
    }


    /**
     * Shared handler: logs the record's metadata and payload, then manually
     * acknowledges the offset.
     *
     * @param consumerRecord the record to log
     * @param ack            handle used to commit the record's offset
     */
    private void consume(ConsumerRecord<String, String> consumerRecord, Acknowledgment ack) {
        // NOTE: spring.kafka.consumer.auto-offset-reset decides where a group with no
        // committed offset starts (earliest/latest) — configure it deliberately.

        // java.time.Instant instead of the legacy java.util.Date for the record timestamp.
        log.info("收到消息：主题：{}分区：{}偏移量：{}日期：{}，内容：{}", consumerRecord.topic(), consumerRecord.partition(), consumerRecord.offset(), Instant.ofEpochMilli(consumerRecord.timestamp()), consumerRecord.value());

        ack.acknowledge(); // manual ack
        // Manual ack requires spring.kafka.consumer.enable-auto-commit=false;
        // post-acknowledge behavior is governed by spring.kafka.listener.ack-mode (e.g. MANUAL / MANUAL_IMMEDIATE).
    }


}
