package com.gzsxy.kafka.consumer;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.annotation.PartitionOffset;
import org.springframework.kafka.annotation.TopicPartition;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;

/**
 * Kafka consumer component with two listener endpoints.
 *
 * <p>Both listeners use manual offset commits, which only take effect when the
 * consumer is configured with {@code enable-auto-commit: false} and the listener
 * container ack mode is set to a manual mode (e.g. {@code MANUAL} or
 * {@code MANUAL_IMMEDIATE}).
 *
 * @author xiaolong
 * @version 1.0
 * @date 2022/7/9 14:04
 */
@Component
public class MyConsumer {

    /**
     * Consumes records from the {@code test} topic as a member of consumer
     * group {@code MyGroup1}.
     *
     * @param record the received key/value record
     * @param ack    handle used to manually commit the offset after processing
     */
    @KafkaListener(topics = "test",groupId = "MyGroup1")
    public void listenGroup(ConsumerRecord<String, String> record, Acknowledgment ack) {
        handleRecord(record, ack);
    }


    /**
     * Consumes, as part of group {@code testGroup}:
     * <ul>
     *   <li>{@code topic1} partitions 0 and 1,</li>
     *   <li>{@code topic2} partition 0, and partition 1 starting from offset 100.</li>
     * </ul>
     *
     * <p>{@code concurrency = "3"} is the number of concurrent consumers in this
     * group for this listener; it should be less than or equal to the total
     * number of partitions consumed.
     *
     * @param record the received key/value record
     * @param ack    handle used to manually commit the offset after processing
     */
    @KafkaListener(groupId = "testGroup", topicPartitions = {
            @TopicPartition(topic = "topic1", partitions = {"0", "1"}),
            @TopicPartition(topic = "topic2", partitions = "0",partitionOffsets = @PartitionOffset(partition = "1",initialOffset = "100"))}
            ,concurrency = "3")
    public void listenGroup1(ConsumerRecord<String, String> record,Acknowledgment ack) {
        handleRecord(record, ack);
    }

    /**
     * Shared processing for both listeners: prints the record value and the
     * record itself, then manually commits the offset.
     */
    private void handleRecord(ConsumerRecord<String, String> record, Acknowledgment ack) {
        String value = record.value();
        System.out.println(value);
        System.out.println(record);
        // Manual offset commit (no-op unless the container is in a manual ack mode).
        ack.acknowledge();
    }


}
