package com.zf.kafka.consumer.listener;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.Headers;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.stereotype.Component;

import java.nio.charset.StandardCharsets;
import java.util.List;
import java.util.StringJoiner;

/**
 * Consumes messages from Kafka topics using annotation-driven listeners.
 */
@Component
public class TopicConsumer {


    /**
     * Single-message consumption: requires listener.type=single in application.yml,
     * or batch = "false" on the @KafkaListener annotation (the annotation takes precedence).
     * <p>
     * Notes:
     * 1. When batch consumption is NOT enabled, the handler must receive a single
     *    ConsumerRecord&lt;String, String&gt;, otherwise an error is raised.
     * 2. When batch consumption IS enabled, even a single message is wrapped in a list,
     *    so the handler must receive List&lt;ConsumerRecord&lt;String, String&gt;&gt;,
     *    otherwise an error is raised.
     */
    // @KafkaListener(topics = "test", batch = "false")
    // public void handleMsg(ConsumerRecord<String, String> record) {
    //     String topic = record.topic();
    //     Headers headers = record.headers();
    //     StringJoiner sj = new StringJoiner(", ", "{", "}");
    //     for (Header header : headers) {
    //         // Decode header bytes explicitly as UTF-8 (avoid platform default charset)
    //         sj.add(header.key() + ":" + new String(header.value(), StandardCharsets.UTF_8));
    //     }
    //     String key = record.key();
    //     String msg = record.value();
    //     long offset = record.offset();
    //     int partition = record.partition();
    //     System.out.printf("topic: `%s`, headers: `%s`, key: `%s` msg: `%s`, offset: `%d`, partition: `%d`\n", topic, sj, key,  msg, offset, partition);
    // }

    /**
     * Manual offset commit: requires the global settings
     * listener.ack-mode=manual and enable-auto-commit=false.
     */
    // @KafkaListener(topics = "test")
    // public void handleMsg(ConsumerRecord<String, String> record, Acknowledgment acknowledgment) {
    //         String topic = record.topic();
    //         String msg = record.value();
    //         long offset = record.offset();
    //         int partition = record.partition();
    //         System.out.printf("topic: %s, msg: %s, offset: %d, partition: %d\n", topic, msg, offset, partition);
    //         // Commit the offset manually (requires the configuration above)
    //         acknowledgment.acknowledge();
    // }


    /**
     * Batch consumption of messages.
     * <p>
     * 1. Setting batch = "true" on @KafkaListener enables batch consumption for this listener.
     *    It can also be enabled globally via listener.type=batch in application.yml;
     *    the annotation takes precedence over the global configuration.
     * 2. properties = {"max.poll.records:20"} caps the number of records fetched per poll.
     *    Fetching multiple records requires batch consumption to be enabled — see
     *    org.springframework.boot.autoconfigure.kafka.KafkaProperties.Consumer#buildProperties().
     *    This can also be set globally in the configuration file; the annotation wins.
     * <p>
     * Note: once batch consumption is enabled (listener.type=batch or batch = "true"),
     * even a max-poll of 1 wraps messages in a list, so the handler must receive
     * List&lt;ConsumerRecord&lt;String, String&gt;&gt;, otherwise an error is raised.
     *
     * @param records the batch of records fetched in a single poll (size 1..max.poll.records)
     */
    @KafkaListener(topics = "test", batch = "true", properties = {"max.poll.records:20"})
    public void handleMsgBatch(List<ConsumerRecord<String, String>> records) {
        System.out.println("本次消费的消息数量：" + records.size());
        for (ConsumerRecord<String, String> record : records) {
            String topic = record.topic();
            Headers headers = record.headers();
            StringJoiner sj = new StringJoiner(", ", "{", "}");
            for (Header header : headers) {
                // Decode header bytes explicitly as UTF-8; new String(byte[]) without a
                // charset uses the platform default (pre-Java 18), which is not portable.
                sj.add(header.key() + ":" + new String(header.value(), StandardCharsets.UTF_8));
            }
            String key = record.key();
            String msg = record.value();
            long offset = record.offset();
            int partition = record.partition();
            System.out.printf("topic: `%s`, headers: `%s`, key: `%s` msg: `%s`, offset: `%d`, partition: `%d`\n", topic, sj, key, msg, offset, partition);
        }
    }


}
