package com.learn.kafka.commit;


import com.learn.kafka.config.ConsumerProperties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

/**
 * Kafka consumer demonstrating <em>manual</em> batch offset commits: records are
 * buffered and offsets are committed synchronously once a batch-size threshold
 * is reached (at-least-once delivery semantics).
 *
 * <p>NOTE(review): the original class comment said "automatic commit", but the
 * code calls {@code commitSync()} explicitly. For manual commits to actually
 * control offsets, the consumer config must set {@code enable.auto.commit=false}
 * — confirm the default loaded by {@link ConsumerProperties}.
 *
 * @author learn.kafka
 */
public class CommitConsumer extends ConsumerProperties {

    /** Topic this example subscribes to. */
    private static final String TOPIC = "commit-test";

    /** Number of buffered records that triggers a synchronous offset commit. */
    private static final int MIN_BATCH_SIZE = 200;

    public static void main(String[] args) {
        // Create a consumer instance and start the poll loop.
        CommitConsumer consumer = new CommitConsumer();
        consumer.startConsuming();
    }

    /** Loads the default configuration from the parent; override via setConfig if needed. */
    public CommitConsumer() {
        super(); // parent constructor loads the default consumer properties
        // To override configuration, call setConfig here, e.g.:
        // super.setConfig("bootstrap.servers", "192.168.44.160:9092");
        // super.setConfig("group.id", "mate-test-group1");
    }

    /**
     * Polls the topic forever, printing each record, and commits offsets
     * synchronously once {@link #MIN_BATCH_SIZE} records have been processed.
     * Committing only after a full batch means a crash re-delivers the
     * uncommitted records on restart (at-least-once).
     */
    private void startConsuming() {
        // try-with-resources ensures the consumer is closed (leaves the group,
        // releases sockets) even if poll/processing throws — the original
        // version leaked the consumer on failure.
        try (KafkaConsumer<Integer, String> consumer = new KafkaConsumer<>(super.getProps())) {
            // Subscribe to the example topic
            consumer.subscribe(Arrays.asList(TOPIC));

            List<ConsumerRecord<Integer, String>> buffer = new ArrayList<>();

            // Poll-and-process loop
            while (true) {
                ConsumerRecords<Integer, String> records = consumer.poll(Duration.ofMillis(100));
                for (ConsumerRecord<Integer, String> record : records) {
                    System.out.printf(
                            "offset = %d, key = %s, value = %s, partition = %d%n",
                            record.offset(), record.key(), record.value(), record.partition()
                    );
                    buffer.add(record);
                }

                // Commit offsets once the batch threshold is reached
                if (buffer.size() >= MIN_BATCH_SIZE) {
                    consumer.commitSync(); // synchronous commit of the last polled offsets
                    buffer.clear();
                }
            }
        }
    }
}
