package com.charlotte.gupao.study.commit;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Properties;

/**
 * Kafka consumer that disables auto-commit and commits offsets manually
 * with {@link KafkaConsumer#commitSync()} after buffering a minimum batch of records.
 *
 * @author junjie.ding
 * @date 2020/11/4 15:59
 */
public class CommitConsumer {

    public static void main(String[] args) {
        Properties props = new Properties();
        // Kafka broker address
        props.setProperty("bootstrap.servers", "172.31.21.19:9092");

        // Key and value deserializers
        props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        props.setProperty("group.id", "test");
        // Disable auto-commit; offsets advance only through explicit commit calls
//        props.setProperty("enable.auto.commit", "true");
//        props.setProperty("auto.commit.interval.ms", "1000");
        props.setProperty("enable.auto.commit", "false");

        // Where to start when no committed offset exists: earliest | latest | none
        props.setProperty("auto.offset.reset", "earliest");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Collections.singletonList("commit-topic-test3"));

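        // Batching pattern: records are buffered and offsets are committed only
        // after a minimum batch has been handled, so a crash replays at most one
        // uncommitted batch (at-least-once delivery).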
        final int minBatchSize = 10;
        List<ConsumerRecord<String, String>> buffer = new ArrayList<>();
        // Manual-commit poll loop
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("offset = %d, key = %s, value = %s, partition = %s%n", record.offset(), record.key(), record.value(), record.partition());
                buffer.add(record);
            }
            if (buffer.size() >= minBatchSize) {
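                // In a real application the buffered batch would be processed here
                // (e.g., persisted to a database) before its offsets are committed.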
                // Synchronous commit
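                // commitSync() commits the offsets returned by the most recent poll()
                // for all assigned partitions and blocks until the broker acknowledges
                // the commit (or throws on an unrecoverable error).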
                consumer.commitSync();
                buffer.clear();
            }
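            // A non-blocking alternative (sketch, not used here): commitAsync()
            // returns immediately and reports the result to an optional callback.
            // consumer.commitAsync((offsets, exception) -> {
            //     if (exception != null) {
            //         System.err.println("Async commit failed for " + offsets + ": " + exception);
            //     }
            // });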
        }
    }
}
