package com.hmc.kafka.consumer;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.Arrays;
import java.util.Collection;
import java.util.Properties;

/**
 * Demonstrates manual offset committing.
 *
 * <p>Kafka's default auto-commit submits offsets every 5 seconds. Drawback: if an
 * offset is committed before the records are actually processed and the consumer
 * then fails, those records are skipped on restart (commit-before-consume).
 * Disabling auto-commit and committing explicitly after processing avoids this.
 */
public class CustomConsumerCommit {
    public static void main(String[] args) {

        Properties properties = new Properties();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "hadoop102:9092,hadoop103:9092");
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, "test");

        /*
        // Enable auto-commit; the commit interval is given in milliseconds.
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
        properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, 1000);
        */

        // Disable auto-commit so offsets are committed explicitly below.
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);

        // Parameterize the consumer (the original used a raw type) and close it
        // via try-with-resources so the client releases its network resources
        // even if poll/commit throws.
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties)) {
            Collection<String> topics = Arrays.asList("first");
            consumer.subscribe(topics);
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(2));
                records.forEach(System.out::println);
                // Synchronous commit: blocks until the broker acknowledges the
                // commit before the next batch is polled.
                consumer.commitSync();
                // Asynchronous alternative: returns immediately without waiting
                // for the broker's acknowledgement.
                // consumer.commitAsync();
            }
        }
    }
}
