package cn.z2huo.demo.kafka.apache.consumer;

import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.WakeupException;

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

/**
 * <p>Consumer that relies on Kafka's automatic offset commits
 * ({@code enable.auto.commit=true}).
 *
 * <p>With auto commit enabled, a {@code group.id} must be configured,
 * otherwise the client rejects the configuration at construction time.
 *
 * @author z2huo
 */
@Slf4j
public class AutoCommitConsumer {

    public static final String TOPIC = "test-topic";

    public static final String GROUP_ID = "my-consumer-group";

    public static void main(String[] args) {
        new AutoCommitConsumer().consume();
    }

    /**
     * <p>Subscribes to {@link #TOPIC} and polls forever, letting the client
     * commit offsets automatically in the background.
     *
     * <p>A JVM shutdown hook calls {@link KafkaConsumer#wakeup()} so that
     * Ctrl+C / SIGTERM breaks out of the blocking {@code poll()} via
     * {@link WakeupException}; the consumer is then closed cleanly (leaving
     * the group) instead of being killed mid-poll with the consumer never closed.
     */
    public void consume() {
        Properties properties = ConsumerProperties.getConsumerProperties();
        // When enable.auto.commit is true, a group id is mandatory; without one the client throws:
        // Exception in thread "main" org.apache.kafka.common.errors.InvalidConfigurationException: enable.auto.commit cannot be set to true when default group id (null) is used.
        properties.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
        // How often (in ms) the consumer auto-commits its offsets back to Kafka.
        properties.setProperty(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        // Random suffix gives each run a distinct consumer group.
        // NOTE(review): presumably so every demo run is independent of previously
        // committed offsets — confirm this is intended for the demo.
        properties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, GROUP_ID + "-" + RandomStringUtils.randomNumeric(4));

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties)) {
            // Graceful shutdown: wake the consumer out of poll(), then wait for the
            // main thread to finish closing the consumer before the JVM exits.
            Thread mainThread = Thread.currentThread();
            Runtime.getRuntime().addShutdownHook(new Thread(() -> {
                consumer.wakeup();
                try {
                    mainThread.join();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }));

            // subscribe() joins the consumer group with dynamic partition
            // assignment, which also requires group.id to be set.
            consumer.subscribe(Collections.singletonList(TOPIC));
            try {
                // Poll and process messages until shutdown is requested.
                while (true) {
                    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
                    for (ConsumerRecord<String, String> record : records) {
                        log.info("Received message, partition is {}, offset is {}, key is {}, value is {}",
                                record.partition(), record.offset(), record.key(), record.value());
                    }
                }
            } catch (WakeupException e) {
                // Expected on shutdown: wakeup() makes the blocked poll() throw this.
                log.info("Consumer woken up, shutting down");
            }
            // try-with-resources closes the consumer here, leaving the group cleanly.
        }
    }

}
