package com.gator.kafka;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.WakeupException;
import org.apache.kafka.common.serialization.IntegerDeserializer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;


/**
 * A Kafka consumer that runs on its own thread, polling a single topic and
 * printing each record to stdout.
 *
 * <p>Call {@link #start()} to begin consuming and {@link #shutdown()} (from any
 * thread) to stop the poll loop and release the consumer's network resources.
 */
public class PABLO_KafkaConsumer extends Thread {

    private final KafkaConsumer<Integer, String> consumer;
    private final String topic;
    // Checked by the poll loop each iteration; flipped by shutdown() for a clean exit.
    private volatile boolean running = true;

    /**
     * Configures and creates the underlying {@link KafkaConsumer}.
     *
     * @param topic the topic this consumer will subscribe to when started
     */
    public PABLO_KafkaConsumer(String topic) {
        Properties properties = new Properties();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092,localhost:9093,localhost:9094");
        properties.put(ConsumerConfig.CLIENT_ID_CONFIG, "pablo-consumer");
        // Required: consumers must belong to a group for partition assignment.
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, "pablo_group_1");
        // If no heartbeat is received within this window (default 45s), the group
        // coordinator evicts this consumer and rebalances its partitions.
        properties.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000");
        // Auto-commit is enabled by default; consumed offsets are committed back
        // to the broker in batches at this interval.
        properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "5000");
        // Deserializers matching the producer's Integer key / String value types.
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class.getName());
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // Where to start when the group has no committed offset. One of:
        //   earliest - reset to the oldest available offset
        //   latest   - reset to the newest offset
        //   none     - throw an exception if no previous offset is found
        properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        consumer = new KafkaConsumer<Integer, String>(properties);
        this.topic = topic;
    }

    /**
     * Subscribes to the topic and polls until {@link #shutdown()} is called,
     * printing every received record. Always closes the consumer on exit so its
     * sockets are released and the group rebalances promptly.
     */
    @Override
    public void run() {
        try {
            consumer.subscribe(Collections.singleton(this.topic));
            while (running) {
                // Block up to 1s waiting for records, then re-check the running flag.
                ConsumerRecords<Integer, String> consumerRecords = consumer.poll(Duration.ofSeconds(1));
                consumerRecords.forEach(System.out::println);
            }
        } catch (WakeupException e) {
            // Expected: shutdown() calls consumer.wakeup() to break out of a
            // blocking poll. Only rethrow if we were NOT asked to stop.
            if (running) {
                throw e;
            }
        } finally {
            consumer.close();
        }
    }

    /**
     * Signals the poll loop to stop and interrupts any in-progress poll.
     * Safe to call from any thread; wakeup() is the only thread-safe
     * KafkaConsumer method.
     */
    public void shutdown() {
        running = false;
        consumer.wakeup();
    }

    public static void main(String[] args) {
        PABLO_KafkaConsumer consumer = new PABLO_KafkaConsumer("test_partitions");
        // Close the consumer cleanly on Ctrl-C / JVM termination.
        Runtime.getRuntime().addShutdownHook(new Thread(consumer::shutdown));
        consumer.start();
    }
}
