package com.zw.kafka;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.consumer.OffsetCommitCallback;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;

import java.time.Duration;
import java.util.Arrays;
import java.util.Map;
import java.util.Properties;

/**
 * @author morningstar
 * @date 2021/11/30
 * @desc
 */
/**
 * A simple Kafka consumer that subscribes to {@link #TOPIC_NAME}, prints each
 * received record, and manually commits offsets asynchronously after every
 * non-empty batch (auto-commit is disabled).
 *
 * @author morningstar
 * @date 2021/11/30
 */
public class MySimpleConsumer {

    public final static String TOPIC_NAME = "my-replicated-topic";
    public final static String CONSUMER_GROUP_NAME = "testGroup";

    public static void main(String[] args) {

        // 1. Consumer configuration.
        Properties props = new Properties();
        // FIX: use ConsumerConfig (not ProducerConfig) for a consumer. The key
        // string is identical ("bootstrap.servers"), but referencing the
        // producer's config class here is misleading.
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,
                "tajlmt0law.cogiot.net:39092,tajlmt0law.cogiot.net:39093,tajlmt0law.cogiot.net:39094");

        // Consumer group name.
        props.put(ConsumerConfig.GROUP_ID_CONFIG, CONSUMER_GROUP_NAME);

        /*
         * Automatic commit (disabled in this example; kept for reference):
         *   props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
         *   // interval between automatic offset commits
         *   props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
         */

        // Manual commit: disable auto-commit (the default is true).
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");

        // FIX: a consumer needs DESERIALIZERS. The original configured
        // StringSerializer, which does not implement the Deserializer
        // interface and fails at runtime with a ConfigException.
        // Deserialize record keys from byte arrays to Strings.
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // Deserialize record values from byte arrays to Strings.
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        // 2. Create the consumer. try-with-resources guarantees close() runs
        // (leaving the group cleanly) if the poll loop exits via an exception.
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {

            // 3. Subscribe to the topic.
            consumer.subscribe(Arrays.asList(TOPIC_NAME));

            while (true) {
                /*
                 * poll() is a long poll: it blocks up to the given timeout
                 * (1 second) waiting for records to arrive.
                 */
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
                // FIX: parameterized ConsumerRecord<String, String> instead of
                // the raw type used by the original.
                for (ConsumerRecord<String, String> record : records) {
                    // 4. Print the received message.
                    System.out.printf("收到消息：partition = %d,offset = %d, key = %s, value = %s%n",
                            record.partition(), record.offset(), record.key(), record.value());
                }

                // Commit only when the batch actually contained records.
                if (records.count() > 0) {
                    /*
                     * Synchronous commit (alternative): blocks the current
                     * thread until the offsets are committed. Generally
                     * preferred when no further logic follows the commit:
                     *   consumer.commitSync();
                     */

                    /*
                     * Asynchronous commit: does not block, so processing can
                     * continue; the callback reports commit failures.
                     */
                    consumer.commitAsync(new OffsetCommitCallback() {
                        @Override
                        public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
                            if (exception != null) {
                                System.err.println("Commit failed for " + offsets);
                                // FIX: the original concatenated
                                // exception.getStackTrace(), which prints a
                                // useless array toString(); print the actual
                                // stack trace instead.
                                exception.printStackTrace();
                            }
                        }
                    });
                }
            }
        }
    }
}
