package com.syy.kafka;

import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;

import java.time.Duration;
import java.util.*;

public class Consumer {

    /**
     * Minimal Kafka consumer example: subscribes to the {@code first} topic and
     * prints every record it receives.
     *
     * <p>Auto-commit is disabled, so offsets are committed manually: an
     * asynchronous commit after each poll batch (non-blocking, but a failed
     * commit can lag and cause re-consumption after a restart), plus a final
     * synchronous commit before the consumer is closed.
     *
     * @param args unused
     */
    public static void main(String[] args) {
        Properties properties = new Properties();
        properties.put("group.id", "test_01");
        properties.put("bootstrap.servers", "hadoop1:9092");
        // Start from the earliest available offset when the group has no committed offset.
        properties.put("auto.offset.reset", "earliest");
        // Disable auto-commit; offsets are committed manually in the poll loop below.
        properties.setProperty("enable.auto.commit", "false");
        // Deserializers must match the record key/value types (both String here).
        properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
        try {
            // Subscribe to the topic(s) to consume.
            consumer.subscribe(Arrays.asList("first"));

            while (true) {
                // poll(Duration) replaces the deprecated poll(long) overload.
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(2000));
                for (ConsumerRecord<String, String> record : records) {
                    // Hand each record to the downstream processing (printing here).
                    System.out.println(record);
                }
                // Commit the batch's offsets asynchronously: cheap, but offsets may
                // trail the processed position if a commit fails (duplicate risk).
                consumer.commitAsync();
            }
        } finally {
            try {
                // Final blocking commit so no processed offsets are lost on shutdown.
                consumer.commitSync();
            } finally {
                // Release network/socket resources.
                consumer.close();
            }
        }
    }
}
