package com.doit.day01;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.record.TimestampType;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.Arrays;
import java.util.HashMap;

public class KafkaConsumerDemo1 {

    /**
     * Demo consumer: subscribes to topic "test4" and prints each record's
     * metadata (topic, partition, offset, key, value, timestamp type, timestamp)
     * in an endless poll loop.
     */
    public static void main(String[] args) {

        // Consumer configuration. Use the ConsumerConfig constants instead of raw
        // string keys so typos are caught at compile time.
        HashMap<String, Object> map = new HashMap<>();
        map.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        map.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        map.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, MyConfig.HOST_AND_PORT);
        map.put(ConsumerConfig.GROUP_ID_CONFIG, "g003");
        // Allow the broker to auto-create the topic on first subscribe.
        map.put(ConsumerConfig.ALLOW_AUTO_CREATE_TOPICS_CONFIG, "true");

        /*
         * Where does this consumer start reading?
         * 1. If the code explicitly seeks to an offset, that position wins.
         * 2. Otherwise, if __consumer_offsets holds a committed offset for this
         *    group, consumption resumes right after it.
         * 3. Only when neither applies does auto.offset.reset take effect
         *    ("earliest" / "latest").
         */
        map.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        // true: periodically record the consumed position in __consumer_offsets;
        // false: nothing is committed automatically.
        map.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
        // Interval between automatic offset commits, in milliseconds.
        map.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "10000");

        // try-with-resources guarantees the consumer (and its network resources)
        // is released if the poll loop ever exits, e.g. via an exception.
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(map)) {

            // Group-managed subscription: the broker assigns partitions.
            consumer.subscribe(Arrays.asList("test4"));
            // Alternative: manual assignment plus an explicit starting offset, e.g.
            //   TopicPartition tp = new TopicPartition("test", 0);
            //   consumer.assign(Arrays.asList(tp));
            //   consumer.seek(tp, 4);

            while (true) {
                // Bounded poll timeout (instead of Long.MAX_VALUE) keeps the loop
                // responsive to consumer.wakeup() and JVM shutdown.
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
                for (ConsumerRecord<String, String> consumerRecord : records) {
                    String topic = consumerRecord.topic();
                    int partition = consumerRecord.partition();
                    long offset = consumerRecord.offset();
                    String key = consumerRecord.key();
                    String value = consumerRecord.value();
                    TimestampType timestampType = consumerRecord.timestampType();
                    long timestamp = consumerRecord.timestamp();
                    System.out.println("topic:"+topic+",partition:"+partition+",offset:"+offset+",key:"+key+",value:"+value+",timestampType:"+timestampType+",timestamp:"+timestamp);
                }
            }
        }
    }
}
