package com.doit.kafkaday01;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Properties;

/**
 * Demo: a Kafka consumer that manually assigns itself to partition 1 of the
 * "mysql" topic, seeks to offset 1, and then polls forever, printing every
 * record as {@code topic,partition,offset,value}.
 *
 * <p>Offset-resolution order (why {@code auto.offset.reset} may not apply):
 * <ol>
 *   <li>An explicit {@code seek(...)} in code wins (used here).</li>
 *   <li>Otherwise a committed offset in {@code __consumer_offsets} is resumed.</li>
 *   <li>Otherwise {@code auto.offset.reset} decides: "earliest" = from the
 *       beginning (like --from-beginning), "latest" = only new records.</li>
 * </ol>
 */
public class _02_kafka的消费者 {
    public static void main(String[] args) {
        Properties props = new Properties();

        // Required settings: broker address, key/value deserializers, consumer group.
        props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "linux01:9092");
        props.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "group002");

        // Optional settings.
        // Auto-commit offsets to the __consumer_offsets topic (default: every 5s).
        props.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");
        // Auto-commit interval, overridden here from the 5s default to 3s.
        props.setProperty(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "3000");
        // Where to start when there is no committed offset and no explicit seek:
        // "earliest" reads from the beginning, "latest" reads only new records.
        props.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

        // try-with-resources guarantees the consumer's sockets/buffers are released
        // if the poll loop ever exits (e.g. via an exception or interrupt).
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // Instead of consumer.subscribe(Arrays.asList("test")) — which lets the
            // group coordinator assign partitions — we manually assign ourselves to
            // partition 1 of topic "mysql" and seek to offset 1. (The same pattern
            // extends to partitions 0 and 2 by adding more TopicPartition entries.)
            TopicPartition topicPartition1 = new TopicPartition("mysql", 1);
            List<TopicPartition> assignment = new ArrayList<>();
            assignment.add(topicPartition1);
            consumer.assign(assignment);
            consumer.seek(topicPartition1, 1);

            // Poll forever; Long.MAX_VALUE means "block until records arrive".
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(Long.MAX_VALUE));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.println(
                            record.topic() + "," + record.partition() + "," + record.offset() + "," + record.value());
                }
            }
        }
    }
}
