package com.example.kafkademo.instalOffset;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.stream.Collectors;

/**
 * Demonstrates resetting a consumer group's committed offsets via
 * {@code seek()}, {@code seekToBeginning()} and {@code seekToEnd()}.
 *
 * @author yanyun zhu
 * @version 1.0
 * @date 2021/12/31 11:06
 *
 * NOTE(review): this originally "ran with no visible effect" — seek() is only valid for
 * partitions that have already been assigned (i.e. after poll() has completed the group
 * rebalance), and with enable.auto.commit=false the seeked position is discarded on close
 * unless it is committed explicitly.
 */
public class ConsumerOffset {

    public static void main(String[] args) {
        Properties properties = instalOffset();
        String topic = "test-a";

//        seekToEnd(properties, topic);

//        toBeginning(properties, topic);

        seekToSpecified(properties, topic);

    }

    /**
     * Builds the configuration for the offset-reset consumer.
     *
     * <p>Auto-commit is disabled on purpose: the reset helpers below move the
     * consumer's position with {@code seek*()} and then commit that position
     * explicitly, so the seeked offset is exactly what gets stored for the group.
     *
     * @return consumer properties (group id {@code 12334}, string key/value deserializers)
     */
    public static Properties instalOffset(){
        Properties consumerProperties = new Properties();
        consumerProperties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);    // commit manually after seeking
        consumerProperties.put(ConsumerConfig.GROUP_ID_CONFIG, "12334");
        consumerProperties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        consumerProperties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        consumerProperties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        consumerProperties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "10.42.25.12:9092,10.42.25.13:9092,10.42.25.15:9092");

        return consumerProperties;
    }

    /**
     * Polls until the group coordinator has assigned partitions to this consumer.
     *
     * <p>{@code seek()} / {@code seekToBeginning()} / {@code seekToEnd()} are only
     * valid for partitions the consumer currently owns. Calling them right after
     * {@code subscribe()} — before the rebalance completes — either throws
     * {@code IllegalStateException} or silently does nothing, which was the cause
     * of the original "runs with no effect" TODO. The deprecated {@code poll(0)}
     * could also return before assignment finished; {@code poll(Duration)} in a
     * loop is the reliable replacement.
     *
     * @param consumer a consumer that has already called {@code subscribe()}
     * @return the non-empty set of partitions assigned to this consumer
     */
    private static Set<TopicPartition> waitForAssignment(KafkaConsumer<String, String> consumer) {
        Set<TopicPartition> assignment = consumer.assignment();
        while (assignment.isEmpty()) {
            consumer.poll(Duration.ofMillis(100));  // drives the join/rebalance protocol
            assignment = consumer.assignment();
        }
        return assignment;
    }

    /**
     * Commits the consumer's current position for every assigned partition so the
     * seeked offset survives after the consumer closes (auto-commit is disabled).
     *
     * @param consumer   the consumer whose positions are committed
     * @param assignment partitions to commit; each {@code seek} call affects a single
     *                   partition, and {@code OffsetAndMetadata} wraps the long offset
     */
    private static void commitCurrentPositions(KafkaConsumer<String, String> consumer,
                                               Set<TopicPartition> assignment) {
        Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>(assignment.size());
        for (TopicPartition tp : assignment) {
            // position() forces the lazy seek*() request to resolve to a concrete offset
            offsets.put(tp, new OffsetAndMetadata(consumer.position(tp)));
        }
        consumer.commitSync(offsets);
    }

    /**
     * Resets the group's committed offsets for {@code topic} to the beginning of
     * every assigned partition.
     */
    public static void toBeginning(Properties properties, String topic){
        try (final KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties)) {
            consumer.subscribe(Collections.singleton(topic));
            Set<TopicPartition> assignment = waitForAssignment(consumer);
            consumer.seekToBeginning(assignment);
            commitCurrentPositions(consumer, assignment);
        }catch (Exception e){
            e.printStackTrace();
        }
    }

    /**
     * Resets the group's committed offsets for {@code topic} to the end of every
     * assigned partition (skips all existing records).
     */
    public static void seekToEnd(Properties properties, String topic){
        try (final KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties)) {
            consumer.subscribe(Collections.singleton(topic));
            Set<TopicPartition> assignment = waitForAssignment(consumer);
            consumer.seekToEnd(assignment);
            commitCurrentPositions(consumer, assignment);
        }catch (Exception e){
            e.printStackTrace();
        }
    }

    /**
     * Resets the group's committed offset for every assigned partition of
     * {@code topic} to a fixed offset (1234). Previously this method seeked
     * before any partition was assigned (throwing {@code IllegalStateException}),
     * never committed, and leaked the consumer.
     */
    public static void seekToSpecified(Properties properties, String topic) {
        long targetOffset = 1234L;
        try (final KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties)) {
            consumer.subscribe(Collections.singleton(topic));
            Set<TopicPartition> assignment = waitForAssignment(consumer);
            for (TopicPartition tp : assignment) {
                consumer.seek(tp, targetOffset);    // seek() resets one partition at a time
            }
            commitCurrentPositions(consumer, assignment);
        }catch (Exception e){
            e.printStackTrace();
        }
    }

}
