package com.springboot.kafka.consumer;

import com.springboot.kafka.config.KafkaConfig;
import com.springboot.kafka.producer.RecordBean;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.WakeupException;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.*;
import java.util.concurrent.atomic.AtomicBoolean;

public class ConsumerTest {

    public static final String GROUP_ID = "group.demo";
    // Shared stop flag for the poll loops; flipped to false to shut down cleanly.
    private static final AtomicBoolean isRunning = new AtomicBoolean(true);

    /**
     * Demonstrates the core KafkaConsumer APIs: the different subscription styles,
     * polling, pausing/resuming partitions, and every offset-commit variant
     * (sync/async, whole-batch and per-partition granularity).
     */
    public static void main(String[] args) {
        Properties properties = initConfig();
        // Create the consumer instance.
        KafkaConsumer<String, RecordBean> consumer = new KafkaConsumer<String, RecordBean>(properties);
        // Subscribe to topics.
        // 1. Subscribe to a collection of topics. Calls are NOT additive — the last
        //    subscribe() wins. Subscription state: AUTO_TOPICS.
        consumer.subscribe(Arrays.asList(KafkaConfig.TOPIC, "topic2", "topic3..."));
        consumer.subscribe(Arrays.asList(KafkaConfig.TOPIC));

        // 2. Subscribe with a regex pattern. Subscription state: AUTO_PATTERN.
        //consumer.subscribe(Pattern.compile("topic-*"));

        //3

        // 4. Assign specific partitions directly (List<TopicPartition>).
        //    Subscription state: USER_ASSIGNED.
        // consumer.assign(Arrays.asList(new TopicPartition("topic", 0)));
        // 5. Discover partition metadata via partitionsFor() and assign them all.
//        List<PartitionInfo> partitionInfoList = consumer.partitionsFor(KafkaConfig.TOPIC);
//        List<TopicPartition> topicPartitionList = new ArrayList<>();
//        if (!partitionInfoList.isEmpty()) {
//            for (PartitionInfo partitionInfo : partitionInfoList) {
//                topicPartitionList.add(new TopicPartition(partitionInfo.topic(), partitionInfo.partition()));
//            }
//        }
//        consumer.assign(topicPartitionList);


        // Unsubscribe: any of the three calls below cancels the subscription,
        // whichever form was used above. Subscription state becomes: NONE.
       /* consumer.unsubscribe();
        consumer.subscribe(new ArrayList<>());
        consumer.assign(new ArrayList<>());*/


        /**
         * Topics subscribed via subscribe() participate in consumer-group
         * rebalancing: partition-to-consumer assignment is managed automatically.
         *
         * Partitions taken via assign() do NOT participate in rebalancing.
         *
         */

        try {
            while (isRunning.get()) {
                // poll() timeout: how long to block waiting for data before returning;
                // a zero timeout returns immediately.
                ConsumerRecords<String, RecordBean> records = consumer.poll(Duration.ofMillis(1000));

                // 1. Consume messages by iterating over the whole batch.
                Iterator<ConsumerRecord<String, RecordBean>> iterator = records.iterator();
                while (iterator.hasNext()) {
                    ConsumerRecord<String, RecordBean> record = iterator.next();
                    if (record.value() != null) {
                        System.out.println("topic:" + record.topic());
                        System.out.println("partition:" + record.partition());
                        System.out.println("offset:" + record.offset());
                        System.out.println("key:" + record.key());
                        System.out.println("value:" + record.value().toString());
                    }
                    // Demo-only exit condition: stop after the last record of a batch.
                    if (!iterator.hasNext()) {
                        isRunning.set(false);
                    }
                }
                // 2. Consume messages per partition.
                // 2.1 partitions() returns the set of partitions present in this batch.
                Set<TopicPartition> partitionSet = records.partitions();

                // 4.1 Pause fetching from some partitions.
                consumer.pause(partitionSet);
                // 4.2 Get the set of currently paused partitions.
                Set<TopicPartition> topicPartitionSet = consumer.paused();
                // 4.3 Resume fetching from them.
                consumer.resume(topicPartitionSet);
                for (TopicPartition topicPartition : partitionSet) {
                    // 2.2 Records belonging to this single partition.
                    List<ConsumerRecord<String, RecordBean>> partitionRecords = records.records(topicPartition);
                    for (ConsumerRecord<String, RecordBean> parRecord : partitionRecords) {
                        System.out.println("按分区获取消息---》" + parRecord.partition() + ":" + parRecord.value());
                        // 3.1 Manual synchronous commit — blocks the thread; very slow
                        //     when done per record, shown here only to demo the API.
                        consumer.commitSync();

                        // 3.3 Asynchronous commit.
                        consumer.commitAsync();

                        // 3.5 Asynchronous commit with a completion callback.
                        consumer.commitAsync(((offsets, exception) -> {
                            if (exception == null) {
                                System.out.println("提交成功");
                            } else {
                                // Commit failed — must be handled (retry/log/alert).
                            }
                        }));
                    }
                    if (partitionRecords.isEmpty()) {
                        isRunning.set(false);
                        // Break out of the poll loop: wakeup() makes the next blocking
                        // consumer call throw WakeupException, caught below purely to exit.
                        consumer.wakeup();
                    } else {
                        // NOTE: the empty-check above must run BEFORE indexing the last
                        // record — the original code indexed first and would have thrown
                        // IndexOutOfBoundsException on an empty list.
                        // 3.2 Synchronous commit at partition granularity (commit the
                        //     offset AFTER the last consumed record, hence offset + 1).
                        long offset = partitionRecords.get(partitionRecords.size() - 1).offset();
                        consumer.commitSync(Collections.singletonMap(topicPartition, new OffsetAndMetadata(offset + 1)));
                        // 3.4 Asynchronous commit at partition granularity.
                        consumer.commitAsync(Collections.singletonMap(topicPartition, new OffsetAndMetadata(offset + 1)),
                                (offsets, exception) -> {
                                    if (exception == null) {
                                        System.out.println("提交成功");
                                    } else {
                                        // Commit failed — must be handled.
                                    }
                                });
                    }
                }


            }
        } catch (WakeupException e) {
            // Expected: consumer.wakeup() was called above to break out of poll();
            // nothing to handle.
        } catch (Exception e) {
            // The original code swallowed this silently; at minimum surface it so
            // real failures (deserialization, commit errors, ...) are not lost.
            e.printStackTrace();
        } finally {
            // 3.6 One final synchronous commit on shutdown (or before a rebalance).
            try{
                consumer.commitSync();
            }finally {
                // close() waits up to 30 seconds by default before forcing shutdown;
                // the timeout can be passed explicitly.
                consumer.close();
            }
        }
    }

    /**
     * Demonstrates the seek() family of APIs: seeking to explicit offsets, to the
     * beginning/end of partitions, and to the offset closest to a timestamp.
     */
    public static void consumer1() {
        Properties properties = initConfig();
        // Create the consumer instance.
        KafkaConsumer<String, RecordBean> consumer = new KafkaConsumer<String, RecordBean>(properties);
        try {
            // Subscribe to the topic.
            consumer.subscribe(Arrays.asList(KafkaConfig.TOPIC));
            // seek() requires an assignment, which only exists after poll() has run.
            // A zero timeout never gets one; a long timeout wastes time — so poll in
            // a short loop until the assignment shows up.
            Set<TopicPartition> assignment = new HashSet<>();
            // Keep polling until partitions have been assigned.
            while (assignment.isEmpty()) {
                consumer.poll(Duration.ofMillis(100));
                assignment = consumer.assignment();
            }
            /** Seek to explicit positions per partition. */
            // End offsets (one past the last message) for each assigned partition.
            Map<TopicPartition, Long> map = consumer.endOffsets(assignment);
            Map<TopicPartition, Long> beginningOffsets = consumer.beginningOffsets(assignment);
            // Set the fetch offset for each partition.
            for (TopicPartition topicPartition : assignment) {
                // Seek to the end of the partition...
                consumer.seek(topicPartition, map.get(topicPartition));
                // ...then to the beginning (the last seek wins).
                consumer.seek(topicPartition, beginningOffsets.get(topicPartition));
            }
            // Built-in shortcuts for seeking to the beginning / end.
            consumer.seekToBeginning(assignment);
            consumer.seekToEnd(assignment);
            /** Seek each partition to a point in time (epoch milliseconds). */
            Map<TopicPartition, Long> topicPartitionLongMap = new HashMap<>();
            // Target timestamp per partition: 24 hours ago.
            for (TopicPartition topicPartition : assignment) {
                topicPartitionLongMap.put(topicPartition, new Date().getTime() - 1 * 24 * 60 * 60 * 1000);
            }
            // Resolve each timestamp to the earliest offset at/after it.
            Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes = consumer.offsetsForTimes(topicPartitionLongMap);
            // Seek to the resolved offsets (null when no message is that recent).
            for (TopicPartition topicPartition : assignment) {
                OffsetAndTimestamp offsetAndTimestamp = offsetsForTimes.get(topicPartition);
                if (offsetAndTimestamp != null) {
                    consumer.seek(topicPartition, offsetAndTimestamp.offset());
                }
            }
            while (isRunning.get()) {
                // Fetch messages.
                ConsumerRecords<String, RecordBean> consumerRecords = consumer.poll(Duration.ofMillis(1000));
                for (ConsumerRecord<String, RecordBean> record : consumerRecords) {
                    System.out.println("record" + record.value());
                }
            }
        } finally {
            // The original leaked the consumer; always release its network resources.
            consumer.close();
        }
    }

    /**
     * Demonstrates a ConsumerRebalanceListener: committing (or externally storing)
     * offsets before a rebalance revokes partitions, and seeking to restored
     * offsets after partitions are reassigned.
     */
    public static void consumer2() {
        Properties properties = initConfig();
        // Create the consumer instance.
        KafkaConsumer<String, RecordBean> consumer = new KafkaConsumer<String, RecordBean>(properties);
        // Offsets consumed so far, keyed by partition — committed asynchronously in
        // the poll loop and synchronously when partitions are revoked.
        Map<TopicPartition, OffsetAndMetadata> map = new HashMap<>();
        // Subscribe with a rebalance listener.
        consumer.subscribe(Arrays.asList(KafkaConfig.TOPIC), new ConsumerRebalanceListener() {
            @Override
            public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
                // Runs before the rebalance, after this consumer stops consuming.
                // The argument is the set of partitions held BEFORE the rebalance.

                // Option 1: persist each partition's offset externally (DB, Redis, ...).
                for (TopicPartition partition : partitions) {

                    // Last committed offset for this partition; null when nothing has
                    // ever been committed (the original code NPE'd on that case).
                    OffsetAndMetadata committed = consumer.committed(partition);
                    long offset = committed == null ? -1L : committed.offset();
                    // Offset of the NEXT record that would be fetched.
                    long position = consumer.position(partition);

                    // If the two differ, messages would be lost or re-consumed.
                }
                // Option 2: synchronously commit everything consumed so far.
                consumer.commitSync(map);
                map.clear();
            }

            @Override
            public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                // Runs after the rebalance, before consumption restarts.
                // The argument is the set of partitions assigned AFTER the rebalance.
                // Option 1: read offsets back from external storage and seek to them.
                for (TopicPartition partition : partitions) {
                    consumer.seek(partition, 1L);// placeholder — real offset comes from external storage
                }
                // Option 2: if offsets were committed synchronously, nothing to do here.
            }
        });
        try {
            while (isRunning.get()) {
                // Fetch messages.
                ConsumerRecords<String, RecordBean> consumerRecords = consumer.poll(Duration.ofMillis(1000));
                for (ConsumerRecord<String, RecordBean> record : consumerRecords) {
                    System.out.println("record" + record.value());

                    // Remember the NEXT offset to commit for this partition.
                    map.put(new TopicPartition(record.topic(), record.partition()), new OffsetAndMetadata(record.offset() + 1));

                }
                consumer.commitAsync(map, (offsets, exception) -> {
                    if (exception != null) {
                        // Async commit failed — handle (the next commit usually covers it).
                    }
                });
            }
        } finally {
            // The original leaked the consumer; always release its network resources.
            consumer.close();
        }
    }

    /**
     * Builds the consumer configuration shared by all demos.
     *
     * @return consumer {@link Properties} with deserializers, group id, manual
     *         offset commits and a demo interceptor configured
     */
    private static Properties initConfig() {
        Properties properties = new Properties();
        // Broker cluster addresses.
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, KafkaConfig.BROKER_LIST);
        // Key deserializer.
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        //properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // Custom value deserializer for RecordBean payloads.
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, RecordDeserializer.class.getName());

        // Consumer group id.
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, GROUP_ID);

        // Disable auto-commit (default is true) — offsets are committed manually.
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        // Auto-commit interval (default 5 s). Ineffective while auto-commit is off;
        // kept only to document the setting.
        properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, 5);

        // Consumer interceptor chain (optional).
        properties.put(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, ConsumerDemoInterceptor.class.getName());

        properties.put(ConsumerConfig.CLIENT_ID_CONFIG, "demo");
        return properties;
    }
}
