package com.kl.example;

import com.kl.example.interceptor.ConsumerInterceptorTTLExample;
import com.kl.example.serializer.MyMessageJsonDeserializer;
import com.kl.example.serializer.MyMessageProtostuffDeserializer;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.time.Duration;
import java.time.LocalDateTime;
import java.time.ZoneOffset;
import java.time.format.DateTimeFormatter;
import java.util.*;
import java.util.concurrent.atomic.AtomicBoolean;

public class ConsumerExample {

    private final static Logger logger = LoggerFactory.getLogger(ConsumerExample.class);
    // DateTimeFormatter is immutable and thread-safe, so a shared static instance is safe.
    private final static DateTimeFormatter dtf = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss");
    /**
     * Switch for the infinite poll loops; set to {@code false} to let them terminate.
     */
    public final static AtomicBoolean isRunning = new AtomicBoolean(true);
    /**
     * Topic the example consumers subscribe to.
     */
    private static final String topic = "test";

    /**
     * Basic subscribe/poll consumption with auto-commit enabled.
     */
    @Test
    public void testBaseReceive() {
        Properties props = initConsumerProperties();
        // try-with-resources closes the consumer (and leaves the group cleanly) on exit.
        try (Consumer<String, Object> consumer = new KafkaConsumer<>(props)) {
            // A collection is passed, so several topics could be subscribed at once.
            consumer.subscribe(Collections.singletonList(topic));
            // NOTE: with no messages on the topic this loop still spins and polls.
            while (isRunning.get()) {
                // After subscribing, the first poll() joins the consumer group.
                // Repeated poll() calls keep the member alive and fetch records from the
                // assigned partitions. poll() blocks for at most the given timeout
                // (it is NOT non-blocking). Heartbeats are sent in the background; if
                // none arrive within session.timeout.ms the member is considered dead
                // and its partitions are rebalanced to the remaining members.
                ConsumerRecords<String, Object> records = consumer.poll(Duration.ofMillis(100));
                for (ConsumerRecord<String, Object> record : records) {
                    printRecord(record);
                }
            }
        }
    }

    /**
     * Manual synchronous offset commit after each poll batch.
     */
    @Test
    public void testManualSyncCommit() {
        Properties props = initConsumerProperties();
        // Disable auto-commit (it defaults to true) so offsets are committed manually.
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);

        try (Consumer<String, Object> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList(topic));
            while (isRunning.get()) {
                // Fetch the next batch of records.
                ConsumerRecords<String, Object> consumerRecords = consumer.poll(Duration.ofMillis(1000));
                for (ConsumerRecord<String, Object> consumerRecord : consumerRecords) {
                    // Business processing......
                    System.out.printf("消息内容=%s%n", consumerRecord.value());
                }
                // Synchronously commit the positions of the last poll; blocks until the
                // broker acknowledges or an unrecoverable error occurs.
                consumer.commitSync();
            }
        }
    }

    /**
     * Manual synchronous commit in batches: records are buffered across polls and the
     * offsets are committed only once at least {@code minBatchSize} records have been
     * processed. (Method name kept for compatibility; the commit itself is synchronous.)
     */
    @Test
    public void testManualAsyncCommit() {
        final int minBatchSize = 2;
        List<ConsumerRecord<String, Object>> buffer = new ArrayList<>();
        Properties props = initConsumerProperties();
        // Disable auto-commit (it defaults to true) so offsets are committed manually.
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);

        try (Consumer<String, Object> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList(topic));
            while (isRunning.get()) {
                ConsumerRecords<String, Object> consumerRecords = consumer.poll(Duration.ofMillis(1000));
                for (ConsumerRecord<String, Object> consumerRecord : consumerRecords) {
                    // Buffer first; process in batches later.
                    buffer.add(consumerRecord);
                }
                if (buffer.size() >= minBatchSize) {
                    // Business processing of the whole batch.
                    for (ConsumerRecord<String, Object> consumerRecord : buffer) {
                        System.out.printf("消息内容=%s%n", consumerRecord.value());
                    }
                    // Commit only after the whole batch was processed; the consumer's
                    // position already points past everything in the buffer.
                    consumer.commitSync();
                    // Reset the buffer for the next batch.
                    buffer.clear();
                }
            }
        }
    }

    /**
     * Synchronous commit with explicit offsets, one record at a time.
     */
    @Test
    public void testManualSyncCommitWithArgs() {
        Properties props = initConsumerProperties();
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);

        try (Consumer<String, Object> consumer = new KafkaConsumer<>(props)) {
            // A collection is passed, so several topics could be subscribed at once.
            consumer.subscribe(Collections.singletonList(topic));
            while (isRunning.get()) {
                ConsumerRecords<String, Object> records = consumer.poll(Duration.ofMillis(100));
                for (ConsumerRecord<String, Object> record : records) {
                    // Use record.topic() rather than the static topic so this stays
                    // correct even with a multi-topic subscription.
                    TopicPartition topicPartition = new TopicPartition(record.topic(), record.partition());
                    printRecord(record);
                    // Committing record-by-record performs very poorly and is rarely used.
                    // The committed offset must be the NEXT offset to consume, i.e.
                    // record.offset() + 1; committing record.offset() itself would
                    // replay the last record after a restart or rebalance.
                    consumer.commitSync(Collections.singletonMap(topicPartition,
                            new OffsetAndMetadata(record.offset() + 1)));
                }
            }
        }
    }

    /**
     * Synchronous commit at partition granularity: process each partition's records,
     * then commit that partition's next offset.
     */
    @Test
    public void testManualSyncCommitWithPartition() {
        Properties props = initConsumerProperties();
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);

        Consumer<String, Object> consumer = new KafkaConsumer<>(props);
        // A collection is passed, so several topics could be subscribed at once.
        consumer.subscribe(Collections.singletonList(topic));
        try {
            while (isRunning.get()) {
                ConsumerRecords<String, Object> records = consumer.poll(Duration.ofMillis(100));
                // Only partitions that actually returned records in this poll are listed.
                Set<TopicPartition> partitions = records.partitions();
                for (TopicPartition partition : partitions) {
                    // Records belonging to this specific partition.
                    List<ConsumerRecord<String, Object>> partitionRecords = records.records(partition);
                    for (ConsumerRecord<String, Object> partitionRecord : partitionRecords) {
                        // Business processing......
                        printRecord(partitionRecord);
                    }
                    // Offset of the last record consumed from this partition; the list is
                    // non-empty because records.partitions() only lists partitions with data.
                    long lastOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
                    // Commit the NEXT offset to consume (last + 1), per the Kafka commit
                    // contract; committing lastOffset itself would re-deliver the final
                    // record after a restart or rebalance.
                    consumer.commitSync(Collections.singletonMap(partition, new OffsetAndMetadata(lastOffset + 1)));
                }
            }
        } finally {
            consumer.close();
        }
    }

    /**
     * Asynchronous offset commit with a completion callback; a final synchronous
     * commit on shutdown guards against in-flight async commits being lost.
     */
    @Test
    public void testAsyncCommit() {
        Properties props = initConsumerProperties();
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);

        Consumer<String, Object> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Collections.singletonList(topic));
        try {
            while (isRunning.get()) {
                ConsumerRecords<String, Object> records = consumer.poll(Duration.ofMillis(100));
                for (ConsumerRecord<String, Object> record : records) {
                    printRecord(record);
                }
                // onComplete fires once this commit finishes, on success or failure.
                consumer.commitAsync((offsets, exception) -> {
                    if (exception == null) {
                        System.out.println("位移提交成功 = " + offsets);
                    } else {
                        System.out.printf("位移=%s提交失败，异常信息=%s", offsets, exception.getMessage());
                    }
                });
            }
        } finally {
            try {
                // Async commits can fail without retry; a final synchronous commit
                // before closing makes sure the latest position is persisted.
                consumer.commitSync();
            } finally {
                // Release the consumer's resources and leave the group.
                consumer.close();
            }
        }
    }

    /**
     * Several seek examples combined:
     * test 1 - seek every partition to a fixed offset (rewind / fast-forward),
     * test 2 - seek to the end of each partition,
     * test 3 - seek to the first offset at or after a given timestamp.
     */
    @Test
    public void testConsumeForwardOrTestConsumeLatest() {
        Properties props = initConsumerProperties();
        try (Consumer<String, Object> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList(topic));
            Set<TopicPartition> assignments = new HashSet<>();
            // A non-empty assignment means partitions have been handed to this consumer.
            while (assignments.isEmpty()) {
                // poll() needs some time to run the group-join / partition-assignment logic.
                consumer.poll(Duration.ofMillis(100));
                // Partitions currently owned by this consumer.
                assignments = consumer.assignment();
            }
            // Test 1: rewind / fast-forward to a fixed offset
            //consumerForward(consumer, assignments);
            // Test 2: start from the end of each partition
            //consumerLatest(consumer, assignments);
            // Test 3: start from a timestamp
            consumeTimeStamp(consumer, assignments);
            while (isRunning.get()) {
                ConsumerRecords<String, Object> records = consumer.poll(Duration.ofMillis(1000));
                for (ConsumerRecord<String, Object> record : records) {
                    // Business processing......
                    System.out.printf("partition = %d, offset = %d, key = %s, value = %s, Date = %s%n",
                            record.partition(), record.offset(), record.key(), record.value(),
                            // record.timestamp() is epoch millis; rendered here in UTC
                            // (sub-second precision intentionally dropped).
                            LocalDateTime.ofEpochSecond(record.timestamp() / 1000, 0, ZoneOffset.UTC).format(dtf));
                }
            }
        }
    }

    /**
     * Seeks every assigned partition to offset 10 (rewind or fast-forward demo).
     */
    public void consumerForward(Consumer<String, Object> consumer, Set<TopicPartition> assignments) {
        for (TopicPartition assignment : assignments) {
            // Start consuming each partition from offset 10.
            consumer.seek(assignment, 10);
        }
    }

    /**
     * Seeks every assigned partition to its end offset, i.e. the position where the
     * next produced message will be written, so only new messages are consumed.
     */
    public void consumerLatest(Consumer<String, Object> consumer, Set<TopicPartition> assignments) {
        // End offset per partition: the offset of the next message to be written.
        Map<TopicPartition, Long> offsets = consumer.endOffsets(assignments);
        for (TopicPartition assignment : assignments) {
            // Start consuming from the end of each partition.
            consumer.seek(assignment, offsets.get(assignment));
        }
    }

    /**
     * Seeks every assigned partition to the earliest offset whose record timestamp is
     * at or after "24 hours ago". Partitions with no such record are left untouched.
     */
    public void consumeTimeStamp(Consumer<String, Object> consumer, Set<TopicPartition> assignments) {
        Map<TopicPartition, Long> timeStampToSearch = new HashMap<>();
        for (TopicPartition assignment : assignments) {
            // Target: messages from the last 24 hours (epoch millis).
            timeStampToSearch.put(assignment, System.currentTimeMillis() - 24 * 3600 * 1000);
        }
        // Earliest offset at/after the timestamp, per partition; entries may be null
        // when a partition has no message that late.
        Map<TopicPartition, OffsetAndTimestamp> offsets = consumer.offsetsForTimes(timeStampToSearch);
        for (TopicPartition assignment : assignments) {
            OffsetAndTimestamp offsetAndTimestamp = offsets.get(assignment);
            if (offsetAndTimestamp != null) {
                // Position the partition at the offset matching the timestamp.
                consumer.seek(assignment, offsetAndTimestamp.offset());
            }
        }
    }

    /**
     * Prints the standard partition/offset/key/value line for a record.
     * (%d = decimal integer, %s = string, %n = platform line separator.)
     */
    private static void printRecord(ConsumerRecord<String, Object> record) {
        System.out.printf("partition = %d, offset = %d, key = %s, value = %s%n",
                record.partition(), record.offset(), record.key(), record.value());
    }

    /**
     * Builds the baseline consumer configuration shared by all tests.
     */
    public static Properties initConsumerProperties() {
        Properties props = new Properties();
        // Broker bootstrap address.
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        // Consumer group id (a purely logical grouping of consumers).
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "testGroup1");
        // Enable auto-commit explicitly (true is also the Kafka default).
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
        // Auto-commit interval; the Kafka default is 5000 ms.
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        // Key/value deserializers.
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        //props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");

        // === custom value deserializer ===
        // addMyDeSerializer(props);

        // === custom TTL interceptor ===
        addTTLInterceptor(props);
        return props;
    }

    /**
     * Swaps in a custom value deserializer (key stays a plain String).
     */
    public static void addMyDeSerializer(Properties props) {
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // Option 1: JSON-based deserializer
        //props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, MyMessageJsonDeserializer.class.getName());
        // Option 2: Protostuff-based deserializer
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, MyMessageProtostuffDeserializer.class.getName());
    }

    /**
     * Registers the TTL consumer interceptor that drops expired messages.
     */
    public static void addTTLInterceptor(Properties props) {
        props.put(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, ConsumerInterceptorTTLExample.class.getName());
    }
}