package com.crazymaker.mq.demo;

import ch.qos.logback.classic.util.LogbackMDCAdapter;
import com.crazymaker.springcloud.common.context.SessionHolder;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.clients.producer.*;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.WakeupException;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.jetbrains.annotations.NotNull;
import org.junit.Test;

import java.time.Duration;
import java.util.*;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

/**
 * kafka测试类
 *
 * @author crazymaker
 * @date 2019-07-01
 */
public class KafkaTester {
    /**
     * Test message payload: the base word repeated 1000 times, built once by
     * the static initializer below.
     */
    public static String msgVal = "";
    // Stop flag polled by the consumer loop.
    // NOTE(review): an AtomicBoolean would be the idiomatic mutable flag here;
    // the Optional type is kept unchanged for caller compatibility.
    public static Optional<Boolean> stopped = Optional.of(false);
    // Worker pool that processes fetched records off the polling thread.
    private final ExecutorService executor = Executors.newFixedThreadPool(8);
    /**
     * Topic shared by the producer and consumer tests.
     */
    public static String TOPIC = "test";

    /**
     * Kafka bootstrap address; separate multiple addresses with commas.
     */
    public static final String KAFKA_SERVER = "cdh1:9092";

    static {
        // StringBuilder avoids the O(n^2) cost of repeated String "+=" in a loop.
        String word = "疯狂创客圈高并发研究社群";
        StringBuilder builder = new StringBuilder(word.length() * 1000);
        for (int i = 0; i < 1000; i++) {
            builder.append(word);
        }
        msgVal = builder.toString();
    }

    private KafkaConsumer<String, String> kafkaConsumer;
    // Per-partition tasks currently running on the executor.
    private final Map<TopicPartition, Task> activeTasks = new HashMap<>();
    // Offsets harvested from tasks, waiting for the next periodic commit.
    private final Map<TopicPartition, OffsetAndMetadata> offsetsToCommit = new HashMap<>();
    // Timestamp (ms) of the last async commit; throttles commit frequency.
    private long lastCommitTime;

    /**
     * Producer test: sends a single record and reports the result from an
     * asynchronous send callback.
     */
    @Test
    public void produceOne() {
        KafkaProducer<String, String> kafkaProducer = new KafkaProducer<>(initProducerProperties());
        try {
            ProducerRecord<String, String> record = new ProducerRecord<>(TOPIC, 0, "key", msgVal);
            // Callback is a functional interface: invoked once the send succeeds
            // (e == null) or fails (e != null).
            kafkaProducer.send(record, (recordMetadata, e) -> {
                if (e != null) {
                    System.out.println("消息发送失败:" + e.getMessage());
                } else {
                    System.out.println("消息发送成功,编号:" + recordMetadata.offset());
                }
            });
            System.out.println("消息发送成功,编号:" + msgVal);
        } finally {
            // close() flushes pending records before releasing resources.
            kafkaProducer.close();
        }
    }

    /**
     * Producer test: sends exactly 10 * 1024 records, printing progress every
     * 1024 sends. (The original loop condition {@code count > 1024 * 10} sent
     * one extra record — off-by-one fixed.)
     */
    @Test
    public void produceMany() {
        KafkaProducer<String, String> kafkaProducer = new KafkaProducer<>(initProducerProperties());
        try {
            final int total = 1024 * 10;
            for (int count = 1; count <= total; count++) {
                ProducerRecord<String, String> record = new ProducerRecord<>(TOPIC, 0, "key", msgVal);
                // Asynchronous send; the callback reports per-record success/failure.
                kafkaProducer.send(record, (recordMetadata, e) -> {
                    if (e != null) {
                        System.out.println("消息发送失败,error:" + e.getMessage());
                    } else {
                        System.out.println("消息发送成功,offset:" + recordMetadata.offset());
                    }
                });
                if (count % 1024 == 0) {
                    System.out.println("消息发送成功,编号:" + count);
                }
            }
        } finally {
            // close() flushes all buffered records before returning.
            kafkaProducer.close();
        }
    }

    /**
     * Builds the producer configuration: bootstrap servers, String
     * key/value serializers, linger time, and the partitioner class.
     *
     * @return producer {@link Properties}
     */
    private static Properties initProducerProperties() {
        Properties propsMap = new Properties();
        propsMap.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, KAFKA_SERVER);
        propsMap.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        propsMap.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        // Max time the producer waits for a batch to fill before sending anyway.
        propsMap.put(ProducerConfig.LINGER_MS_CONFIG, 1000);
        // Partition-selection strategy; a custom org.apache.kafka.clients.producer.Partitioner
        // implementation can be plugged in here.
        propsMap.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, "org.apache.kafka.clients.producer.internals.DefaultPartitioner");
        return propsMap;
    }

    /**
     * Consumer test: subscribes to {@link #TOPIC}, hands fetched records to the
     * worker pool (one task per partition, with that partition paused while its
     * task runs), and commits processed offsets periodically. Offsets of revoked
     * partitions are committed synchronously during rebalance.
     */
    @Test
    public void consumerTest() {
        kafkaConsumer = initConsumer();
        try {
            kafkaConsumer.subscribe(Collections.singletonList(TOPIC), new ConsumerRebalanceListener() {
                @Override
                public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
                    // 1. Stop all tasks handling records from revoked partitions.
                    Map<TopicPartition, Task> stoppedTasks = new HashMap<>();
                    for (TopicPartition partition : partitions) {
                        Task task = activeTasks.remove(partition);
                        if (task != null) {
                            task.stop();
                            stoppedTasks.put(partition, task);
                        }
                    }
                    // 2. Wait for stopped tasks to finish the record they are processing.
                    stoppedTasks.forEach((partition, task) -> {
                        long offset = task.waitForCompletion();
                        if (offset > 0) {
                            offsetsToCommit.put(partition, new OffsetAndMetadata(offset));
                        }
                    });
                    // 3. Collect the pending offsets of the revoked partitions.
                    Map<TopicPartition, OffsetAndMetadata> revokedPartitionOffsets = new HashMap<>();
                    partitions.forEach(partition -> {
                        OffsetAndMetadata offset = offsetsToCommit.remove(partition);
                        if (offset != null) {
                            revokedPartitionOffsets.put(partition, offset);
                        }
                    });
                    // 4. Commit them synchronously before partition ownership moves away.
                    try {
                        kafkaConsumer.commitSync(revokedPartitionOffsets);
                    } catch (Exception e) {
                        // Report the cause instead of silently dropping it.
                        System.out.println("Failed to commit offsets for revoked partitions! " + e.getMessage());
                    }
                }

                @Override
                public void onPartitionsAssigned(Collection<TopicPartition> collection) {
                    System.out.println("onPartitionsAssigned");
                }
            });

            while (!stopped.get()) {
                // poll(long) is deprecated since Kafka 2.0 — use the Duration overload.
                ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofMillis(1000));
                handleFetchedRecords(records);
                checkActiveTasks();
                commitOffsets();
            }
        } catch (WakeupException we) {
            // wakeup() is the intended shutdown signal; rethrow only if unexpected.
            if (!stopped.get()) {
                throw we;
            }
        } finally {
            kafkaConsumer.close();
        }
    }

    /**
     * Harvests progress from running tasks: records their current offsets for
     * the next commit, drops finished tasks, and resumes consumption of the
     * partitions whose tasks have completed.
     */
    private void checkActiveTasks() {
        List<TopicPartition> finishedTasksPartitions = new ArrayList<>();
        activeTasks.forEach((partition, task) -> {
            if (task.isFinished()) {
                finishedTasksPartitions.add(partition);
            }
            long offset = task.getCurrentOffset();
            if (offset > 0) {
                offsetsToCommit.put(partition, new OffsetAndMetadata(offset));
            }
        });
        finishedTasksPartitions.forEach(activeTasks::remove);
        kafkaConsumer.resume(finishedTasksPartitions);
    }

    /**
     * Creates a consumer from {@link #initConsumerProperties()}.
     *
     * @return a new {@link KafkaConsumer}
     */
    @NotNull
    private static KafkaConsumer<String, String> initConsumer() {
        return new KafkaConsumer<>(initConsumerProperties());
    }

    /**
     * Commits collected offsets asynchronously, at most once every five
     * seconds. Committing on every poll-loop iteration is not recommended —
     * it would generate a flood of unnecessary commit requests.
     */
    private void commitOffsets() {
        try {
            long currentTimeMillis = System.currentTimeMillis();
            if (currentTimeMillis - lastCommitTime > 5000) {
                if (!offsetsToCommit.isEmpty()) {
                    // Hand commitAsync a snapshot: the field is cleared immediately
                    // below, and the in-flight async commit must not observe that
                    // mutation of the map it was given.
                    Map<TopicPartition, OffsetAndMetadata> snapshot = new HashMap<>(offsetsToCommit);
                    kafkaConsumer.commitAsync(snapshot, (offsets, exception) -> {
                        if (exception != null) {
                            System.out.println("Failed to commit offsets!" + exception.getMessage());
                        } else {
                            System.out.println("Successfully committed offsets: " + offsets);
                        }
                    });
                    offsetsToCommit.clear();
                }
                lastCommitTime = currentTimeMillis;
            }
        } catch (Exception e) {
            System.out.println("Failed to commit offsets!" + e.getMessage());
        }
    }

    /**
     * Dispatches fetched records to the worker pool, one {@link Task} per
     * partition, then pauses those partitions so subsequent polls fetch no
     * more records for them until the in-flight task finishes (partitions are
     * resumed in {@link #checkActiveTasks()}).
     *
     * @param records the batch returned by the last poll
     */
    private void handleFetchedRecords(ConsumerRecords<String, String> records) {
        if (records.count() > 0) {
            records.partitions().forEach(partition -> {
                List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
                Task task = new Task(partitionRecords);
                // Process each partition's batch asynchronously.
                executor.submit(task);
                // Track the task so rebalance/commit logic can reach it.
                activeTasks.put(partition, task);
            });
            // Pause fetching for these partitions while their tasks run.
            kafkaConsumer.pause(records.partitions());
        }
    }

    /**
     * Builds the consumer configuration: manual offset commits
     * (enable.auto.commit=false), String deserializers, and a bounded batch
     * size per poll.
     *
     * @return consumer {@link Properties}
     */
    private static Properties initConsumerProperties() {
        Properties p = new Properties();
        p.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, KAFKA_SERVER);
        p.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        p.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        p.put(ConsumerConfig.GROUP_ID_CONFIG, "consumer_group_1");
        p.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
        // Offsets are committed manually (see commitOffsets / onPartitionsRevoked).
        p.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        p.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 1000);
        return p;
    }
}
