package com.tsj.message;

import com.tsj.message.config.KafkaProperties;
import com.tsj.message.util.ShutdownableThread;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.*;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

/**
 * Consumes messages concurrently while preserving per-key ordering: records sharing a key
 * are grouped into a single task, so within each key group offsets must appear in strictly
 * increasing order (no two consecutive decreasing offsets). Messages are produced in groups
 * of ten; correctness is currently verified by inspecting the console output by hand.
 */
public class Consumer extends ShutdownableThread {
    private final KafkaConsumer<String, String> consumer;
    private final String topic;
    private final String groupId;
    private final int numMessageToConsume;
    private int messageRemaining;
    private final CountDownLatch latch;
    private final ThreadPoolExecutor threadPool;

    /**
     * Builds a consumer with auto-commit disabled; offsets are committed manually
     * after the worker pool has drained.
     *
     * @param topic               topic to subscribe to
     * @param groupId             consumer group id
     * @param instanceId          optional static group instance id
     * @param readCommitted       if true, only read committed (transactional) records
     * @param numMessageToConsume total number of messages to consume before stopping
     * @param latch               counted down once when this consumer finishes
     */
    public Consumer(final String topic,
                    final String groupId,
                    final Optional<String> instanceId,
                    final boolean readCommitted,
                    final int numMessageToConsume,
                    final CountDownLatch latch) {
        super("KafkaConsumerExample", false);
        this.groupId = groupId;
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, KafkaProperties.KAFKA_SERVER_URL + ":" + KafkaProperties.KAFKA_SERVER_PORT);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        instanceId.ifPresent(id -> props.put(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, id));
        // Disable auto-commit: offsets are committed manually in execute() only
        // after all dispatched work has completed. (auto.commit.interval.ms is
        // intentionally NOT set — it is ignored when auto-commit is off.)
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "30000");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        if (readCommitted) {
            props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
        }
        // Only consume messages produced after this consumer starts.
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");

        consumer = new KafkaConsumer<>(props);
        this.topic = topic;
        this.numMessageToConsume = numMessageToConsume;
        this.messageRemaining = numMessageToConsume;
        this.latch = latch;
        threadPool = new ThreadPoolExecutor(5, 10, 10, TimeUnit.SECONDS,
                new LinkedBlockingQueue<>(), new ThreadPoolExecutor.AbortPolicy());
    }

    KafkaConsumer<String, String> get() {
        return consumer;
    }

    /**
     * Polls until {@code numMessageToConsume} records have been seen, dispatching
     * each key group as a single task so per-key order is preserved, then drains
     * the pool, commits offsets, and releases the latch.
     */
    @Override
    public void execute() {
        consumer.subscribe(Collections.singletonList(this.topic));
        Set<String> subscription = consumer.subscription();
        subscription.forEach(System.out::println);
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(5));
            if (records.isEmpty()) {
                continue;
            }
            // Process every partition in the batch. (Previously only partition 0
            // was read via new TopicPartition(topic, 0), silently dropping records
            // from other partitions while still counting them as consumed.)
            for (TopicPartition partition : records.partitions()) {
                // Group the partition's records by key; each key group becomes one
                // Runnable, which is what guarantees sequential handling per key.
                Map<String, List<ConsumerRecord<String, String>>> memoryQueue =
                        records.records(partition).stream()
                                .collect(Collectors.groupingBy(ConsumerRecord::key));
                // Tasks for DIFFERENT keys may run concurrently — that is the point
                // of this example. The old per-task 2s sleep neither guaranteed
                // completion nor allowed concurrency, so it was removed; ordering
                // within a key is already ensured by the single-Runnable grouping.
                runnableWrapper(memoryQueue).forEach(threadPool::execute);
            }
            // TODO test data: needs several key groups; assert(records with the same
            // key are handled by one thread in order) and assert(console ids per
            // group are strictly increasing).

            messageRemaining -= records.count();
            if (messageRemaining <= 0) {
                System.out.println(groupId + " finished reading " + numMessageToConsume + " messages");
                break;
            }
        }
        // Drain the worker pool BEFORE committing: shutdown() alone does not wait,
        // and committing offsets for records that are still being processed would
        // risk marking unprocessed messages as consumed.
        threadPool.shutdown();
        try {
            if (!threadPool.awaitTermination(30, TimeUnit.SECONDS)) {
                threadPool.shutdownNow();
            }
        } catch (InterruptedException e) {
            threadPool.shutdownNow();
            // Restore the interrupt flag instead of swallowing it.
            Thread.currentThread().interrupt();
        }
        consumer.commitSync();
        consumer.close();
        latch.countDown();
    }

    /**
     * Wraps each key group as a Runnable that prints the group's records in order.
     *
     * @param memoryQueue records grouped by key, each list in consumption order
     * @return one Runnable per key group
     */
    private List<Runnable> runnableWrapper(Map<String, List<ConsumerRecord<String, String>>> memoryQueue) {
        List<Runnable> res = new ArrayList<>();
        memoryQueue.forEach(
                (key, list) -> res.add(() -> {
                    // Print one key group; running inside a single task keeps the
                    // group's records strictly ordered.
                    list.forEach(record -> System.out.println(record.toString()));
                })
        );
        return res;
    }
}