package com.lac.acme.kafka.consumer;

import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.junit.jupiter.api.Test;

import java.io.IOException;
import java.time.Duration;
import java.util.*;
import java.util.concurrent.*;
import java.util.stream.Collectors;

public class SimpleConsumer {
    // Worker pool shared by the partition-processing tasks below.
    // NOTE(review): the work queue is an unbounded LinkedBlockingDeque, so the pool never
    // grows past its core size of 10 — maximumPoolSize=20 and the 10000-second keep-alive
    // only come into play once the queue is full, which an unbounded queue never is.
    // Confirm whether a bounded queue was intended.
    private ThreadPoolExecutor poolExecutor = new ThreadPoolExecutor(10, 20, 10000L, TimeUnit.SECONDS, new LinkedBlockingDeque<>());


    /**
     * Manual commit, whole batch at once.
     * Each partition of a poll result is handled by its own worker task (preserving
     * per-partition ordering); once every task has finished, the batch's offsets are
     * committed from the polling thread.
     *
     * Known weakness: if some tasks succeed and others fail, committing the whole batch
     * can still cause duplicate consumption or data loss.
     */
    @Test
    public void manualCommitTestV1() {

        Properties consumerProp = getConsumerProp();
        consumerProp.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        consumerProp.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "GROUP_B");
        try (KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(consumerProp)) {
            // Subscribe to the topic(s) of interest.
            kafkaConsumer.subscribe(Collections.singletonList("TOPIC_A"), new MyConsumerRebalanceListener());
            while (true) {
                // Poll: blocks up to the timeout when there is no data, returns immediately otherwise.
                ConsumerRecords<String, String> pollResult = kafkaConsumer.poll(Duration.ofMillis(50));
                System.out.println("pollResult size:" + pollResult.count());

                Set<TopicPartition> partitionList = pollResult.partitions();
                List<CompletableFuture<Void>> futureList = new ArrayList<>();
                for (TopicPartition topicPartition : partitionList) {
                    // One task per partition keeps each partition's records processed in order.
                    List<ConsumerRecord<String, String>> records = pollResult.records(topicPartition);
                    futureList.add(CompletableFuture.runAsync(new ConsumerRecordCallableTaskV1(records), poolExecutor));
                }
                // BUG FIX: allOf(...) only *builds* the combined future; the original code never
                // called join() on it, so offsets were committed before the workers had finished.
                CompletableFuture.allOf(futureList.toArray(new CompletableFuture[0])).join();

                // Manually commit the whole batch.
                kafkaConsumer.commitAsync((offsets, exception) -> {
                    System.out.printf("commit offsets end :%s, exception:%s %s", offsets, exception, "\n");
                    if (exception != null) {
                        // Fall back to a synchronous commit when the async one failed.
                        kafkaConsumer.commitSync();
                    }
                });
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }


    /**
     * Manual commit, per partition.
     * Each partition is handled by its own task; every task reports the offset map that
     * is safe to commit for its partition, and the polling thread commits them one by one.
     *
     * Known weakness: duplicate consumption and data loss are still possible
     * (e.g. a rebalance between processing and commit).
     */
    @Test
    public void manualCommitTestV2() {

        Properties consumerProp = getConsumerProp();
        consumerProp.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        consumerProp.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "GROUP_B");
        try (KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(consumerProp)) {
            // Subscribe to the topic(s) of interest.
            kafkaConsumer.subscribe(Collections.singletonList("TOPIC_A"), new MyConsumerRebalanceListener());
            while (true) {
                // Poll: blocks up to the timeout when there is no data, returns immediately otherwise.
                ConsumerRecords<String, String> pollResult = kafkaConsumer.poll(Duration.ofMillis(50));
                System.out.println("pollResult size:" + pollResult.count());

                Set<TopicPartition> partitionList = pollResult.partitions();
                List<Callable<Map<TopicPartition, OffsetAndMetadata>>> callableList = new ArrayList<>();
                for (TopicPartition topicPartition : partitionList) {
                    // One task per partition keeps each partition's records processed in order.
                    List<ConsumerRecord<String, String>> records = pollResult.records(topicPartition);
                    callableList.add(new ConsumerRecordCallableTaskV2(records, topicPartition));
                }
                // invokeAll blocks until every task completes — this step is essential.
                List<Future<Map<TopicPartition, OffsetAndMetadata>>> futureList = poolExecutor.invokeAll(callableList);
                // Overall latency is bounded by the slowest task.
                for (Future<Map<TopicPartition, OffsetAndMetadata>> future : futureList) {
                    // Partitions whose task succeeded get their offsets committed; failed ones do not.
                    Map<TopicPartition, OffsetAndMetadata> offsets = future.get();
                    // BUG FIX: a task may return null for an empty partition, and
                    // commitSync(null) throws a NullPointerException — guard before committing.
                    if (offsets != null && !offsets.isEmpty()) {
                        kafkaConsumer.commitSync(offsets);
                    }
                }
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Manual commit, record by record, on a single thread.
     * The offset is committed immediately after each record is handled, so nothing is
     * lost and nothing is re-consumed — at the price of throughput, since the offset
     * is updated very frequently.
     */
    @Test
    public void manualCommitTestV3() {

        Properties consumerProp = getConsumerProp();
        consumerProp.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        consumerProp.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "GROUP_D");
        try (KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(consumerProp)) {
            // Subscribe to the topic(s) of interest.
            kafkaConsumer.subscribe(Collections.singletonList("TOPIC_A"), new MyConsumerRebalanceListener());
            while (true) {
                // Poll: blocks up to the timeout when there is no data, returns immediately otherwise.
                ConsumerRecords<String, String> batch = kafkaConsumer.poll(Duration.ofMillis(50));
                System.out.println("pollResult size:" + batch.count());

                for (TopicPartition tp : batch.partitions()) {
                    // Walk one partition at a time so its records stay in order.
                    for (ConsumerRecord<String, String> rec : batch.records(tp)) {
                        // Pseudo business processing.
                        SimpleConsumer.recordPrint(rec);
                        System.out.println("第一步：调用远程BaseUserApi获取用户姓名");
                        System.out.println("第二步：调用远程BrandApi获取品牌名");
                        System.out.println("第三步：组装数据，向数据库写入日志，并且更新猎头在直猎邦平台的状态");
                        // Committed offset is the position of the NEXT record to read, hence +1.
                        kafkaConsumer.commitSync(
                                Collections.singletonMap(tp, new OffsetAndMetadata(rec.offset() + 1)));
                    }
                }
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Simple single-threaded batch consumption relying on auto-commit
     * (enable.auto.commit=true comes from getConsumerProp()).
     */
    @Test
    public void autoCommitConsumerTest() throws IOException {
        Properties consumerProp = getConsumerProp();
        consumerProp.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "GROUP_A");
        try (KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(consumerProp)) {
            // Subscribe to the topic(s) of interest.
            kafkaConsumer.subscribe(Collections.singletonList("TOPIC_A"), new MyConsumerRebalanceListener());
            while (true) {
                // Poll: blocks up to the timeout when there is no data, returns immediately otherwise.
                ConsumerRecords<String, String> pollResult = kafkaConsumer.poll(Duration.ofMillis(1000));
                System.out.println("pollResult size:" + pollResult.count());
                // Idiom: isEmpty() instead of count() == 0.
                if (pollResult.isEmpty()) {
                    continue;
                }
                pollResult.forEach(SimpleConsumer::recordPrint);
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
        // Keep the JVM alive (only reached after the loop exits via an exception).
        System.in.read();
    }

    /**
     * Builds the consumer configuration shared by all tests.
     * Individual tests override group id / auto-commit via setProperty as needed.
     *
     * @return a Properties object holding only String values, so it mixes cleanly
     *         with the setProperty(...) overrides used by the callers
     */
    private Properties getConsumerProp() {
        Properties consumerProp = new Properties();
        consumerProp.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.60.134:9092");

        // Other consumer settings.
        // Deserializers for record keys and values.
        consumerProp.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        consumerProp.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        /*
            auto.offset.reset decides where to start ONLY when the group has no committed offset:
            - latest:   a brand-new group starts at the log end, so it only sees records produced
                        after it joined; a group that already committed (e.g. at offset 50)
                        always resumes from its committed offset.
            - earliest: a brand-new group starts from the beginning of the log; a group with a
                        committed offset likewise resumes from that offset.
         */
        consumerProp.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        // Default consumer group (tests may override it).
        consumerProp.put(ConsumerConfig.GROUP_ID_CONFIG, "GROUP_A");
        // Auto-commit enabled by default. Use String values (not Boolean/Integer) for
        // consistency with the setProperty(...) calls elsewhere in this class.
        consumerProp.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true");

        // Auto-commit interval in milliseconds.
        consumerProp.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "10");
        // To cap the number of records returned per poll, set e.g.:
        // consumerProp.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, "2");
        return consumerProp;
    }

    /**
     * Prints a record's partition, offset, key and value on a single stdout line.
     */
    private static void recordPrint(ConsumerRecord<String, String> record) {
        System.out.printf("partition:%s offset:%s key:%s, value:%s %s",
                record.partition(), record.offset(), record.key(), record.value(), "\n");
    }

    /**
     * Worker that processes all records of one partition in order.
     * Does not deal with offsets — committing is left entirely to the caller.
     */
    private static class ConsumerRecordCallableTaskV1 implements Runnable {
        private final List<ConsumerRecord<String, String>> records;

        public ConsumerRecordCallableTaskV1(List<ConsumerRecord<String, String>> consumerRecordList) {
            this.records = consumerRecordList;
        }

        @Override
        public void run() {
            records.forEach(record -> {
                SimpleConsumer.recordPrint(record);
                System.out.println("第一步：调用远程BaseUserApi获取用户姓名");
                System.out.println("第二步：调用远程BrandApi获取品牌名");
                System.out.println("第三步：组装数据，向数据库写入日志，并且更新猎头在直猎邦平台的状态");
            });
        }
    }

    /**
     * Worker that processes all records of one partition in order and reports the
     * offset map that is safe to commit for that partition.
     */
    private static class ConsumerRecordCallableTaskV2 implements Callable<Map<TopicPartition, OffsetAndMetadata>> {
        private final List<ConsumerRecord<String, String>> consumerRecordList;
        private final TopicPartition topicPartition;

        public ConsumerRecordCallableTaskV2(List<ConsumerRecord<String, String>> consumerRecordList, TopicPartition topicPartition) {
            this.consumerRecordList = consumerRecordList;
            this.topicPartition = topicPartition;
        }

        /**
         * Processes every record, then returns {partition -> last offset + 1}.
         * BUG FIX: returns an empty map instead of null when there were no records,
         * so callers can pass the result straight to commitSync() without risking an
         * NPE (committing an empty map is a no-op).
         *
         * @return offsets to commit for this partition; empty when no records were processed
         */
        @Override
        public Map<TopicPartition, OffsetAndMetadata> call() throws Exception {
            for (ConsumerRecord<String, String> record : consumerRecordList) {
                SimpleConsumer.recordPrint(record);
                System.out.println("第一步：调用远程BaseUserApi获取用户姓名");
                System.out.println("第二步：调用远程BrandApi获取品牌名");
                System.out.println("第三步：组装数据，向数据库写入日志，并且更新猎头在直猎邦平台的状态");
            }

            if (consumerRecordList.isEmpty()) {
                return Collections.emptyMap();
            }
            ConsumerRecord<String, String> latestRecord = consumerRecordList.get(consumerRecordList.size() - 1);
            // Committed offset is the position of the NEXT record to consume, hence +1.
            OffsetAndMetadata om = new OffsetAndMetadata(latestRecord.offset() + 1);
            return Collections.singletonMap(topicPartition, om);
        }
    }


    /**
     * Worker that processes exactly one record and reports the offset to commit for it.
     */
    private static class ConsumerRecordCallableTaskV3 implements Callable<Map<TopicPartition, OffsetAndMetadata>> {
        private final ConsumerRecord<String, String> record;
        private final TopicPartition partition;

        public ConsumerRecordCallableTaskV3(ConsumerRecord<String, String> consumerRecord, TopicPartition topicPartition) {
            this.record = consumerRecord;
            this.partition = topicPartition;
        }

        @Override
        public Map<TopicPartition, OffsetAndMetadata> call() throws Exception {
            SimpleConsumer.recordPrint(record);
            System.out.println("第一步：调用远程BaseUserApi获取用户姓名");
            System.out.println("第二步：调用远程BrandApi获取品牌名");
            System.out.println("第三步：组装数据，向数据库写入日志，并且更新猎头在直猎邦平台的状态");
            // Committed offset is the position of the NEXT record to consume, hence +1.
            return Collections.singletonMap(partition, new OffsetAndMetadata(record.offset() + 1));
        }
    }

    /**
     * Logs partition ownership changes during consumer-group rebalances.
     */
    private static class MyConsumerRebalanceListener implements ConsumerRebalanceListener {

        @Override
        public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
            System.out.println("onPartitionsRevoked: " + describe(partitions));
        }

        @Override
        public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
            System.out.println("onPartitionsAssigned: " + describe(partitions));
        }

        /** Renders the partition list as a single "、"-separated string. */
        private static String describe(Collection<TopicPartition> partitions) {
            return partitions.stream()
                    .map(TopicPartition::toString)
                    .collect(Collectors.joining("、"));
        }
    }
}
