package demo;


import lombok.Getter;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.Config;
import org.apache.kafka.clients.admin.KafkaAdminClient;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.config.ConfigResource;
import org.apache.kafka.common.errors.*;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;
import org.apache.kafka.common.serialization.ByteArraySerializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;

import java.time.Duration;
import java.util.*;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Function;
import java.util.regex.Pattern;
import java.util.stream.Collectors;


/**
 * Kafka utility class.
 *
 * <p>Factory helpers for consumers, pooled transactional producers and topic creation,
 * plus helpers for batched offset commits and a transactional consume-transform-produce loop.
 * Call {@link #init(String, String)} once at startup before using any factory method.
 */
@Slf4j
public class KafkaUtils {

    private static final String CLIENT_ID = "push-message";

    /**
     * Monotonic suffix used to build unique client ids / transactional ids.
     */
    private static final AtomicInteger transactionSuffix = new AtomicInteger(0);

    /**
     * Pool of idle transactional producers; every pooled instance has already had
     * {@code initTransactions()} called.
     */
    private static final BlockingQueue<Producer<String, byte[]>> cache = new LinkedBlockingQueue<>();

    /**
     * Kafka bootstrap servers. volatile: written by init(), read by factory methods
     * possibly on other threads.
     */
    private static volatile String bootstrapServers = "127.0.0.1:9092";

    /**
     * Prefix for transactional ids. volatile for the same reason as bootstrapServers.
     */
    private static volatile String transactionPrefix = "TX";

    /**
     * Configures connection settings. Call once before any factory method.
     *
     * @param bootstrapServers  kafka bootstrap servers, e.g. {@code "host1:9092,host2:9092"}
     * @param transactionPrefix prefix for transactional ids; applications sharing a cluster
     *                          must use distinct prefixes or their producers will fence each other
     */
    public static void init(String bootstrapServers, String transactionPrefix) {
        KafkaUtils.bootstrapServers = bootstrapServers;
        KafkaUtils.transactionPrefix = transactionPrefix;
    }


    /**
     * Creates a consumer subscribed to all topics matching {@code pattern}, with default config.
     *
     * @param groupId  consumer group id
     * @param pattern  topic subscription pattern
     * @param listener factory producing a {@link ConsumerRebalanceListener} for the new consumer; may be null
     * @return a subscribed {@code Consumer}; the caller owns it and must close it
     */
    public static Consumer<String, byte[]> createConsumer(String groupId, Pattern pattern, Function<Consumer<String, byte[]>, ConsumerRebalanceListener> listener) {
        return createConsumer(groupId, pattern, null, listener);
    }

    /**
     * Creates a consumer subscribed to all topics matching {@code pattern}.
     *
     * <p>Defaults: earliest reset, manual commits, read_committed isolation (so records from
     * aborted transactions are never seen), max 50 records per poll.
     *
     * @param groupId     consumer group id
     * @param pattern     topic subscription pattern
     * @param applyConfig optional hook to override the default {@link Properties}; may be null
     * @param listener    factory producing a {@link ConsumerRebalanceListener} for the new consumer; may be null
     * @return a subscribed {@code Consumer}; the caller owns it and must close it
     */
    public static Consumer<String, byte[]> createConsumer(String groupId, Pattern pattern, java.util.function.Consumer<Properties> applyConfig, Function<Consumer<String, byte[]>, ConsumerRebalanceListener> listener) {
        final Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
        // Offsets are committed explicitly (see commitSyncAll / sendOffsetsToTransaction).
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 50);

        // Session timeout: broker evicts the member if no heartbeat arrives within this window.
//        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 10000);
        // Heartbeat interval speeds up rebalance detection; conventionally 1/3 of the session timeout.
//        props.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 3000);

        if (applyConfig != null) {
            applyConfig.accept(props);
        }

        final Consumer<String, byte[]> consumer = new KafkaConsumer<>(props);

        if (listener != null) {
            consumer.subscribe(pattern, listener.apply(consumer));
        } else {
            consumer.subscribe(pattern);
        }

        return consumer;
    }

    /**
     * Returns a transactional producer with default configuration, reusing a pooled one if available.
     *
     * @return a producer ready for {@code beginTransaction()}; return it via
     *         {@link #closeTransactionProducer(Producer)} when done
     */
    public static Producer<String, byte[]> createTransactionProducer() {
        return createTransactionProducer(null);
    }

    /**
     * Returns a transactional producer, reusing a pooled instance when available.
     *
     * <p>Newly created producers get a unique client id / transactional id and have
     * {@code initTransactions()} invoked before being returned, so callers can go straight
     * to {@code beginTransaction()}.
     *
     * @param applyConfig optional hook to override the default {@link Properties}; may be null.
     *                    Note: ignored when a pooled producer is reused.
     * @return a transaction-ready producer
     */
    public static Producer<String, byte[]> createTransactionProducer(java.util.function.Consumer<Properties> applyConfig) {
        Producer<String, byte[]> transactionProducer = cache.poll();
        if (transactionProducer == null) {
            int suffix = transactionSuffix.incrementAndGet();
            Properties props = new Properties();
            props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
            props.put(ProducerConfig.CLIENT_ID_CONFIG, CLIENT_ID + "_" + suffix);
            // Idempotence is required for transactions.
            props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true");
            props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, transactionPrefix + "_" + CLIENT_ID + "_" + suffix);
            props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
            props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());

            if (applyConfig != null) {
                applyConfig.accept(props);
            }

            transactionProducer = new KafkaProducer<>(props);
            // Registers the transactional id with the broker; must run exactly once per producer.
            transactionProducer.initTransactions();
        }
        return transactionProducer;
    }

    /**
     * Returns a transactional producer to the pool for reuse (it is NOT actually closed).
     * The producer must not have an open transaction.
     *
     * @param transactionProducer producer obtained from {@link #createTransactionProducer()}; null is logged and ignored
     */
    public static void closeTransactionProducer(Producer<String, byte[]> transactionProducer) {
        if (transactionProducer != null) {
            cache.offer(transactionProducer);
        } else {
            log.warn("close null producer");
        }
    }

    /**
     * Runs {@code action} inside a producer transaction and returns the producer to the pool.
     *
     * <p>Error handling follows the pattern documented for {@code KafkaProducer}:
     * <ul>
     *   <li>{@link ProducerFencedException} / {@link OutOfOrderSequenceException} /
     *       {@link AuthorizationException} are fatal — the producer must be closed and must NOT
     *       be aborted (aborting a fenced producer would itself throw and leak the instance);</li>
     *   <li>any other {@link KafkaException} aborts the transaction and the producer stays reusable;</li>
     *   <li>any other runtime exception from {@code action} aborts the transaction and is rethrown.</li>
     * </ul>
     *
     * @param action work to perform with the transactional producer
     */
    public static void producerInTransaction(java.util.function.Consumer<Producer<String, byte[]>> action) {
        Producer<String, byte[]> transactionProducer = createTransactionProducer();
        try {
            // Begin transaction
            transactionProducer.beginTransaction();
            action.accept(transactionProducer);
            // Commit transaction
            transactionProducer.commitTransaction();
        } catch (ProducerFencedException | OutOfOrderSequenceException | AuthorizationException e) {
            // ProducerFencedException: another instance with the same transactional.id and a newer
            // epoch exists. OutOfOrderSequenceException: broker saw an unexpected sequence number on
            // an idempotent/transactional producer. Both (and AuthorizationException) are fatal:
            // close without aborting.
            log.error("run transaction producer error", e);
            transactionProducer.close(Duration.ofSeconds(5));
            transactionProducer = null;
        } catch (KafkaException e) {
            // Recoverable Kafka error: abort so the producer can be pooled and reused.
            log.warn("transaction aborted", e);
            transactionProducer.abortTransaction();
        } catch (RuntimeException e) {
            // Failure inside the action itself: abort so the pooled producer has no open
            // transaction, then propagate to the caller.
            transactionProducer.abortTransaction();
            throw e;
        } finally {
            closeTransactionProducer(transactionProducer);
        }
    }

    /**
     * Drains up to {@code size} entries from {@code offsetInfoQueue}, keeps the highest offset
     * per partition, and commits them synchronously.
     *
     * @param consumer        the consumer to commit with
     * @param offsetInfoQueue queue of pending offsets; drained entries are removed
     * @param size            maximum number of entries to drain, or -1 to drain everything
     */
    public static void commitSyncAll(Consumer<String, byte[]> consumer, Queue<OffsetInfo> offsetInfoQueue, int size) {

        Map<TopicPartition, OffsetAndMetadata> commitOffset = new HashMap<>(offsetInfoQueue.size());

        int value = 0;

        while (true) {

            OffsetInfo offsetInfo = offsetInfoQueue.poll();

            if (offsetInfo == null) {
                break;
            }

            // Keep the largest offset seen for each partition.
            commitOffset.merge(offsetInfo.getTopicPartition(), offsetInfo.getOffsetAndMetadata(), (a, b) -> {
                if (a.offset() > b.offset()) {
                    return a;
                }
                return b;
            });

            value += 1;

            if (size != -1 && value >= size) {
                break;
            }
        }

        if (!commitOffset.isEmpty()) {
            consumer.commitSync(commitOffset);
        }
    }


    /**
     * Consumes Kafka messages and routes them to customer Kafka topics.
     *
     * <p>Runs an exactly-once consume-transform-produce loop: each polled batch is transformed,
     * the resulting records are sent, and the consumed offsets are committed via
     * {@code sendOffsetsToTransaction} inside the same transaction. Blocks until
     * {@code consumer.wakeup()} is called, then closes the consumer.
     *
     * @param consumer      subscribed consumer (created with this class's read_committed defaults)
     * @param routerGroupId consumer group id whose offsets are committed within the transaction
     * @param transform     maps one consumed record to zero or more records to produce
     */
    public static void routerTransform(Consumer<String, byte[]> consumer, String routerGroupId, Function<ConsumerRecord<String, byte[]>, List<ProducerRecord<String, byte[]>>> transform) {

        final Duration pollDuration = Duration.ofMillis(100);
        while (true) {
            try {
                ConsumerRecords<String, byte[]> records = consumer.poll(pollDuration);
                if (records.isEmpty()) {
                    continue;
                }
                List<ProducerRecord<String, byte[]>> recordsToSend = new ArrayList<>();
                records.forEach(r -> recordsToSend.addAll(transform.apply(r)));
                if (recordsToSend.isEmpty()) {
                    continue;
                }
                KafkaUtils.producerInTransaction((producer) -> {
                    recordsToSend.forEach(producer::send);
                    // Commit the offset after the last record of each partition (+1: the next
                    // offset to consume) inside the transaction for exactly-once semantics.
                    Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>(records.count());
                    records.partitions().stream().collect(Collectors.toMap(Function.identity(), records::records))
                            .forEach((key, value) -> offsets.put(key, new OffsetAndMetadata(value.get(value.size() - 1).offset() + 1)));
                    producer.sendOffsetsToTransaction(offsets, routerGroupId);
                });
            } catch (WakeupException e) {
                // consumer.wakeup() is the cooperative shutdown signal for this loop.
                log.info("router consumer exit");
                break;
            } catch (Throwable e) {
                // Boundary loop: log and keep consuming; offsets of the failed batch were not
                // committed, so the batch will be re-polled.
                log.error("consumer device message error", e);
            }
        }
        consumer.close();
    }

    /**
     * Creates topics with cluster-derived defaults for partitions and replication;
     * topics that already exist are skipped.
     *
     * @param topics topics to ensure
     */
    public static void ensureTopics(List<String> topics) {
        ensureTopics(topics, -1, -1, null);
    }

    /**
     * Creates topics; topics that already exist are skipped.
     *
     * @param topics      topics to ensure
     * @param partitions  partition count; &lt;= 0 defaults to twice the broker count
     * @param replication replication factor; &lt;= 0 or greater than the broker count is
     *                    clamped to the broker count
     * @param config      optional per-topic extra configuration applied after creation; may be null
     */
    public static void ensureTopics(List<String> topics, int partitions, int replication, Function<String, Map<ConfigResource, Config>> config) {

        final Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);

        log.info("start create topic: {}", topics);

        try (AdminClient adminClient = AdminClient.create(props)) {

            // Broker count, used to derive default partition/replication values.
            int nodeSize = 1;
            try {
                nodeSize = adminClient.describeCluster().nodes().get().size();
            } catch (ExecutionException e) {
                log.error("describe cluster error, assuming single node", e);
            } catch (InterruptedException e) {
                // Restore the interrupt flag and bail out instead of creating topics
                // with possibly-wrong defaults.
                Thread.currentThread().interrupt();
                log.error("describe cluster interrupted", e);
                return;
            }

            // Resolve defaults ONCE, before the loop. The original mutated the parameters inside
            // the loop, so the first topic's resolved values silently leaked into later iterations.
            final int effectiveReplication = (replication <= 0 || replication > nodeSize) ? nodeSize : replication;
            final int effectivePartitions = partitions <= 0 ? nodeSize * 2 : partitions;

            for (String topic : topics) {
                try {
                    adminClient.createTopics(Collections.singleton(new NewTopic(topic, effectivePartitions, (short) effectiveReplication))).all().get();

                    if (config != null) {
                        // Apply extra per-topic configuration.
                        Map<ConfigResource, Config> resourceConfigMap = config.apply(topic);
                        if (resourceConfigMap != null && !resourceConfigMap.isEmpty()) {
                            adminClient.alterConfigs(resourceConfigMap).all().get();
                        }
                    }
                } catch (ExecutionException e) {
                    if (e.getCause() instanceof TopicExistsException) {
                        log.warn("topic: {} already exists,ignore create", topic);
                    } else {
                        log.error("create topic: {} error", topic, e);
                    }
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    log.error("create topic: {} error", topic, e);
                    break;
                }
            }
        }
    }


    /**
     * Immutable pairing of a partition with the offset to commit for it.
     */
    @Getter
    public static class OffsetInfo {
        private final TopicPartition topicPartition;
        private final OffsetAndMetadata offsetAndMetadata;

        public OffsetInfo(TopicPartition topicPartition, OffsetAndMetadata offsetAndMetadata) {
            this.topicPartition = topicPartition;
            this.offsetAndMetadata = offsetAndMetadata;
        }
    }

}
