package com.xazenith.kfk.consumer;

import com.xazenith.kfk.concurrent.ConsumerConcurrentContext;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.time.Duration;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;
import java.util.stream.Collectors;

import static com.xazenith.kfk.config.Configuration.*;

/**
 * 消息消费者
 *
 * @author zhou wei
 * @version 0.1
 * @since 2020-07-07
 */
public final class MessageConsumer {

    private final Logger logger = LoggerFactory.getLogger(MessageConsumer.class);

    /**
     * Underlying Kafka consumer.
     * <p>
     * NOTE(review): {@link KafkaConsumer} is not thread-safe; all consumer calls are made
     * from the single scheduled polling task created in {@link #listening(Consumer, Integer)}.
     * {@link #terminal()} closing it from another thread may race with an in-flight poll —
     * a wakeup-based shutdown would be safer; confirm against callers before changing.
     */
    private final KafkaConsumer<String, String> consumer;

    /**
     * Kafka bootstrap servers this consumer connects to.
     */
    private final String servers;

    /**
     * Topics this consumer is subscribed to.
     */
    private final List<String> topics;

    /**
     * Consumer group id.
     * <p>
     * The number of consumers in one group should not exceed the total partition count.
     */
    private final String groupId;

    /**
     * Unique client id, built as {@code groupId + "/" + random UUID}.
     */
    private final String clientId;

    /**
     * Whether newly assigned partitions without a locally tracked offset should be
     * consumed from the first message.
     */
    private final boolean fromBeginning;

    /**
     * Next offset to commit (highest consumed offset + 1) per partition.
     * <p>
     * Used for manual commits and for restoring positions after a rebalance.
     */
    private final Map<TopicPartition, OffsetAndMetadata> pAndM = new ConcurrentHashMap<>();

    /**
     * Handle of the scheduled polling task; {@code null} until {@link #listening} is called.
     */
    private ScheduledFuture<?> schedule;

    /**
     * Creates a consumer and subscribes it to the given topics.
     *
     * @param servers       bootstrap servers (host:port[,host:port...])
     * @param groupId       consumer group id
     * @param fromBeginning whether to read newly assigned partitions from the beginning
     * @param topics        topics to subscribe to; {@code null} is treated as empty
     */
    public MessageConsumer(String servers,
                           String groupId,
                           boolean fromBeginning,
                           List<String> topics) {

        this.groupId = groupId;
        this.servers = servers;
        this.clientId = groupId + "/" + UUID.randomUUID().toString();
        this.fromBeginning = fromBeginning;

        Properties properties = consumerConfig(servers, groupId, false);
        properties.setProperty(KFK_CONFIG_CLIENT_ID, this.clientId);

        this.consumer = new KafkaConsumer<>(properties);

        // Defensive copy; tolerate a null topic list.
        if (Objects.nonNull(topics)) {
            this.topics = new ArrayList<>(topics);
        } else {
            this.topics = new ArrayList<>();
        }

        if (this.topics.isEmpty()) {
            logger.warn("kfk consumer, servers: {}, group id: {}, client id: {}. topics is empty!", this.servers, this.groupId, this.clientId);
        }

        consumer.subscribe(this.topics, new DefaultRebalanceListener());

        logger.info("create kfk consumer, servers: {}, group id: {}, client id: {}, topics: {}",
                this.servers, this.groupId, this.clientId, String.join(",", this.topics));
    }

    /**
     * Starts listening for records with the default poll timeout.
     *
     * @param recordsConsumer callback invoked with each non-empty batch of records
     */
    public void listening(Consumer<ConsumerRecords<String, String>> recordsConsumer) {
        this.listening(recordsConsumer, KFK_CONFIG_DEFAULT_POLL_IN_MILLS);
    }

    /**
     * Starts listening for records on the shared scheduled executor.
     * <p>
     * A repeated call while a polling task is still active is ignored (the original
     * implementation silently leaked the previous {@link ScheduledFuture}).
     *
     * @param recordsConsumer callback invoked with each non-empty batch of records
     * @param waitMills       per-poll wait for server data, in milliseconds;
     *                        {@code null} falls back to the configured default
     */
    public void listening(Consumer<ConsumerRecords<String, String>> recordsConsumer, Integer waitMills) {
        if (Objects.nonNull(this.schedule) && !this.schedule.isDone()) {
            logger.warn("kfk consumer already listening, servers: {}, group id: {}, client id: {}. duplicate listening() call ignored",
                    this.servers, this.groupId, this.clientId);
            return;
        }
        int wm = Objects.isNull(waitMills) ? KFK_CONFIG_DEFAULT_POLL_IN_MILLS : waitMills;
        Runnable running = () -> {
            try {
                ConsumerRecords<String, String> poll = consumer.poll(Duration.ofMillis(wm));
                if (!poll.isEmpty()
                        && Objects.nonNull(recordsConsumer)) {

                    // Remember the highest consumed offset (+1) of every polled partition
                    // so it can be committed below and restored on rebalance.
                    for (TopicPartition partition : poll.partitions()) {
                        List<ConsumerRecord<String, String>> records = poll.records(partition);
                        Optional<ConsumerRecord<String, String>> max = records.stream().max(
                                Comparator.comparingLong(ConsumerRecord::offset));
                        // BUG FIX: the original stored record.toString() as commit metadata,
                        // shipping the full record dump to the broker on every commit.
                        max.ifPresent(
                                r -> pAndM.put(partition, new OffsetAndMetadata(r.offset() + 1))
                        );
                    }
                    recordsConsumer.accept(poll);
                }
                // BUG FIX: commit inside the try block. The original committed after the
                // catch, so the error path that had just closed the consumer immediately
                // called commitSync() on the closed instance.
                if (!pAndM.isEmpty()) {
                    consumer.commitSync(pAndM);
                }
            } catch (Exception ex) {
                logger.error("consumer is closing by error, servers: " + servers
                        + " , topics: " + String.join(",", topics)
                        + ". error message: " + ex.getMessage(), ex);
                // BUG FIX: cancel the scheduled task before closing, otherwise the
                // executor keeps polling a closed consumer every 100 ms forever.
                suspend();
                consumer.close();
            }
        };
        this.schedule = ConsumerConcurrentContext.SCHEDULED_THREAD_POOL_EXECUTOR.scheduleWithFixedDelay(
                running, 100, 100, TimeUnit.MILLISECONDS);
    }

    /**
     * Suspends listening.
     * <p>
     * The in-flight polling task (if any) is allowed to finish; no further tasks run.
     * Listening can be restarted with {@link #listening}.
     */
    public void suspend() {
        if (Objects.nonNull(this.schedule)) {
            if (!this.schedule.isCancelled()) {
                // false: never interrupt a task that is mid-poll/mid-commit.
                this.schedule.cancel(false);
            }
        }
    }

    /**
     * Terminates this client: stops the polling task and closes the consumer.
     * <p>
     * NOTE(review): close() may race with a polling task that is still finishing its
     * last run — KafkaConsumer is not thread-safe; confirm shutdown ordering with callers.
     */
    public void terminal() {
        this.suspend();
        this.consumer.close();
    }

    /**
     * Consumer group id.
     *
     * @return group id
     */
    public String getGroupId() {
        return groupId;
    }

    /**
     * Client id of this consumer instance.
     *
     * @return client id
     */
    public String getClientId() {
        return clientId;
    }

    /**
     * Bootstrap servers.
     *
     * @return servers, host:port
     */
    public String getServers() {
        return servers;
    }

    /**
     * Subscribed topics.
     *
     * @return defensive copy of the topic list
     */
    public List<String> getTopics() {
        return new ArrayList<>(topics);
    }

    /**
     * Rebalance listener that commits tracked offsets on revocation and restores
     * consume positions on assignment.
     */
    private class DefaultRebalanceListener implements ConsumerRebalanceListener {

        /**
         * Commits the tracked offsets of the partitions being revoked, then evicts them
         * from the local cache.
         * <p>
         * BUG FIX: the original committed the whole {@code pAndM} map and never removed
         * revoked entries, so the polling loop later re-committed offsets for partitions
         * owned by other consumers (which fails with CommitFailedException).
         *
         * @param partitions partitions being taken away from this consumer
         */
        @Override
        public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
            Map<TopicPartition, OffsetAndMetadata> revoked = new HashMap<>();
            for (TopicPartition partition : partitions) {
                OffsetAndMetadata om = pAndM.get(partition);
                if (Objects.nonNull(om)) {
                    revoked.put(partition, om);
                }
            }
            if (!revoked.isEmpty()) {
                consumer.commitSync(revoked);
                pAndM.keySet().removeAll(revoked.keySet());
                logger.info("commit offset on revoked, servers: {}, topics: {}, offset: {}",
                        servers, String.join(",", topics),
                        revoked.values().stream()
                                .map(OffsetAndMetadata::offset)
                                .map(String::valueOf)
                                .collect(Collectors.joining(","))
                );
            }
        }

        /**
         * Restores consume positions for newly assigned partitions: seek to the locally
         * tracked offset when one exists, otherwise seek to the beginning when
         * {@code fromBeginning} is set; otherwise rely on the committed offset /
         * {@code auto.offset.reset} policy.
         *
         * @param partitions partitions newly assigned to this consumer
         */
        @Override
        public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
            if (partitions.isEmpty()) {
                return;
            }
            for (TopicPartition partition : partitions) {
                OffsetAndMetadata offsetAndMetadata = pAndM.get(partition);
                if (Objects.isNull(offsetAndMetadata) && fromBeginning) {
                    offsetAndMetadata = new OffsetAndMetadata(0, String.valueOf(System.currentTimeMillis()));
                    consumer.seek(partition, offsetAndMetadata);
                } else if (Objects.nonNull(offsetAndMetadata)) {
                    consumer.seek(partition, offsetAndMetadata);
                }
            }
            // Log once per callback, not once per partition as the original did; this
            // method seeks — it does not commit — so the message says so.
            logger.info("seek offset on partitions assigned, servers: {}, topics: {}, offset: {}",
                    servers, String.join(",", topics),
                    pAndM.values().stream()
                            .map(OffsetAndMetadata::offset)
                            .map(String::valueOf)
                            .collect(Collectors.joining(","))
            );
        }
    }
}
