package org.xxd.kafka.clients.producer.internals;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.xxd.kafka.clients.common.Cluster;
import org.xxd.kafka.clients.common.Node;
import org.xxd.kafka.clients.common.TopicPartition;
import org.xxd.kafka.clients.common.record.Record;
import org.xxd.kafka.clients.common.util.CopyOnWriteMap;
import org.xxd.kafka.clients.common.util.Time;

import java.util.ArrayList;
import java.util.Deque;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;

/**
 * In-memory buffering structure that groups records into batches before they
 * are handed to the sender.
 *
 * <p>Records destined for different topics/partitions must be packed into
 * batches. Aggregation is done per {@link TopicPartition} rather than per
 * broker: a broker hosts many partitions, so the partition is the natural,
 * finer-grained batching unit, and per-broker grouping can be derived from it
 * when draining. Each partition therefore owns a deque of batches instead of a
 * single batch.
 *
 * @author XiaoDong.Xie
 * @since 2020-09-27
 */
public class RecordAccumulator {

    private static final Logger log = LoggerFactory.getLogger(RecordAccumulator.class);

    /** Set when the producer is closing; makes every non-empty batch immediately sendable. */
    private volatile boolean closed;
    /** Number of flush() operations currently in progress. */
    private final AtomicInteger flushesInProgress;
    /** Number of threads currently appending records. */
    private final AtomicInteger appendsInProgress;
    private final int batchSize;
    private final String compression;
    /** Longest time a record may sit in the accumulator before it becomes sendable. */
    private final long lingerMs;
    /** Back-off interval before a failed batch may be retried. */
    private final long retryBackoffMs;
    /** Pool that batch buffers are allocated from and returned to. */
    private final BufferPool free;
    private final Time time;
    /** Per-partition batch deques; copy-on-write map suits the read-mostly access pattern. */
    private final ConcurrentMap<TopicPartition, Deque<RecordBatch>> batches;
    /** Batches that have been sent (or created) but not yet acknowledged. */
    private final IncompleteRecordBatches incomplete;
    // Partitions temporarily excluded from sending.
    // NOTE(review): this is a plain HashSet read in ready() without holding a lock —
    // confirm that all mutation happens on the same (sender) thread.
    private final Set<TopicPartition> muted;
    // Round-robin cursor for draining. NOTE(review): initialized but never read in
    // this class — presumably reserved for a drain() implementation.
    private int drainIndex;

    /**
     * Creates an accumulator backed by a bounded buffer pool.
     *
     * @param batchSize       size of a single batch, in bytes
     * @param totalMemorySize total memory available for buffering, in bytes
     * @param compression     compression type name
     * @param lingerMs        maximum time a record may linger before it is sendable
     * @param retryBackoffMs  interval to wait before retrying a failed send
     * @param time            time utility component
     */
    public RecordAccumulator(int batchSize,
                             long totalMemorySize,
                             String compression,
                             long lingerMs,
                             long retryBackoffMs,
                             Time time) {
        this.drainIndex = 0;
        this.closed = false;
        this.flushesInProgress = new AtomicInteger(0);
        this.appendsInProgress = new AtomicInteger(0);
        this.batchSize = batchSize;
        this.compression = compression;
        this.lingerMs = lingerMs;
        this.retryBackoffMs = retryBackoffMs;
        this.batches = new CopyOnWriteMap<>();
        this.free = new BufferPool(totalMemorySize, batchSize, time);
        this.incomplete = new IncompleteRecordBatches();
        this.muted = new HashSet<>();
        this.time = time;
    }

    /**
     * @return whether there is any unsent record in the accumulator
     */
    public boolean hasUnsent() {
        for (Map.Entry<TopicPartition, Deque<RecordBatch>> entry : this.batches.entrySet()) {
            Deque<RecordBatch> deque = entry.getValue();
            synchronized (deque) {
                if (!deque.isEmpty())
                    return true;
            }
        }
        return false;
    }

    /**
     * Aborts every incomplete batch, looping until no appender threads remain.
     */
    public void abortIncompleteBatches() {
        // We need to keep aborting the incomplete batch until no thread is trying to append to
        // 1. Avoid losing batches.
        // 2. Free up memory in case appending threads are blocked on buffer full.
        // This is a tight loop but should be able to get through very quickly.
        do {
            abortBatches();
        } while (appendsInProgress());
        // After this point, no thread will append any messages because they will see the close
        // flag set. We need to do the last abort after no thread was appending in case there was a new
        // batch appended by the last appending thread.
        abortBatches();
        this.batches.clear();
    }

    /** @return true while at least one thread is appending records */
    private boolean appendsInProgress() {
        return appendsInProgress.get() > 0;
    }

    /**
     * Fails every incomplete batch with an exception and returns its buffer to the pool.
     */
    private void abortBatches() {
        for (RecordBatch batch : incomplete.all()) {
            // NOTE(review): getDeque may return null if batches was cleared concurrently — verify.
            Deque<RecordBatch> dq = getDeque(batch.topicPartition);
            // Close the batch before aborting
            synchronized (dq) {
                batch.records.close();
                dq.remove(batch);
            }
            batch.done(-1L, Record.NO_TIMESTAMP, new IllegalStateException("Producer is closed forcefully."));
            deallocate(batch);
        }
    }

    /**
     * Returns the batch's buffer to the memory pool and drops it from the incomplete set.
     *
     * @param batch batch whose buffer is to be released
     */
    private void deallocate(RecordBatch batch) {
        incomplete.remove(batch);
        free.deallocate(batch.records.buffer(), batch.records.initialCapacity());
    }

    private Deque<RecordBatch> getDeque(TopicPartition tp) {
        return batches.get(tp);
    }

    /**
     * Determines which nodes are ready to receive data.
     *
     * <p>A partition's leader is ready when any of the following holds (and the
     * partition is not backing off from a failed attempt):
     * <ol>
     *   <li>a RecordBatch is full (or the deque holds more than one batch),</li>
     *   <li>the batch has waited at least {@code linger.ms} since its last attempt,</li>
     *   <li>threads are blocked waiting for buffer memory (pool exhausted),</li>
     *   <li>the producer is closing, or a flush is in progress.</li>
     * </ol>
     *
     * @param cluster current cluster metadata used to look up partition leaders
     * @param nowMs   current time in milliseconds
     * @return the set of ready nodes, the delay until the next readiness check,
     *         and whether any partition has an unknown leader
     */
    public ReadyCheckResult ready(Cluster cluster, long nowMs) {
        Set<Node> readyNodes = new HashSet<>();
        long nextReadyCheckDelayMs = Long.MAX_VALUE;
        boolean unknownLeadersExist = false;
        // true when at least one thread is blocked waiting for buffer memory
        boolean exhausted = this.free.queued() > 0;
        for (Map.Entry<TopicPartition, Deque<RecordBatch>> entry : batches.entrySet()) {
            TopicPartition part = entry.getKey();
            Deque<RecordBatch> deque = entry.getValue();

            Node leader = cluster.leaderFor(part);
            if (leader == null) {
                unknownLeadersExist = true;
            } else if (!readyNodes.contains(leader) && !muted.contains(part)) {
                synchronized (deque) {
                    RecordBatch batch = deque.peekFirst();
                    if (batch != null) {
                        // Backing off: the batch is being retried and the retry back-off has
                        // not yet elapsed; it only becomes ready once the back-off expires.
                        boolean backingOff = batch.attempts > 0 && batch.lastAttemptMs + retryBackoffMs > nowMs;
                        long waitedTimeMs = nowMs - batch.lastAttemptMs;
                        long timeToWaitMs = backingOff ? retryBackoffMs : lingerMs;
                        long timeLeftMs = Math.max(timeToWaitMs - waitedTimeMs, 0);

                        // More than one batch queued implies the head batch is complete.
                        boolean full = deque.size() > 1 || batch.records.isFull();
                        boolean expired = waitedTimeMs >= timeToWaitMs;
                        boolean sendable = full || expired || exhausted || closed || flushInProgress();
                        if (sendable && !backingOff) {
                            readyNodes.add(leader);
                        } else {
                            // Note that this results in a conservative estimate since an un-sendable partition may have
                            // a leader that will later be found to have sendable data. However, this is good enough
                            // since we'll just wake up and then sleep again for the remaining time.
                            nextReadyCheckDelayMs = Math.min(timeLeftMs, nextReadyCheckDelayMs);
                        }
                    }
                }
            }
        }

        return new ReadyCheckResult(readyNodes, nextReadyCheckDelayMs, unknownLeadersExist);
    }

    /** @return true while at least one flush() is in progress */
    private boolean flushInProgress() {
        return flushesInProgress.get() > 0;
    }

    /**
     * Immutable result of a readiness check.
     */
    public static final class ReadyCheckResult {
        /** Nodes that are ready to be sent data. */
        public final Set<Node> readyNodes;
        /** Delay before the next readiness check should run. */
        public final long nextReadyCheckDelayMs;
        /** Whether any partition had no known leader. */
        public final boolean unknownLeadersExist;

        public ReadyCheckResult(Set<Node> readyNodes, long nextReadyCheckDelayMs, boolean unknownLeadersExist) {
            this.readyNodes = readyNodes;
            this.nextReadyCheckDelayMs = nextReadyCheckDelayMs;
            this.unknownLeadersExist = unknownLeadersExist;
        }
    }

    /**
     * Thread-safe set of batches that have been sent but not yet acknowledged.
     * All access is serialized on the internal set's monitor.
     */
    private static final class IncompleteRecordBatches {
        private final Set<RecordBatch> incomplete;

        public IncompleteRecordBatches() {
            this.incomplete = new HashSet<>();
        }

        public void add(RecordBatch batch) {
            synchronized (incomplete) {
                this.incomplete.add(batch);
            }
        }

        public void remove(RecordBatch batch) {
            synchronized (incomplete) {
                boolean removed = this.incomplete.remove(batch);
                if (!removed)
                    throw new IllegalStateException("Remove from the incomplete set failed. This should be impossible.");
            }
        }

        /** @return a snapshot copy, so callers may iterate without holding the lock */
        public Iterable<RecordBatch> all() {
            synchronized (incomplete) {
                return new ArrayList<>(this.incomplete);
            }
        }
    }
}
