/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.kafka.clients.producer.internals;

import org.apache.kafka.clients.ApiVersions;
import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.UnsupportedVersionException;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.metrics.Measurable;
import org.apache.kafka.common.metrics.MetricConfig;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Meter;
import org.apache.kafka.common.record.AbstractRecords;
import org.apache.kafka.common.record.CompressionRatioEstimator;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.TimestampType;
import org.apache.kafka.common.utils.CopyOnWriteMap;
import org.apache.kafka.common.utils.LogContext;
import org.apache.kafka.common.utils.Time;
import org.apache.kafka.common.utils.Utils;
import org.slf4j.Logger;

import java.nio.ByteBuffer;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Deque;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicInteger;

/**
 * This class acts as a queue that accumulates records into {@link MemoryRecords}
 * instances to be sent to the server.
 * <p>
 * The accumulator uses a bounded amount of memory and append calls will block when that memory is exhausted, unless
 * this behavior is explicitly disabled.
 */
public final class RecordAccumulator {

    private final Logger log;
    private volatile boolean closed;
    private final AtomicInteger flushesInProgress;
    private final AtomicInteger appendsInProgress;
    private final int batchSize;
    private final CompressionType compression;
    private final long lingerMs;
    private final long retryBackoffMs;
    private final BufferPool free;
    private final Time time;
    private final ApiVersions apiVersions;
    private final ConcurrentMap<TopicPartition, Deque<ProducerBatch>> batches;
    private final IncompleteBatches incomplete;
    // The following variables are only accessed by the sender thread, so we don't need to protect them.
    private final Map<TopicPartition, Long> muted;
    private int drainIndex;
    private final TransactionManager transactionManager;

    /**
     * Create a new record accumulator
     *
     * @param logContext The log context used for logging
     * @param batchSize The size to use when allocating {@link MemoryRecords} instances
     * @param totalSize The maximum memory the record accumulator can use.
     * @param compression The compression codec for the records
     * @param lingerMs An artificial delay time to add before declaring a records instance that isn't full ready for
     *        sending. This allows time for more records to arrive. Setting a non-zero lingerMs will trade off some
     *        latency for potentially better throughput due to more batching (and hence fewer, larger requests).
     * @param retryBackoffMs An artificial delay time to retry the produce request upon receiving an error. This
     *        avoids exhausting all retries in a short period of time.
     * @param metrics The metrics
     * @param time The time instance to use
     * @param apiVersions Request API versions for current connected brokers
     * @param transactionManager The shared transaction state object which tracks producer IDs, epochs, and sequence
     *                           numbers per partition.
     */
    public RecordAccumulator(LogContext logContext,
                             int batchSize,
                             long totalSize,
                             CompressionType compression,
                             long lingerMs,
                             long retryBackoffMs,
                             Metrics metrics,
                             Time time,
                             ApiVersions apiVersions,
                             TransactionManager transactionManager) {
        this.log = logContext.logger(RecordAccumulator.class);
        this.drainIndex = 0;
        this.closed = false;
        this.flushesInProgress = new AtomicInteger(0);
        this.appendsInProgress = new AtomicInteger(0);
        this.batchSize = batchSize;
        this.compression = compression;
        this.lingerMs = lingerMs;
        this.retryBackoffMs = retryBackoffMs;
        // Copy-on-write map: the partition->deque map is read far more often than new partitions are added.
        this.batches = new CopyOnWriteMap<>();
        String metricGrpName = "producer-metrics";
        this.free = new BufferPool(totalSize, batchSize, metrics, time, metricGrpName);
        this.incomplete = new IncompleteBatches();
        this.muted = new HashMap<>();
        this.time = time;
        this.apiVersions = apiVersions;
        this.transactionManager = transactionManager;
        registerMetrics(metrics, metricGrpName);
    }

    /**
     * Register accumulator-level metrics: gauges over the BufferPool state and a meter for
     * record sends dropped due to buffer exhaustion.
     */
    private void registerMetrics(Metrics metrics, String metricGrpName) {
        // Gauge: user threads currently blocked waiting for buffer memory.
        metrics.addMetric(
                metrics.metricName("waiting-threads", metricGrpName,
                        "The number of user threads blocked waiting for buffer memory to enqueue their records"),
                (MetricConfig config, long now) -> free.queued());

        // Gauge: total buffer memory configured for this producer.
        metrics.addMetric(
                metrics.metricName("buffer-total-bytes", metricGrpName,
                        "The maximum amount of buffer memory the client can use (whether or not it is currently used)."),
                (MetricConfig config, long now) -> free.totalMemory());

        // Gauge: buffer memory that is unallocated or sitting in the free list.
        metrics.addMetric(
                metrics.metricName("buffer-available-bytes", metricGrpName,
                        "The total amount of buffer memory that is not being used (either unallocated or in the free list)."),
                (MetricConfig config, long now) -> free.availableMemory());

        // Meter: rate and cumulative total of record sends dropped on buffer exhaustion.
        Sensor bufferExhaustedRecordSensor = metrics.sensor("buffer-exhausted-records");
        bufferExhaustedRecordSensor.add(new Meter(
                metrics.metricName("buffer-exhausted-rate", metricGrpName,
                        "The average per-second number of record sends that are dropped due to buffer exhaustion"),
                metrics.metricName("buffer-exhausted-total", metricGrpName,
                        "The total number of record sends that are dropped due to buffer exhaustion")));
    }

    /**
     * Add a record to the accumulator, returning the append result.
     * <p>
     * Up to three append attempts are made: twice against the last batch already in the
     * partition's deque (via tryAppend, before and after buffer allocation), and once into a
     * newly created batch backed by a freshly allocated buffer.
     *
     * @param tp The topic/partition this record is sent to
     * @param timestamp The timestamp of the record
     * @param key The key for the record
     * @param value The value for the record
     * @param headers the Headers for the record
     * @param callback The user-supplied callback to execute when the request is complete
     * @param maxTimeToBlock The maximum time in milliseconds to block for buffer memory to be available
     */
    public RecordAppendResult append(TopicPartition tp,
                                     long timestamp,
                                     byte[] key,
                                     byte[] value,
                                     Header[] headers,
                                     Callback callback,
                                     long maxTimeToBlock) throws InterruptedException {
        // Count threads currently appending, so later handling of incomplete batches does not
        // miss batches that are still being written.
        appendsInProgress.incrementAndGet();
        ByteBuffer buffer = null;
        if (headers == null) {
            headers = Record.EMPTY_HEADERS;
        }
        try {
            // Get (or lazily create) the deque for this topic-partition. The deque holds the
            // queued ProducerBatch instances for the partition.
            Deque<ProducerBatch> dq = getOrCreateDeque(tp);
            synchronized (dq) {
                // The producer has been closed; fail the append.
                if (closed) {
                    throw new KafkaException("Producer closed while send in progress");
                }
                // First attempt: append to the last batch already in the deque.
                RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callback, dq);
                if (appendResult != null) {
                    // Appended successfully into an existing batch.
                    return appendResult;
                }
            }

            // No usable batch; allocate a buffer large enough for at least this one record.
            byte maxUsableMagic = apiVersions.maxUsableProduceMagic();
            int size = Math.max(this.batchSize, AbstractRecords.estimateSizeInBytesUpperBound(maxUsableMagic, compression, key, value, headers));
            log.trace("Allocating a new {} byte message buffer for topic {} partition {}", size, tp.topic(), tp.partition());
            // The memory comes from the BufferPool so it can be reused: when a batch completes,
            // its buffer is returned to the pool instead of being discarded for garbage
            // collection. This call may block up to maxTimeToBlock waiting for free memory.
            buffer = free.allocate(size, maxTimeToBlock);
            synchronized (dq) {
                // Re-check after re-acquiring the lock: the producer may have been closed while
                // this thread was blocked in allocate().
                if (closed) {
                    throw new KafkaException("Producer closed while send in progress");
                }
                // Second attempt: another thread may have created a batch in the meantime.
                RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callback, dq);
                if (appendResult != null) {
                    return appendResult;
                }
                // Still no room: build a brand-new ProducerBatch on the allocated buffer.
                MemoryRecordsBuilder recordsBuilder = recordsBuilder(buffer, maxUsableMagic);
                ProducerBatch batch = new ProducerBatch(tp, recordsBuilder, time.milliseconds());
                // This append cannot fail: the buffer was sized to hold at least one record.
                FutureRecordMetadata future = Utils.notNull(batch.tryAppend(timestamp, key, value, headers, callback, time.milliseconds()));
                // Queue the new batch at the tail of the deque.
                dq.addLast(batch);
                // Track the batch as incomplete until it is acknowledged.
                incomplete.add(batch);

                // Important: the batch now owns the buffer. Null the local reference so the
                // finally block below does not return in-use memory to the pool.
                buffer = null;
                return new RecordAppendResult(future, dq.size() > 1 || batch.isFull(), true);
            }
        } finally {
            // Release the buffer if it was allocated but never handed off to a batch (e.g. the
            // second tryAppend succeeded, or an exception was thrown).
            if (buffer != null) {
                free.deallocate(buffer);
            }
            appendsInProgress.decrementAndGet();
        }
    }

    /**
     * Create a {@link MemoryRecordsBuilder} over the given buffer using the supplied magic value.
     *
     * @throws UnsupportedVersionException if idempotence is enabled (a transaction manager is
     *         present) but the usable message format is older than v2
     */
    private MemoryRecordsBuilder recordsBuilder(ByteBuffer buffer, byte maxUsableMagic) {
        boolean idempotenceEnabled = transactionManager != null;
        boolean magicTooOld = maxUsableMagic < RecordBatch.MAGIC_VALUE_V2;
        if (idempotenceEnabled && magicTooOld) {
            throw new UnsupportedVersionException("Attempting to use idempotence with a broker which does not " +
                    "support the required message format (v2). The broker must be version 0.11 or later.");
        }
        return MemoryRecords.builder(buffer, maxUsableMagic, compression, TimestampType.CREATE_TIME, 0L);
    }

    /**
     * Try to append the record to the last ProducerBatch in the given deque (which delegates to
     * ProducerBatch.tryAppend). Returns null when the deque is empty or the last batch has no
     * room — in the latter case the batch is also closed for further appends.
     */
    private RecordAppendResult tryAppend(long timestamp, byte[] key, byte[] value, Header[] headers,
                                         Callback callback, Deque<ProducerBatch> deque) {
        ProducerBatch last = deque.peekLast();
        if (last == null) {
            // Nothing queued yet for this partition.
            return null;
        }
        FutureRecordMetadata future = last.tryAppend(timestamp, key, value, headers, callback, time.milliseconds());
        if (future == null) {
            // The batch is out of space; seal it so no further appends are attempted against it.
            last.closeForRecordAppends();
            return null;
        }
        return new RecordAppendResult(future, deque.size() > 1 || last.isFull(), false);
    }

    /**
     * Check whether the given partition is currently muted, i.e. it has a mute deadline that is
     * still in the future. As a side effect, a stale (expired or absent) entry is removed from
     * the muted map.
     */
    private boolean isMuted(TopicPartition tp, long now) {
        // Single map lookup instead of the original containsKey(tp) + get(tp) pair.
        Long muteDeadlineMs = muted.get(tp);
        if (muteDeadlineMs != null && muteDeadlineMs > now) {
            return true;
        }
        // Not muted (or the mute expired); remove() is a no-op when the key is absent.
        muted.remove(tp);
        return false;
    }

    /**
     * Get a list of batches which have been sitting in the accumulator too long and need to be
     * expired.
     */
    public List<ProducerBatch> expiredBatches(int requestTimeout, long now) {
        List<ProducerBatch> expiredBatches = new ArrayList<>();
        // Walk every partition's deque of queued batches.
        for (Map.Entry<TopicPartition, Deque<ProducerBatch>> entry : this.batches.entrySet()) {
            Deque<ProducerBatch> dq = entry.getValue();
            TopicPartition tp = entry.getKey();
            if (!isMuted(tp, now)) {
                synchronized (dq) {
                    // Iterate from the head and expire batches that have spent longer than
                    // requestTimeout in the accumulator.
                    ProducerBatch lastBatch = dq.peekLast();
                    Iterator<ProducerBatch> batchIterator = dq.iterator();
                    while (batchIterator.hasNext()) {
                        ProducerBatch batch = batchIterator.next();
                        boolean isFull = batch != lastBatch || batch.isFull();
                        // Check whether the batch has expired.
                        // Expired batches are closed by maybeExpire, but their callbacks must be
                        // invoked only after this iteration completes: sends triggered from a
                        // callback may append more batches to the very deque being iterated.
                        // The batch's memory is released after the callbacks are invoked.
                        if (batch.maybeExpire(requestTimeout, retryBackoffMs, now, this.lingerMs, isFull)) {
                            expiredBatches.add(batch);
                            batchIterator.remove();
                        } else {
                            // Batches are in arrival order: once one is unexpired, the rest are too.
                            break;
                        }
                    }
                }
            }
        }
        return expiredBatches;
    }

    /**
     * Re-enqueue the given record batch in the accumulator to retry.
     * <p>
     * The batch goes to the head of the queue (not the tail) so the next drain pass considers it
     * first. reenqueued(now) updates the batch's attempt bookkeeping (attempts, lastAttemptMs),
     * which drives the retry-backoff decision for its next send. The batch keeps its buffer; no
     * memory is released here.
     */
    public void reenqueue(ProducerBatch batch, long now) {
        batch.reenqueued(now);
        Deque<ProducerBatch> deque = getOrCreateDeque(batch.topicPartition);
        synchronized (deque) {
            if (transactionManager == null) {
                // Non-idempotent path: the retried batch simply jumps to the front of the queue.
                deque.addFirst(batch);
            } else {
                // Idempotent/transactional path: the queue must stay ordered by sequence number.
                insertInSequenceOrder(deque, batch);
            }
        }
    }

    /**
     * Split the big batch that has been rejected and reenqueue the split batches in to the accumulator.
     * @return the number of split batches.
     */
    public int splitAndReenqueue(ProducerBatch bigBatch) {
        // Reset the estimated compression ratio to the initial value or the big batch compression ratio, whichever
        // is bigger. There are several different ways to do the reset. We chose the most conservative one to ensure
        // the split doesn't happen too often.
        CompressionRatioEstimator.setEstimation(bigBatch.topicPartition.topic(), compression,
                                                Math.max(1.0f, (float) bigBatch.compressionRatio()));
        Deque<ProducerBatch> dq = bigBatch.split(this.batchSize);
        int numSplitBatches = dq.size();
        Deque<ProducerBatch> partitionDequeue = getOrCreateDeque(bigBatch.topicPartition);
        // Drain the split batches from the tail so that, with repeated addFirst below, they end
        // up at the head of the partition deque in their original relative order.
        while (!dq.isEmpty()) {
            ProducerBatch batch = dq.pollLast();
            incomplete.add(batch);
            // We treat the newly split batches as if they are not even tried.
            synchronized (partitionDequeue) {
                if (transactionManager != null) {
                    // We should track the newly created batches since they already have assigned sequences.
                    transactionManager.addInFlightBatch(batch);
                    insertInSequenceOrder(partitionDequeue, batch);
                } else {
                    partitionDequeue.addFirst(batch);
                }
            }
        }
        return numSplitBatches;
    }

    // We will have to do extra work to ensure the queue is in order when requests are being retried and there are
    // multiple requests in flight to that partition. If the first inflight request fails to append, then all the subsequent
    // in flight requests will also fail because the sequence numbers will not be accepted.
    //
    // Further, once batches are being retried, we are reduced to a single in flight request for that partition. So when
    // the subsequent batches come back in sequence order, they will have to be placed further back in the queue.
    //
    // Note that this assumes that all the batches in the queue which have an assigned sequence also have the current
    // producer id. We will not attempt to reorder messages if the producer id has changed, we will throw an
    // IllegalStateException instead.
    //
    // Throws IllegalStateException if the batch has no sequence or is not tracked as in-flight.
    private void insertInSequenceOrder(Deque<ProducerBatch> deque, ProducerBatch batch) {
        // When we are requeing and have enabled idempotence, the reenqueued batch must always have a sequence.
        if (batch.baseSequence() == RecordBatch.NO_SEQUENCE) {
            throw new IllegalStateException("Trying to reenqueue a batch which doesn't have a sequence even " +
                    "though idempotence is enabled.");
        }

        if (transactionManager.nextBatchBySequence(batch.topicPartition) == null) {
            throw new IllegalStateException("We are reenqueueing a batch which is not tracked as part of the in flight " +
                    "requests. batch.topicPartition: " + batch.topicPartition + "; batch.baseSequence: " + batch.baseSequence());
        }

        ProducerBatch firstBatchInQueue = deque.peekFirst();
        if (firstBatchInQueue != null && firstBatchInQueue.hasSequence() && firstBatchInQueue.baseSequence() < batch.baseSequence()) {
            // The incoming batch can't be inserted at the front of the queue without violating the sequence ordering.
            // This means that the incoming batch should be placed somewhere further back.
            // We need to find the right place for the incoming batch and insert it there.
            // We will only enter this branch if we have multiple inflights sent to different brokers and we need to retry
            // the inflight batches.
            //
            // Since we reenqueue exactly one batch a time and ensure that the queue is ordered by sequence always, it
            // is a simple linear scan of a subset of the in flight batches to find the right place in the queue each time.
            List<ProducerBatch> orderedBatches = new ArrayList<>();
            // Pop off every head batch with a smaller sequence; they must stay ahead of the incoming batch.
            while (deque.peekFirst() != null && deque.peekFirst().hasSequence() && deque.peekFirst().baseSequence() < batch.baseSequence()) {
                orderedBatches.add(deque.pollFirst());
            }

            log.debug("Reordered incoming batch with sequence {} for partition {}. It was placed in the queue at " +
                    "position {}", batch.baseSequence(), batch.topicPartition, orderedBatches.size());
            // Either we have reached a point where there are batches without a sequence (ie. never been drained
            // and are hence in order by default), or the batch at the front of the queue has a sequence greater
            // than the incoming batch. This is the right place to add the incoming batch.
            deque.addFirst(batch);

            // Now we have to re insert the previously queued batches in the right order.
            for (int i = orderedBatches.size() - 1; i >= 0; --i) {
                deque.addFirst(orderedBatches.get(i));
            }

            // At this point, the incoming batch has been queued in the correct place according to its sequence.
        } else {
            deque.addFirst(batch);
        }
    }

    /**
     * Get a list of nodes whose partitions are ready to be sent, along with the earliest time at
     * which any non-sendable partition will be ready; also return whether any accumulated
     * partition batches have an unknown leader.
     * <p>
     * A destination node is ready to send data when:
     * <ol>
     * <li>There is at least one partition that is not backing off its send
     * <li><b>and</b> those partitions are not muted (to prevent reordering if
     *   {@value org.apache.kafka.clients.producer.ProducerConfig#MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION}
     *   is set to one)</li>
     * <li><b>and <i>any</i></b> of the following are true</li>
     * <ul>
     *     <li>The record set is full</li>
     *     <li>The record set has sat in the accumulator for at least lingerMs milliseconds</li>
     *     <li>The accumulator is out of memory and threads are blocking waiting for data (in this case all partitions
     *     are immediately considered ready).</li>
     *     <li>The accumulator has been closed</li>
     * </ul>
     * </ol>
     */
    public ReadyCheckResult ready(Cluster cluster, long nowMs) {
        Set<Node> readyNodes = new HashSet<>();
        long nextReadyCheckDelayMs = Long.MAX_VALUE;
        Set<String> unknownLeaderTopics = new HashSet<>();

        // Condition: memory exhausted — threads are queued in the BufferPool waiting to allocate.
        boolean exhausted = this.free.queued() > 0;

        // Examine every partition that has queued batches.
        for (Map.Entry<TopicPartition, Deque<ProducerBatch>> entry : this.batches.entrySet()) {
            TopicPartition part = entry.getKey();
            Deque<ProducerBatch> deque = entry.getValue();

            Node leader = cluster.leaderFor(part);
            // Both writers (threads appending batches) and readers synchronize on the deque
            // itself, so holding this monitor gives exclusive access to the queue.
            synchronized (deque) {
                // If the leader broker for this partition is unknown, record the topic so a
                // metadata refresh can be triggered later; normally the topic's metadata —
                // including the leader — is already known at this point.
                if (leader == null && !deque.isEmpty()) {
                    unknownLeaderTopics.add(part.topic());
                } else if (!readyNodes.contains(leader) && !isMuted(part, nowMs)) {

                    ProducerBatch batch = deque.peekFirst();
                    if (batch != null) {
                        // How long the head batch has waited: now minus its last attempt time,
                        // i.e. Math.max(0, nowMs - lastAttemptMs); for a batch that has never
                        // been sent this is the time since it was created.
                        long waitedTimeMs = batch.waitedTimeMs(nowMs);
                        // backingOff: the batch is in retry (attempts > 0) and the retry backoff
                        // interval (retry.backoff.ms, default 100ms) has not yet elapsed since
                        // the last attempt. A batch in the retry phase may only be re-sent once
                        // the backoff interval has passed. A fresh batch has attempts == 0 and
                        // is never backing off.
                        boolean backingOff = batch.attempts() > 0 && waitedTimeMs < retryBackoffMs;
                        // The longest this batch may wait before it must be sent: the retry
                        // backoff while retrying, otherwise linger.ms.
                        long timeToWaitMs = backingOff ? retryBackoffMs : lingerMs;
                        // Full when the deque holds more than one batch (only the last batch can
                        // be partially filled, so the head must be full) or when the head batch
                        // itself has reached its size limit.
                        boolean full = deque.size() > 1 || batch.isFull();
                        // Expired once the batch has waited at least timeToWaitMs; e.g. waited
                        // 120ms against a 100ms linger is expired, 60ms is not. With the default
                        // linger.ms of 0, every batch is expired as soon as it exists.
                        boolean expired = waitedTimeMs >= timeToWaitMs;
                        // Sendable when: the batch is full; or it expired; or memory is
                        // exhausted; or the producer is closing; or a flush is in progress (all
                        // buffered data must be pushed to the network immediately).
                        boolean sendable = full || expired || exhausted || closed || flushInProgress();

                        if (sendable && !backingOff) {
                            // Note what is collected here: the *leader brokers* that have
                            // sendable data — not the partitions or batches themselves. The Set
                            // deduplicates brokers that lead several ready partitions.
                            readyNodes.add(leader);
                        }
                        else { // This partition's head batch is not yet ready to send.

                            // Compute how much longer this batch needs before it becomes
                            // sendable: e.g. with linger.ms=100, a batch that has waited 60ms
                            // contributes 40ms; one that has waited 90ms contributes 10ms.
                            long timeLeftMs = Math.max(timeToWaitMs - waitedTimeMs, 0);
                            // nextReadyCheckDelayMs becomes the minimum remaining wait across
                            // all not-yet-sendable batches: the next ready check is only worth
                            // running after at least that long.
                            nextReadyCheckDelayMs = Math.min(timeLeftMs, nextReadyCheckDelayMs);
                        }
                    }
                }
            }
        }
        // Return a small result bean bundling the three outputs: the brokers that can be sent
        // to, the delay until the next ready check, and the topics whose leader is unknown.
        return new ReadyCheckResult(readyNodes, nextReadyCheckDelayMs, unknownLeaderTopics);
    }

    /**
     * Check whether there are any batches which haven't been drained
     */
    public boolean hasUndrained() {
        // Only the deques are needed here, so iterate values() rather than entrySet().
        for (Deque<ProducerBatch> deque : this.batches.values()) {
            synchronized (deque) {
                if (!deque.isEmpty()) {
                    return true;
                }
            }
        }
        return false;
    }

    /**
     * Drain all the data for the given nodes and collate them into a list of batches that will fit within the specified
     * size on a per-node basis. This method attempts to avoid choosing the same topic-node over and over.
     *  1. 遍历节点
     *  2. 获取这个节点的所有分片，遍历这些分片
     *  3. 根据分片信息拿到这个分片对应的所有batch
     *  4. 将这些batch 添加到 List<ProducerBatch> ready
     *  5. 将 node_id，ready 添加到 Map<Integer, List<ProducerBatch>> batches
     *
     * @param cluster The current cluster metadata
     * @param nodes The list of node to drain
     * @param maxSize The maximum number of bytes to drain
     * @param now The current unix time in milliseconds
     * @return A list of {@link ProducerBatch} for each node specified with total size less than the requested maxSize.
     */
    public Map<Integer, List<ProducerBatch>> drain(Cluster cluster,
                                                   Set<Node> nodes,
                                                   int maxSize,
                                                   long now) {
        if (nodes.isEmpty()) {
            return Collections.emptyMap();
        }
        // 返回的nodeid对应的发送消息batch信息
        Map<Integer, List<ProducerBatch>> batches = new HashMap<>();
        // 遍历batch所发送数据节点的broker
        for (Node node : nodes) {
            int size = 0;
            List<PartitionInfo> parts = cluster.partitionsForNode(node.id());
            List<ProducerBatch> ready = new ArrayList<>();
            // drainIndex 用于记录上次发送停止的位置，本次继续从当前位置开始发送，
            // 如果每次都是从 0 位置开始，可能会导致排在后面的分区饿死，这是一个简单的负载均衡策略
            int start = drainIndex = drainIndex % parts.size();
            do {
                PartitionInfo part = parts.get(drainIndex);
                TopicPartition tp = new TopicPartition(part.topic(), part.partition());
                // 如果是需要保障消息的强顺序性，则不能将消息添加进目标分区，否则会导致消息乱序
                if (!isMuted(tp, now)) {
                    // 获取当前分区对应的 RecordBatch 集合
                    Deque<ProducerBatch> deque = getDeque(tp);
                    if (deque != null) {
                        synchronized (deque) {
                            ProducerBatch first = deque.peekFirst();
                            if (first != null) {
                                // 当前第一个batch是否处于重试状态或者已重试过
                                boolean backoff = first.attempts() > 0 && first.waitedTimeMs(now) < retryBackoffMs;
                                // 没有重试过，或者重试已超时
                                if (!backoff) {
                                    if (size + first.estimatedSizeInBytes() > maxSize && !ready.isEmpty()) {
                                        // 单次消息数据量已达到上限，结束循环，一般对应一个请求的大小，防止请求消息过大
                                        break;
                                    } else {
                                        // 处理处于重试状态下的消息
                                        ProducerIdAndEpoch producerIdAndEpoch = null;
                                        boolean isTransactional = false;

                                        // 在重试状态下的事务处理流程，不看
                                        if (transactionManager != null) {
                                            if (!transactionManager.isSendToPartitionAllowed(tp)) {
                                                break;
                                            }

                                            producerIdAndEpoch = transactionManager.producerIdAndEpoch();
                                            if (!producerIdAndEpoch.isValid()) {
                                                break;
                                            }

                                            isTransactional = transactionManager.isTransactional();

                                            if (!first.hasSequence() && transactionManager.hasUnresolvedSequence(first.topicPartition)) {
                                                break;
                                            }

                                            int firstInFlightSequence = transactionManager.firstInFlightSequence(first.topicPartition);
                                            if (firstInFlightSequence != RecordBatch.NO_SEQUENCE && first.hasSequence() && first.baseSequence() != firstInFlightSequence) {
                                                break;
                                            }
                                        }

                                        // Partitions are visited round-robin starting from a rotating index, and only the
                                        // first batch of each deque is taken per pass, so draining stays fair across partitions
                                        ProducerBatch batch = deque.pollFirst();
                                        if (producerIdAndEpoch != null && !batch.hasSequence()) {
                                            batch.setProducerState(producerIdAndEpoch, transactionManager.sequenceNumber(batch.topicPartition), isTransactional);
                                            transactionManager.incrementSequenceNumber(batch.topicPartition, batch.recordCount);
                                            log.debug("Assigned producerId {} and producerEpoch {} to batch with base sequence " +
                                                            "{} being sent to partition {}", producerIdAndEpoch.producerId, producerIdAndEpoch.epoch, batch.baseSequence(), tp);

                                            transactionManager.addInFlightBatch(batch);
                                        }
                                        // Closing the batch makes it read-only: no further records can be appended
                                        batch.close();
                                        size += batch.records().sizeInBytes();
                                        ready.add(batch);
                                        batch.drained(now);
                                    }
                                }
                            }
                        }
                    }
                }
                // Advance drainIndex so the next drain pass starts from the following partition
                this.drainIndex = (this.drainIndex + 1) % parts.size();
            } while (start != drainIndex);

            // Group all batches destined for the same broker node together
            batches.put(node.id(), ready);
        }
        return batches;
    }

    /**
     * Look up the batch deque for the given topic-partition.
     *
     * @return the deque, or {@code null} if none has been created yet
     */
    private Deque<ProducerBatch> getDeque(TopicPartition tp) {
        return this.batches.get(tp);
    }

    /**
     * Get the deque for the given topic-partition, creating it if necessary.
     *
     * Uses {@code putIfAbsent} so that concurrent callers racing to create the
     * deque all end up sharing whichever instance won the race.
     */
    private Deque<ProducerBatch> getOrCreateDeque(TopicPartition tp) {
        Deque<ProducerBatch> existing = this.batches.get(tp);
        if (existing == null) {
            Deque<ProducerBatch> created = new ArrayDeque<>();
            existing = this.batches.putIfAbsent(tp, created);
            if (existing == null) {
                // Our deque won the race (or there was no race).
                existing = created;
            }
        }
        return existing;
    }

    /**
     * Deallocate the record batch: drop it from the incomplete set and return its
     * buffer to the pool.
     *
     * @param batch the batch whose resources should be released
     */
    public void deallocate(ProducerBatch batch) {
        incomplete.remove(batch);
        // Split batches are allocated outside the buffer pool, so their buffers
        // must not be returned to it.
        if (batch.isSplitBatch())
            return;
        free.deallocate(batch.buffer(), batch.initialCapacity());
    }

    /**
     * Get the buffer pool's remaining capacity in bytes.
     *
     * Package private for unit tests.
     */
    long bufferPoolAvailableMemory() {
        return this.free.availableMemory();
    }

    /**
     * Are there any threads currently waiting on a flush?
     *
     * Package private for tests.
     */
    boolean flushInProgress() {
        final int flushWaiters = flushesInProgress.get();
        return flushWaiters > 0;
    }

    /**
     * Read-only view over the per-partition batch deques. Visible for testing.
     */
    Map<TopicPartition, Deque<ProducerBatch>> batches() {
        return Collections.unmodifiableMap(this.batches);
    }

    /**
     * Initiate the flushing of data from the accumulator: bumping the counter
     * makes all pending requests immediately ready to send.
     */
    public void beginFlush() {
        // Return value is unused, so incrementAndGet is equivalent to getAndIncrement.
        this.flushesInProgress.incrementAndGet();
    }

    /**
     * Are there any threads currently appending messages?
     */
    private boolean appendsInProgress() {
        final int appenders = appendsInProgress.get();
        return appenders > 0;
    }

    /**
     * Mark all partitions as ready to send and block until the send is complete.
     *
     * The flush counter is decremented in a finally block so that an interrupt
     * during the wait still releases the flush.
     *
     * @throws InterruptedException if interrupted while awaiting batch completion
     */
    public void awaitFlushCompletion() throws InterruptedException {
        try {
            for (ProducerBatch incompleteBatch : this.incomplete.copyAll()) {
                incompleteBatch.produceFuture.await();
            }
        } finally {
            this.flushesInProgress.decrementAndGet();
        }
    }

    /**
     * Check whether there are any pending batches (whether sent or unsent).
     */
    public boolean hasIncomplete() {
        return !incomplete.isEmpty();
    }

    /**
     * Fail all incomplete batches; only called when the sender is closed forcefully.
     *
     * Aborting repeats while appender threads are still active in order to
     * 1. avoid losing batches appended concurrently, and
     * 2. free memory for threads blocked on a full buffer.
     * This is a tight loop but should complete very quickly.
     */
    public void abortIncompleteBatches() {
        while (true) {
            abortBatches();
            if (!appendsInProgress())
                break;
        }
        // No thread will append past this point because they all see the close
        // flag set, so one final abort sweeps up any batch added by the last
        // appending thread.
        abortBatches();
        this.batches.clear();
    }

    /**
     * Abort all incomplete batches with a generic "closed forcefully" error.
     */
    private void abortBatches() {
        final KafkaException closedForcefully = new KafkaException("Producer is closed forcefully.");
        abortBatches(closedForcefully);
    }

    /**
     * Abort all incomplete batches (whether they have been sent or not).
     *
     * @param reason the exception each batch is failed with
     */
    void abortBatches(final RuntimeException reason) {
        for (ProducerBatch incompleteBatch : incomplete.copyAll()) {
            Deque<ProducerBatch> partitionQueue = getDeque(incompleteBatch.topicPartition);
            // Stop in-progress appends and unlink the batch while holding the
            // deque lock, so no writer can touch it afterwards.
            synchronized (partitionQueue) {
                incompleteBatch.abortRecordAppends();
                partitionQueue.remove(incompleteBatch);
            }
            incompleteBatch.abort(reason);
            deallocate(incompleteBatch);
        }
    }

    /**
     * Abort any batches which have not been drained.
     *
     * @param reason the exception each aborted batch is failed with
     */
    void abortUndrainedBatches(RuntimeException reason) {
        for (ProducerBatch candidate : incomplete.copyAll()) {
            Deque<ProducerBatch> partitionQueue = getDeque(candidate.topicPartition);
            boolean removed = false;
            synchronized (partitionQueue) {
                // With a transaction manager, a batch counts as undrained until a
                // sequence has been assigned; otherwise, until it has been closed.
                boolean undrained = (transactionManager != null)
                        ? !candidate.hasSequence()
                        : !candidate.isClosed();
                if (undrained) {
                    removed = true;
                    candidate.abortRecordAppends();
                    partitionQueue.remove(candidate);
                }
            }
            // Abort and deallocate outside the deque lock.
            if (removed) {
                candidate.abort(reason);
                deallocate(candidate);
            }
        }
    }

    /**
     * Mute the given partition with no expiry (throttle deadline of Long.MAX_VALUE).
     */
    public void mutePartition(TopicPartition tp) {
        this.muted.put(tp, Long.MAX_VALUE);
    }

    /**
     * Unmute the partition, recording the time until which it remains throttled.
     *
     * @param throttleUntilTimeMs timestamp (ms) until which the partition stays throttled
     */
    public void unmutePartition(TopicPartition tp, long throttleUntilTimeMs) {
        this.muted.put(tp, throttleUntilTimeMs);
    }

    /**
     * Close this accumulator and force all the record buffers to be drained
     */
    public void close() {
        // Appending threads observe this flag and stop adding new messages
        // (see abortIncompleteBatches, which relies on exactly that).
        this.closed = true;
    }

    /**
     * Metadata about a record just appended to the record accumulator.
     */
    public final static class RecordAppendResult {
        /** Future resolving to the metadata of the appended record. */
        public final FutureRecordMetadata future;
        /** Whether the batch written to is full. */
        public final boolean batchIsFull;
        /** Whether a new batch was created for this append. */
        public final boolean newBatchCreated;

        public RecordAppendResult(FutureRecordMetadata future, boolean batchIsFull, boolean newBatchCreated) {
            this.future = future;
            this.batchIsFull = batchIsFull;
            this.newBatchCreated = newBatchCreated;
        }
    }

    /**
     * The set of nodes that have at least one complete record batch in the
     * accumulator, plus scheduling/metadata hints for the caller.
     */
    public final static class ReadyCheckResult {
        /** Nodes with at least one complete batch ready to send. */
        public final Set<Node> readyNodes;
        /** Delay (ms) before the next readiness check should run. */
        public final long nextReadyCheckDelayMs;
        /** Topics whose leader is currently unknown and needs a metadata refresh. */
        public final Set<String> unknownLeaderTopics;

        public ReadyCheckResult(Set<Node> readyNodes, long nextReadyCheckDelayMs, Set<String> unknownLeaderTopics) {
            this.readyNodes = readyNodes;
            this.nextReadyCheckDelayMs = nextReadyCheckDelayMs;
            this.unknownLeaderTopics = unknownLeaderTopics;
        }
    }

}
