/*
 * Copyright 2017-2020 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.openmessaging.storage.dledger;

import com.alibaba.fastjson.JSON;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import java.util.stream.Collectors;

import io.openmessaging.storage.dledger.entry.DLedgerEntry;
import io.openmessaging.storage.dledger.exception.DLedgerException;
import io.openmessaging.storage.dledger.protocol.AppendEntryResponse;
import io.openmessaging.storage.dledger.protocol.DLedgerRequestCode;
import io.openmessaging.storage.dledger.protocol.DLedgerResponseCode;
import io.openmessaging.storage.dledger.protocol.PushEntryRequest;
import io.openmessaging.storage.dledger.protocol.PushEntryResponse;
import io.openmessaging.storage.dledger.store.DLedgerMemoryStore;
import io.openmessaging.storage.dledger.store.DLedgerStore;
import io.openmessaging.storage.dledger.store.file.DLedgerMmapFileStore;
import io.openmessaging.storage.dledger.utils.DLedgerUtils;
import io.openmessaging.storage.dledger.utils.Pair;
import io.openmessaging.storage.dledger.utils.PreConditions;
import io.openmessaging.storage.dledger.utils.Quota;

import static io.openmessaging.storage.dledger.protocol.PushEntryRequest.Type.COMPARE;
import static io.openmessaging.storage.dledger.protocol.PushEntryRequest.Type.TRUNCATE;

/**
 * 条目推送器
 *
 * 说明:
 * 1、Leader启动时new[条目推送器]对象.并且为每个peer都创建一个 {@link EntryDispatcher} 线程,保存到 {@link #dispatcherMap} 这个map中.
 * 2、当给peer追加条目之后,如果peer保存条目成功,则会把这个条目的index下标,记录到 {@link #peerWaterMarksByTerm} map中.
 * 3、
 *
 *
 * 什么时候提交? {@link PushEntryRequest.Type#COMMIT}
 *  当Leader已经把所有的条目,都发送给Follower之后,  writeIndex会大于Leader的endIndex
 *  此时就给Follower发送一个 COMMIT 类型的请求.
 */
public class DLedgerEntryPusher {

    private static final Logger logger = LoggerFactory.getLogger(DLedgerEntryPusher.class);

    private final DLedgerConfig dLedgerConfig;
    private final DLedgerStore dLedgerStore;

    private final MemberState memberState;

    private final DLedgerRpcService dLedgerRpcService;

    /**
     * Per-term water marks: the highest entry index each peer has successfully stored.
     *
     * When the leader pushes the entry at some index to a follower and the follower
     * persists it, the leader records that index here under the follower's peerId.
     *
     * With a cluster of 3 nodes the inner map holds 3 keys (one per peer).
     *
     * Example content for a single term:
     * peerId    index
     * n1        5     follower n1 has successfully stored the entry at index 5.
     * n2        6     follower n2 has successfully stored the entry at index 6.
     * n3        7     follower n3 has successfully stored the entry at index 7.
     * n4        8     follower n4 has successfully stored the entry at index 8.
     * n0        9     (n0 is the leader; index 9 is the latest entry persisted on the
     *                  leader's own disk, not yet acked by a quorum of followers)
     */
    private final Map<Long/*term*/, ConcurrentMap<String/*peerId*/, Long/*index*/>> peerWaterMarksByTerm = new ConcurrentHashMap<>();

    /**
     * Follower-side handler for push requests coming from the leader.
     * See the class-level comment on {@code EntryHandler} for details.
     */
    private final EntryHandler entryHandler;

    /**
     * Checks whether pending appends have been acknowledged by a quorum of nodes.
     */
    private final QuorumAckChecker quorumAckChecker;
    /**
     * Leader side: after persisting a new entry, the appending thread waits for a
     * quorum of follower acks. Once reached, the pending future stored here is
     * completed and the client receives its RPC response.
     */
    private Map<Long/*term*/, ConcurrentMap<Long/*index*/, TimeoutFuture<AppendEntryResponse>>> pendingAppendResponsesByTerm = new ConcurrentHashMap<>();


    /**
     * On the leader, one {@link EntryDispatcher} thread is created per remote peer.
     */
    private final Map<String/*peerId*/, EntryDispatcher> dispatcherMap = new HashMap<>();

    public DLedgerEntryPusher(DLedgerConfig dLedgerConfig,
                              MemberState memberState,
                              DLedgerStore dLedgerStore,
                              DLedgerRpcService dLedgerRpcService) {
        this.dLedgerConfig = dLedgerConfig;
        this.memberState = memberState;
        this.dLedgerStore = dLedgerStore;
        this.dLedgerRpcService = dLedgerRpcService;
        // [Leader节点使用]给每个Follower都创建一个 EntryDispatcher        (如果当前节点是Follower节点,那么线程开启之后就停止了.)
        for (String peer : memberState.getPeerMap().keySet()) {
            if (!peer.equals(memberState.getSelfId())) {
                dispatcherMap.put(peer, new EntryDispatcher(peer, logger));
            }
        }
        // [Follower节点使用]创建条目处理器——处理Leader发送的 PUSH请求.
        this.entryHandler = new EntryHandler(logger);
        this.quorumAckChecker = new QuorumAckChecker(logger);
    }

    /**
     * Starts all worker threads: the follower-side entry handler, the quorum
     * ack checker, and one entry dispatcher per remote peer.
     */
    public void startup() {
        entryHandler.start();
        quorumAckChecker.start();
        dispatcherMap.values().forEach(EntryDispatcher::start);
    }

    /**
     * Stops all worker threads started by {@link #startup()}.
     */
    public void shutdown() {
        entryHandler.shutdown();
        quorumAckChecker.shutdown();
        dispatcherMap.values().forEach(EntryDispatcher::shutdown);
    }

    /**
     * Entry point for PUSH requests received from the leader; delegates to the
     * follower-side {@link EntryHandler}.
     *
     * @param request the push request (APPEND/COMPARE/TRUNCATE/COMMIT)
     * @return a future completed once the request has been processed
     * @throws Exception if the handler rejects the request
     */
    public CompletableFuture<PushEntryResponse> handlePush(PushEntryRequest request) throws Exception {
        return entryHandler.handlePush(request);
    }

    /**
     * Lazily initializes the per-peer water marks for {@code term} in
     * {@link #peerWaterMarksByTerm}; every known peer starts at index -1.
     *
     * @param term the term to initialize
     * @param env  caller tag, used only for the log message
     */
    private void checkTermForWaterMark(long term, String env) {
        // computeIfAbsent is atomic on ConcurrentHashMap, so concurrent callers
        // (e.g. the unsynchronized call from QuorumAckChecker.doWork) cannot
        // both build and log the initial map, unlike the previous
        // containsKey + putIfAbsent check-then-act sequence.
        peerWaterMarksByTerm.computeIfAbsent(term, t -> {
            logger.info("Initialize the watermark in {} for term={}", env, t);
            ConcurrentMap<String, Long> waterMarks = new ConcurrentHashMap<>();
            for (String peer : memberState.getPeerMap().keySet()) {
                waterMarks.put(peer, -1L);
            }
            return waterMarks;
        });
    }

    /**
     * Lazily initializes the pending-append map for {@code term} in
     * {@link #pendingAppendResponsesByTerm}.
     *
     * @param term the term to initialize
     * @param env  caller tag, used only for the log message
     */
    private void checkTermForPendingMap(long term, String env) {
        // Atomic on ConcurrentHashMap: avoids the duplicate-create window of the
        // previous containsKey + putIfAbsent sequence.
        pendingAppendResponsesByTerm.computeIfAbsent(term, t -> {
            logger.info("Initialize the pending append map in {} for term={}", env, t);
            return new ConcurrentHashMap<>();
        });
    }

    /**
     * Records that {@code peerId} has durably stored entries up to {@code index}
     * in {@code term}. The water mark is monotonic: it only ever moves forward.
     *
     * @param term   term the entry belongs to
     * @param peerId peer that acknowledged the entry
     * @param index  highest index the peer has stored
     */
    private void updatePeerWaterMark(long term, String peerId, long index) {
        synchronized (peerWaterMarksByTerm) {
            checkTermForWaterMark(term, "updatePeerWaterMark");
            ConcurrentMap<String, Long> waterMarks = peerWaterMarksByTerm.get(term);
            Long current = waterMarks.get(peerId);
            if (current < index) {
                waterMarks.put(peerId, index);
            }
        }
    }

    /**
     * Returns the highest entry index known to be stored by {@code peerId} in
     * {@code term}; -1 if the peer has not acked anything in that term yet.
     */
    public long getPeerWaterMark(long term, String peerId) {
        synchronized (peerWaterMarksByTerm) {
            checkTermForWaterMark(term, "getPeerWaterMark");
            ConcurrentMap<String, Long> waterMarks = peerWaterMarksByTerm.get(term);
            return waterMarks.get(peerId);
        }
    }

    /**
     * Tells whether the number of appends still waiting for a quorum ack in
     * {@code currTerm} exceeds the configured maximum.
     */
    public boolean isPendingFull(long currTerm) {
        checkTermForPendingMap(currTerm, "isPendingFull");
        int pendingCount = pendingAppendResponsesByTerm.get(currTerm).size();
        return pendingCount > dLedgerConfig.getMaxPendingRequestsNum();
    }

    /**
     * Called by the leader after appending a new entry to its own store; returns
     * a future that completes once a quorum of nodes has acknowledged the entry.
     *
     * @param entry       the freshly appended entry
     * @param isBatchWait whether the append was part of a batch
     * @return a future completed by {@link QuorumAckChecker} on quorum ack or
     *         timeout; already completed when this node is the only member
     */
    public CompletableFuture<AppendEntryResponse> waitAck(DLedgerEntry entry, boolean isBatchWait) {
        // The leader counts as having "replicated" to itself immediately.
        updatePeerWaterMark(entry.getTerm(), memberState.getSelfId(), entry.getIndex());

        // Single-node cluster: the leader alone is the quorum — ack right away.
        if (memberState.getPeerMap().size() == 1) {
            AppendEntryResponse response = new AppendEntryResponse();
            response.setGroup(memberState.getGroup());
            response.setLeaderId(memberState.getSelfId());
            response.setIndex(entry.getIndex());
            response.setTerm(entry.getTerm());
            response.setPos(entry.getPos());
            if (isBatchWait) {
                return BatchAppendFuture.newCompletedFuture(entry.getPos(), response);
            }
            // Already-completed future: the caller does not wait at all.
            return AppendFuture.newCompletedFuture(entry.getPos(), response);
        }

        // Multi-node cluster: park a pending future keyed by (term, index).
        // QuorumAckChecker completes it once a quorum of peers has acked.
        checkTermForPendingMap(entry.getTerm(), "waitAck");
        AppendFuture<AppendEntryResponse> future;
        if (isBatchWait) {
            future = new BatchAppendFuture<>(dLedgerConfig.getMaxWaitAckTimeMs());
        } else {
            future = new AppendFuture<>(dLedgerConfig.getMaxWaitAckTimeMs());
        }
        future.setPos(entry.getPos());
        CompletableFuture<AppendEntryResponse> previous =
            pendingAppendResponsesByTerm.get(entry.getTerm()).put(entry.getIndex(), future);
        if (previous != null) {
            logger.warn("[MONITOR] get old wait at index={}", entry.getIndex());
        }
        return future;
    }

    /**
     * Nudges every dispatcher thread so newly appended entries are pushed to the
     * followers without waiting for the next poll interval.
     */
    public void wakeUpDispatchers() {
        dispatcherMap.values().forEach(EntryDispatcher::wakeup);
    }

    /**
     * <pre>
     * This thread will check the quorum index and complete the pending requests.
     *
     * Only a leader node does real work in this thread.
     *
     * How this class is used:
     *
     *   On the leader:
     *       1. On receiving new data, the leader first appends it to its own store: {@link DLedgerStore#appendAsLeader }
     *       2. The appending thread then waits for a quorum of follower acks: {@link DLedgerEntryPusher#waitAck }
     *   While the leader waits for that quorum:
     *       1. The per-follower {@link EntryDispatcher} threads send
     *          {@link DLedgerRequestCode#PUSH} - {@link PushEntryRequest.Type#APPEND} RPC requests to the followers.
     *       2. Meanwhile this {@link QuorumAckChecker} thread keeps scanning the {@link #pendingAppendResponsesByTerm} map:
     *          - if {@link DLedgerConfig#maxWaitAckTimeMs} elapses without a quorum of follower acks, the pending
     *            append has timed out and is completed exceptionally (WAIT_QUORUM_ACK_TIMEOUT).
     *          - on the normal path it first computes quorumIndex (meaning: every index up to quorumIndex has been
     *            acked by a quorum of nodes), then finds the pending requests at or below quorumIndex and completes
     *            their futures: future.complete(response);
     * </pre>
     */
    private class QuorumAckChecker extends ShutdownAbleThread {

        /**
         * Last time the water-mark map ({@link #peerWaterMarksByTerm}) was dumped to the log (every 3s).
         */
        private long lastPrintWatermarkTimeMs = System.currentTimeMillis();
        /**
         * Last time leaked pending requests were swept.
         * Any request at an index at or below quorumIndex has been acked by a quorum of
         * nodes, but the fast path can skip some of those futures ("leaks"), so they
         * are completed by a periodic sweep instead.
         */
        private long lastCheckLeakTimeMs = System.currentTimeMillis();
        // Highest quorum index already processed; the fast path only scans (lastQuorumIndex, quorumIndex].
        private long lastQuorumIndex = -1;

        public QuorumAckChecker(Logger logger) {
            super("QuorumAckChecker-" + memberState.getSelfId(), logger);
        }

        @Override
        public void doWork() {
            try {
                // Dump role/term/ledger state and the water marks every 3 seconds.
                if (DLedgerUtils.elapsed(lastPrintWatermarkTimeMs) > 3000) {
                    logger.info("[{}][{}] term={} ledgerBegin={} ledgerEnd={} committed={} watermarks={}", memberState.getSelfId(), memberState.getRole(), memberState.currTerm(), dLedgerStore.getLedgerBeginIndex(), dLedgerStore.getLedgerEndIndex(), dLedgerStore.getCommittedIndex(), JSON.toJSONString(peerWaterMarksByTerm));
                    lastPrintWatermarkTimeMs = System.currentTimeMillis();
                }
                // Only the leader does real work; any other role idles in 1ms waits.
                if (!memberState.isLeader()) {
                    waitForRunning(1);
                    return;
                }

                // Current term on this leader.
                long currTerm = memberState.currTerm();
                checkTermForPendingMap(currTerm, "QuorumAckChecker");
                checkTermForWaterMark(currTerm, "QuorumAckChecker");
                checkTermChanged(currTerm);


                // Example content of this map:
                // peerId    index
                // n1        5     follower n1 has stored entries up to index 5.
                // n2        6     follower n2 has stored entries up to index 6.
                // n3        7     follower n3 has stored entries up to index 7.   -> every APPEND at index <= 7 is acked by a quorum.
                // n4        8     follower n4 has stored entries up to index 8.
                // n0        9     (n0 is the leader; index 9 is persisted locally but not yet quorum-acked)
                ConcurrentMap<String/*peerId*/, Long/*index*/> peerWaterMarks = peerWaterMarksByTerm.get(currTerm);

                // Collect all indexes, sorted in descending order.
                List<Long/*index*/> sortedWaterMarks = peerWaterMarks.values()
                                                            .stream()
                                                            .sorted(Comparator.reverseOrder())
                                                            .collect(Collectors.toList());
                // With the example above quorumIndex = 7: the median of the descending list
                // is the highest index already stored by a majority of the nodes.
                long quorumIndex = sortedWaterMarks.get(sortedWaterMarks.size() / 2);

                // Only the leader advances the committed index, to the quorum-acked index.
                dLedgerStore.updateCommittedIndex(currTerm, quorumIndex);

                ConcurrentMap<Long/*index*/, TimeoutFuture<AppendEntryResponse>> responses = pendingAppendResponsesByTerm.get(currTerm);
                boolean needCheck = false;
                // Number of pending appends acked in this pass.
                int ackNum = 0;
                // Fast path: walk from quorumIndex down to (exclusive) lastQuorumIndex and
                // complete every pending future in that range.
                for (long entryIndex = quorumIndex; entryIndex > lastQuorumIndex; entryIndex--) {
                    try {
                        CompletableFuture<AppendEntryResponse> future = responses.remove(entryIndex);
                        // A null future may hide lower pending entries: e.g. index 6 is absent
                        // while index 5 is still pending. Such "leaked" futures are handled by
                        // the periodic leak check below.
                        if (future == null) {
                            needCheck = true;
                            break;
                        } else if (!future.isDone()) {
                            // Complete the response if it has not been completed yet.
                            AppendEntryResponse response = new AppendEntryResponse();
                            response.setGroup(memberState.getGroup());
                            response.setTerm(currTerm);
                            response.setIndex(entryIndex);
                            response.setLeaderId(memberState.getSelfId());
                            response.setPos(((AppendFuture) future).getPos());
                            // Completing the future unblocks the waiter in DLedgerEntryPusher#waitAck.
                            future.complete(response);
                        }
                        ackNum++;
                    } catch (Throwable t) {
                        logger.error("Error in ack to index={} term={}", entryIndex, currTerm, t);
                    }
                }

                if (ackNum == 0) {
                    // Nothing new was acked: check whether the requests above quorumIndex
                    // (i.e. not yet acked by a quorum of nodes) have timed out.
                    for (long i = quorumIndex + 1; i < Integer.MAX_VALUE; i++) {
                        TimeoutFuture<AppendEntryResponse> future = responses.get(i);
                        if (future == null) {
                            break;
                        } else if (future.isTimeOut()) {
                            AppendEntryResponse response = new AppendEntryResponse();
                            response.setGroup(memberState.getGroup());
                            response.setCode(DLedgerResponseCode.WAIT_QUORUM_ACK_TIMEOUT.getCode());
                            response.setTerm(currTerm);
                            response.setIndex(i);
                            response.setLeaderId(memberState.getSelfId());
                            future.complete(response);
                        } else {
                            break;
                        }
                    }
                    waitForRunning(1);
                }

                // Leak check: once per second, or immediately when the fast path hit a gap.
                if (DLedgerUtils.elapsed(lastCheckLeakTimeMs) > 1000 || needCheck) {
                    // Refresh the leader's own water mark to its ledger end index.
                    // NOTE(review): unlike waitAck (which records the appended entry's index),
                    // this uses getLedgerEndIndex() — presumably to re-sync after gaps; confirm.
                    updatePeerWaterMark(currTerm, memberState.getSelfId(), dLedgerStore.getLedgerEndIndex());

                    // Complete any leaked pending futures.
                    for (Map.Entry<Long, TimeoutFuture<AppendEntryResponse>> futureEntry : responses.entrySet()) {
                        Long index = futureEntry.getKey();
                        // Below quorumIndex means a quorum has acked, so complete normally.
                        if (index < quorumIndex) {
                            AppendEntryResponse response = new AppendEntryResponse();
                            response.setGroup(memberState.getGroup());
                            response.setTerm(currTerm);
                            response.setIndex(index);
                            response.setLeaderId(memberState.getSelfId());
                            response.setPos(((AppendFuture) futureEntry.getValue()).getPos());
                            futureEntry.getValue().complete(response);
                            responses.remove(index);
                        }
                    }
                    lastCheckLeakTimeMs = System.currentTimeMillis();
                }
                // Remember the processed quorum index; the next pass only needs to scan
                // the (lastQuorumIndex, new quorumIndex] range.
                lastQuorumIndex = quorumIndex;
            } catch (Throwable t) {
                DLedgerEntryPusher.logger.error("Error in {}", getName(), t);
                DLedgerUtils.sleep(100);
            }
        }

        /**
         * [Leader] Clears state belonging to stale terms. Pending append futures
         * from an older term are completed with
         * {@link DLedgerResponseCode#TERM_CHANGED}, and stale water marks are dropped.
         *
         * @param currTerm the current term on this leader
         */
        private void checkTermChanged(long currTerm) {
            // More than one term pending means some requests belong to an older term.
            if (pendingAppendResponsesByTerm.size() > 1) {
                for (Long term : pendingAppendResponsesByTerm.keySet()) {
                    // Requests made in the current term stay pending.
                    if (term == currTerm) {
                        continue;
                    }
                    // Fail every request from the stale term with TERM_CHANGED.
                    for (Map.Entry<Long/*index*/, TimeoutFuture<AppendEntryResponse>> futureEntry : pendingAppendResponsesByTerm.get(term).entrySet()) {
                        AppendEntryResponse response = new AppendEntryResponse();
                        response.setGroup(memberState.getGroup());
                        response.setIndex(futureEntry.getKey());
                        response.setCode(DLedgerResponseCode.TERM_CHANGED.getCode());
                        response.setLeaderId(memberState.getLeaderId());
                        logger.info("[TermChange] Will clear the pending response index={} for term changed from {} to {}", futureEntry.getKey(), term, currTerm);
                        futureEntry.getValue().complete(response);
                    }
                    pendingAppendResponsesByTerm.remove(term);
                }
            }
            if (peerWaterMarksByTerm.size() > 1) {
                for (Long term : peerWaterMarksByTerm.keySet()) {
                    if (term == currTerm) {
                        continue;
                    }
                    logger.info("[TermChange] Will clear the watermarks for term changed from {} to {}", term, currTerm);
                    peerWaterMarksByTerm.remove(term);
                }
            }
        }
    }

    /**
     * This thread will be activated by the leader.
     * 该线程将由领导者激活。
     *
     * This thread will push the entry to follower(identified by peerId) and update the completed pushed index to index map.
     * 该线程会将条目推送到关注者（由 peerId 标识）并将完成的推送索引index更新到索引map中 —— {@link #peerWaterMarksByTerm}
     *
     * Should generate a single thread for each peer.
     * Leader将会为每个peer节点生成一个线程. 详见:{@link DLedgerEntryPusher#DLedgerEntryPusher}
     *
     * The push has 4 types:
     * 推送有 4 种类型：
     *   APPEND :    append the entries to the follower
     *               将条目附加到关注者
     *
     *   COMPARE :   if the leader changes, the new leader should compare its entries to follower's
     *               如果领导者发生变化，新领导者应该将其条目与跟随者的条目进行比较
     *
     *   TRUNCATE :  if the leader finished comparing by an index, the leader will send a request to truncate the follower's ledger
     *               如果领导者通过索引完成比较，领导者将发送请求截断追随者的账本
     *
     *   COMMIT:     usually, the leader will attach the committed index with the APPEND request, but if the append requests are few and scattered,
     *               the leader will send a pure request to inform the follower of committed index.
     *               通常leader会在APPEND请求中附加commited index，但是如果append请求少且分散，leader会发送一个纯请求通知follower commit index。
     *
     *   The common transferring between these types are as following:
     *   这些类型之间的常见转换如下：
     *
     *   COMPARE ---- TRUNCATE ---- APPEND ---- COMMIT
     *   ^                             |
     *   |---<-----<------<-------<----|
     *
     */
    private class EntryDispatcher extends ShutdownAbleThread {

        // Current push mode toward this peer; starts in COMPARE until the ledgers are aligned.
        private AtomicReference<PushEntryRequest.Type> type = new AtomicReference<>(COMPARE);
        private long lastPushCommitTimeMs = -1;
        // The follower this dispatcher serves.
        private String peerId;
        private long compareIndex = -1;
        /**
         * Index of the next entry the leader will push to this follower.
         */
        private long writeIndex = -1;

        private long term = -1;
        private String leaderId = null;
        private long lastCheckLeakTimeMs = System.currentTimeMillis();
        /**
         * APPEND requests already sent and awaiting a response; used to detect
         * timed-out pushes:
         *    pendingMap.put(index, System.currentTimeMillis());
         */
        private ConcurrentMap<Long/*index*/, Long/*send timestamp, currentTimeMillis()*/> pendingMap = new ConcurrentHashMap<>();
        /**
         * Maximum number of in-flight requests; pushing pauses while
         * pendingMap.size() >= maxPendingSize.
         */
        private static final int maxPendingSize = 1000;

        /**
         * In-flight batched APPENDs: first entry index of the batch -> (send time, entry count).
         * NOTE(review): inferred from sendBatchAppendEntryRequest's usage — confirm.
         */
        private ConcurrentMap<Long, Pair<Long, Integer>> batchPendingMap = new ConcurrentHashMap<>();
        /**
         * Reusable request object for batched APPEND pushes.
         */
        private PushEntryRequest batchAppendEntryRequest = new PushEntryRequest();
        /**
         * Push-bandwidth throttle toward this peer (see checkQuotaAndWait).
         */
        private Quota quota = new Quota(dLedgerConfig.getPeerPushQuota());

        /**
         * @param peerId the follower this dispatcher pushes entries to
         * @param logger shared logger
         */
        public EntryDispatcher(String peerId, Logger logger) {
            super("EntryDispatcher-" + memberState.getSelfId() + "-" + peerId, logger);
            this.peerId = peerId;
        }

        /**
         * Verifies this node is still the leader and refreshes the cached
         * term/leaderId when they are stale; after a refresh the dispatcher falls
         * back to COMPARE mode so the ledgers get re-aligned first.
         *
         * @return true if this node is the leader and the dispatcher may proceed
         */
        private boolean checkAndFreshState() {
            // Only the leader pushes entries.
            if (!memberState.isLeader()) {
                return false;
            }
            // On first run (fields still at their defaults) or after a term/leader
            // change, refresh the cached state under the memberState lock.
            if (term != memberState.currTerm() || leaderId == null || !leaderId.equals(memberState.getLeaderId())) {
                synchronized (memberState) {
                    // Re-check under the lock (double-checked).
                    if (!memberState.isLeader()) {
                        return false;
                    }
                    PreConditions.check(memberState.getSelfId().equals(memberState.getLeaderId()), DLedgerResponseCode.UNKNOWN);

                    term = memberState.currTerm();
                    // The leader id is this node's own id (only the leader runs this thread;
                    // on any other role the loop exits above).
                    leaderId = memberState.getSelfId();
                    // After (re-)initialization the ledgers must be compared before appending.
                    changeState(-1, COMPARE);
                }
            }
            return true;
        }

        /**
         * Builds a push request of the given {@code target} type addressed to this
         * dispatcher's peer, carrying the leader's current committed index.
         *
         * @param entry  the entry to ship (null for COMMIT-only requests)
         * @param target the push type (APPEND/COMPARE/TRUNCATE/COMMIT)
         * @return the populated request
         */
        private PushEntryRequest buildPushRequest(DLedgerEntry entry, PushEntryRequest.Type target) {
            PushEntryRequest pushRequest = new PushEntryRequest();
            pushRequest.setGroup(memberState.getGroup());
            // Destination peer and the current cluster leader id.
            pushRequest.setRemoteId(peerId);
            pushRequest.setLeaderId(leaderId);
            pushRequest.setTerm(term);
            pushRequest.setEntry(entry);
            pushRequest.setType(target);
            // Piggy-back the committed index so the follower can advance its commit point.
            pushRequest.setCommitIndex(dLedgerStore.getCommittedIndex());
            return pushRequest;
        }

        /**
         * Re-initializes the reusable batch APPEND request with the current
         * group/peer/leader/term and clears previously batched entries.
         * NOTE(review): clear() runs last — presumably it resets only the batched
         * entries and not the header fields set above; confirm in PushEntryRequest.
         */
        private void resetBatchAppendEntryRequest() {
            batchAppendEntryRequest.setGroup(memberState.getGroup());
            batchAppendEntryRequest.setRemoteId(peerId);
            batchAppendEntryRequest.setLeaderId(leaderId);
            batchAppendEntryRequest.setTerm(term);
            batchAppendEntryRequest.setType(PushEntryRequest.Type.APPEND);
            batchAppendEntryRequest.clear();
        }

        /**
         * Throttles pushing when this peer has fallen far behind. Throttling only
         * applies when more than maxPendingSize entries remain to be pushed, the
         * store is file-backed, and the entry lags the leader's max write position
         * by at least the configured throttle point; the dispatcher then sleeps for
         * the remainder of the quota window once the byte quota is exhausted.
         *
         * @param entry the entry about to be pushed
         */
        private void checkQuotaAndWait(DLedgerEntry entry) {
            // Peer nearly caught up (within the in-flight window): no throttling.
            if (dLedgerStore.getLedgerEndIndex() - entry.getIndex() <= maxPendingSize) {
                return;
            }
            // No throttling for the in-memory store.
            if (dLedgerStore instanceof DLedgerMemoryStore) {
                return;
            }
            DLedgerMmapFileStore mmapFileStore = (DLedgerMmapFileStore) dLedgerStore;
            // Lag (in bytes) below the throttle point: no throttling.
            if (mmapFileStore.getDataFileList().getMaxWrotePosition() - entry.getPos() < dLedgerConfig.getPeerPushThrottlePoint()) {
                return;
            }
            // Account this entry's bytes against the current quota window.
            quota.sample(entry.getSize());
            if (quota.validateNow()) {
                long leftNow = quota.leftNow();
                logger.warn("[Push-{}]Quota exhaust, will sleep {}ms", peerId, leftNow);
                DLedgerUtils.sleep(leftNow);
            }
        }

        /**
         * Pushes (appends) the entry at {@code index} to this dispatcher's follower.
         *
         * Leader-to-follower replication: sends the entry in an APPEND RPC; on a
         * successful response it advances this follower's water mark and wakes the
         * {@link QuorumAckChecker} so the quorum can be re-evaluated.
         *
         * @param index index of the entry to push
         * @throws Exception if the entry cannot be read or the push cannot be issued
         */
        private void doAppendInner(long index) throws Exception {
            // Load the entry; returns null (after switching to COMPARE) when the
            // index has fallen below the ledger's begin index.
            DLedgerEntry entry = getDLedgerEntryForAppend(index);
            if (null == entry) {
                return;
            }
            // Throttle if this peer lags far behind (see checkQuotaAndWait).
            checkQuotaAndWait(entry);
            // Build the APPEND request.
            PushEntryRequest request = buildPushRequest(entry, PushEntryRequest.Type.APPEND);
            // Send the RPC.
            CompletableFuture<PushEntryResponse> responseFuture = dLedgerRpcService.push(request);
            // Record the send time: entry index -> timestamp (for timeout detection).
            pendingMap.put(index, System.currentTimeMillis());
            // Handle the response asynchronously.
            responseFuture.whenComplete((response, ex) -> {
                try {
                    PreConditions.check(ex == null, DLedgerResponseCode.UNKNOWN);
                    DLedgerResponseCode responseCode = DLedgerResponseCode.valueOf(response.getCode());
                    switch (responseCode) {
                        case SUCCESS:
                            pendingMap.remove(response.getIndex());
                            // Advance this follower's water mark.
                            updatePeerWaterMark(response.getTerm(), peerId, response.getIndex());
                            // Wake the quorum checker: a quorum may now have acked.
                            quorumAckChecker.wakeup();
                            break;
                        case INCONSISTENT_STATE:
                            logger.info("[Push-{}]Get INCONSISTENT_STATE when push index={} term={}", peerId, response.getIndex(), response.getTerm());
                            changeState(-1, COMPARE);
                            break;
                        default:
                            logger.warn("[Push-{}]Get error response code {} {}", peerId, responseCode, response.baseInfo());
                            break;
                    }
                } catch (Throwable t) {
                    logger.error("", t);
                }
            });
            lastPushCommitTimeMs = System.currentTimeMillis();
        }

        /**
         * Reads the entry at {@code index} from the local store for pushing.
         *
         * @param index entry index to load
         * @return the entry, or null when the index now precedes the ledger's
         *         begin index (in which case the dispatcher falls back to COMPARE)
         */
        private DLedgerEntry getDLedgerEntryForAppend(long index) {
            DLedgerEntry entry;
            try {
                entry = dLedgerStore.get(index);
            } catch (DLedgerException e) {
                //  Do compare, in case the ledgerBeginIndex get refreshed.
                if (DLedgerResponseCode.INDEX_LESS_THAN_LOCAL_BEGIN.equals(e.getCode())) {
                    logger.info("[Push-{}]Get INDEX_LESS_THAN_LOCAL_BEGIN when requested index is {}, try to compare", peerId, index);
                    changeState(-1, COMPARE);
                    return null;
                }
                throw e;
            }
            PreConditions.check(entry != null, DLedgerResponseCode.UNKNOWN, "writeIndex=%d", index);
            return entry;
        }

        /**
         * Sends a standalone COMMIT request to the peer, at most once per second,
         * so the follower learns the committed index even when no APPENDs flow.
         */
        private void doCommit() throws Exception {
            // Rate-limit: skip if a push happened within the last second.
            if (DLedgerUtils.elapsed(lastPushCommitTimeMs) <= 1000) {
                return;
            }
            PushEntryRequest request = buildPushRequest(null, PushEntryRequest.Type.COMMIT);
            // Fire-and-forget: the follower's response is intentionally ignored.
            dLedgerRpcService.push(request);
            lastPushCommitTimeMs = System.currentTimeMillis();
        }

        /**
         * Re-pushes the first un-acked entry (water mark + 1) when its original
         * APPEND has been in flight longer than the configured push timeout.
         */
        private void doCheckAppendResponse() throws Exception {
            long nextIndex = getPeerWaterMark(term, peerId) + 1;
            Long sendTimeMs = pendingMap.get(nextIndex);
            if (sendTimeMs == null) {
                return;
            }
            if (System.currentTimeMillis() - sendTimeMs > dLedgerConfig.getMaxPushTimeOutMs()) {
                logger.warn("[Push-{}]Retry to push entry at {}", peerId, nextIndex);
                doAppendInner(nextIndex);
            }
        }

        /**
         * Replication loop: pushes entries to the follower one by one starting at
         * {@link #writeIndex}, until caught up, the mode changes, leadership is
         * lost, or the in-flight window ({@link #maxPendingSize}) is full.
         */
        private void doAppend() throws Exception {
            while (true) {
                if (!checkAndFreshState()) {
                    break;
                }
                if (type.get() != PushEntryRequest.Type.APPEND) {
                    break;
                }
                if (writeIndex > dLedgerStore.getLedgerEndIndex()) {
                    // Everything pushed (writeIndex passed the leader's end index):
                    // send a standalone COMMIT and retry any timed-out in-flight APPEND.
                    doCommit();
                    doCheckAppendResponse();
                    break;
                }
                // Sweep acked requests out of pendingMap (when the window is full, or once per second).
                if (pendingMap.size() >= maxPendingSize || (DLedgerUtils.elapsed(lastCheckLeakTimeMs) > 1000)) {
                    long peerWaterMark = getPeerWaterMark(term, peerId);
                    for (Long index : pendingMap.keySet()) {
                        if (index < peerWaterMark) {
                            // Below the water mark means already acked; drop it to free the window.
                            pendingMap.remove(index);
                        }
                    }
                    lastCheckLeakTimeMs = System.currentTimeMillis();
                }
                // Window still full after the sweep: check for timeouts instead of sending more.
                if (pendingMap.size() >= maxPendingSize) {
                    doCheckAppendResponse();
                    break;
                }
                // Push the entry at writeIndex to the follower.
                doAppendInner(writeIndex);

                // Advance to the next entry.
                writeIndex++;
            }
        }

        /**
         * Flush the currently accumulated batch to the follower.
         * The batch is recorded in {@code batchPendingMap} keyed by its FIRST entry index
         * together with its send time and entry count, so timeouts can be detected later;
         * afterwards the shared batch buffer is cleared for reuse.
         */
        private void sendBatchAppendEntryRequest() throws Exception {
            // Piggy-back the leader's current committed index on the push request.
            batchAppendEntryRequest.setCommitIndex(dLedgerStore.getCommittedIndex());
            CompletableFuture<PushEntryResponse> responseFuture = dLedgerRpcService.push(batchAppendEntryRequest);
            batchPendingMap.put(batchAppendEntryRequest.getFirstEntryIndex(), new Pair<>(System.currentTimeMillis(), batchAppendEntryRequest.getCount()));
            responseFuture.whenComplete((x, ex) -> {
                try {
                    PreConditions.check(ex == null, DLedgerResponseCode.UNKNOWN);
                    DLedgerResponseCode responseCode = DLedgerResponseCode.valueOf(x.getCode());
                    switch (responseCode) {
                        case SUCCESS:
                            // NOTE(review): batchPendingMap is keyed by the batch's FIRST entry
                            // index, while x.getIndex() carries the LAST entry index (see
                            // buildBatchAppendResponse); for batches with more than one entry
                            // this remove() appears to be a no-op and cleanup relies on the
                            // leak check in doBatchAppend() — confirm against the follower side.
                            batchPendingMap.remove(x.getIndex());
                            updatePeerWaterMark(x.getTerm(), peerId, x.getIndex()); // leader-side record of the follower's acked index
                            break;
                        case INCONSISTENT_STATE:
                            logger.info("[Push-{}]Get INCONSISTENT_STATE when batch push index={} term={}", peerId, x.getIndex(), x.getTerm());
                            changeState(-1, COMPARE);
                            break;
                        default:
                            logger.warn("[Push-{}]Get error response code {} {}", peerId, responseCode, x.baseInfo());
                            break;
                    }
                } catch (Throwable t) {
                    logger.error("", t);
                }
            });
            lastPushCommitTimeMs = System.currentTimeMillis();
            batchAppendEntryRequest.clear();
        }

        /**
         * Add the entry at {@code index} to the in-progress batch; once the batch's total
         * size reaches the configured limit, ship it to the follower.
         */
        private void doBatchAppendInner(long index) throws Exception {
            DLedgerEntry entry = getDLedgerEntryForAppend(index);
            if (entry == null) {
                return;
            }
            batchAppendEntryRequest.addEntry(entry);
            boolean batchFull = batchAppendEntryRequest.getTotalSize() >= dLedgerConfig.getMaxBatchPushSize();
            if (batchFull) {
                sendBatchAppendEntryRequest();
            }
        }

        /**
         * If the oldest un-acked batch (starting at peer watermark + 1) has been in flight
         * longer than the push timeout, rebuild that batch from the store and resend it.
         *
         * @throws Exception if reading the store or pushing the rebuilt batch fails
         */
        private void doCheckBatchAppendResponse() throws Exception {
            long peerWaterMark = getPeerWaterMark(term, peerId);
            // Use the parameterized type (batchPendingMap is ConcurrentMap<Long, Pair<Long, Integer>>)
            // instead of a raw Pair with explicit casts.
            Pair<Long, Integer> pair = batchPendingMap.get(peerWaterMark + 1);
            if (pair != null && System.currentTimeMillis() - pair.getKey() > dLedgerConfig.getMaxPushTimeOutMs()) {
                long firstIndex = peerWaterMark + 1;
                long lastIndex = firstIndex + pair.getValue() - 1;
                logger.warn("[Push-{}]Retry to push entry from {} to {}", peerId, firstIndex, lastIndex);
                batchAppendEntryRequest.clear();
                for (long i = firstIndex; i <= lastIndex; i++) {
                    DLedgerEntry entry = dLedgerStore.get(i);
                    batchAppendEntryRequest.addEntry(entry);
                }
                sendBatchAppendEntryRequest();
            }
        }

        /**
         * Batch variant of the replication loop: accumulate entries into a batch request,
         * flush when the size limit is hit, and prune acknowledged batches from the
         * pending map as the follower's watermark advances.
         */
        private void doBatchAppend() throws Exception {
            while (true) {
                if (!checkAndFreshState() || type.get() != PushEntryRequest.Type.APPEND) {
                    break;
                }
                if (writeIndex > dLedgerStore.getLedgerEndIndex()) {
                    // Caught up: flush any partially-filled batch, refresh the commit
                    // index and look for timed-out batches before parking.
                    if (batchAppendEntryRequest.getCount() > 0) {
                        sendBatchAppendEntryRequest();
                    }
                    doCommit();
                    doCheckBatchAppendResponse();
                    break;
                }
                // Drop batches whose last entry is already covered by the follower's watermark.
                if (batchPendingMap.size() >= maxPendingSize || DLedgerUtils.elapsed(lastCheckLeakTimeMs) > 1000) {
                    long peerWaterMark = getPeerWaterMark(term, peerId);
                    batchPendingMap.entrySet().removeIf(e -> e.getKey() + e.getValue().getValue() - 1 <= peerWaterMark);
                    lastCheckLeakTimeMs = System.currentTimeMillis();
                }
                // Window still full after pruning: only check for timeouts.
                if (batchPendingMap.size() >= maxPendingSize) {
                    doCheckBatchAppendResponse();
                    break;
                }
                doBatchAppendInner(writeIndex);
                writeIndex++;
            }
        }

        /**
         * The follower has diverging (dirty) entries: push a TRUNCATE request so the
         * follower cuts its log back to {@code truncateIndex}, then switch back to APPEND.
         *
         * @param truncateIndex the index to truncate at (on the follower, entries from this
         *                      position onwards are discarded before re-appending the leader's entry)
         * @throws Exception if the push times out or the follower rejects the truncation
         */
        private void doTruncate(long truncateIndex) throws Exception {
            PreConditions.check(type.get() == TRUNCATE, DLedgerResponseCode.UNKNOWN);

            DLedgerEntry truncateEntry = dLedgerStore.get(truncateIndex);
            PreConditions.check(truncateEntry != null, DLedgerResponseCode.UNKNOWN);
            logger.info("[Push-{}]Will push data to truncate truncateIndex={} pos={}", peerId, truncateIndex, truncateEntry.getPos());

            // Build the TRUNCATE request carrying the leader's entry at truncateIndex.
            PushEntryRequest truncateRequest = buildPushRequest(truncateEntry, TRUNCATE);
            // Send synchronously: wait up to 3 seconds for the follower's response.
            PushEntryResponse truncateResponse = dLedgerRpcService.push(truncateRequest).get(3, TimeUnit.SECONDS);

            PreConditions.check(truncateResponse != null, DLedgerResponseCode.UNKNOWN, "truncateIndex=%d", truncateIndex);
            PreConditions.check(truncateResponse.getCode() == DLedgerResponseCode.SUCCESS.getCode(), DLedgerResponseCode.valueOf(truncateResponse.getCode()), "truncateIndex=%d", truncateIndex);

            lastPushCommitTimeMs = System.currentTimeMillis();
            // Truncation succeeded: resume normal replication from truncateIndex.
            changeState(truncateIndex, PushEntryRequest.Type.APPEND);
        }

        /**
         * Switch the dispatcher's push state machine.
         *
         * @param index  the entry index the transition is anchored at (-1 at initialization)
         * @param target the new push type (COMPARE at initialization)
         */
        private synchronized void changeState(long index, PushEntryRequest.Type target) {
            logger.info("[Push-{}]Change state from {} to {} at {}", peerId, type.get(), target, index);
            switch (target) {
                case APPEND:  // switching to APPEND anchors replication at the given index
                    compareIndex = -1;
                    // Record that the follower is known to be consistent up to 'index'.
                    updatePeerWaterMark(term, peerId, index); // leader-side record of the follower's acked index
                    // Wake the quorum-ack checker so it can re-evaluate pending appends.
                    quorumAckChecker.wakeup();

                    // Important: the next entry index the leader will push to this follower.
                    writeIndex = index + 1;

                    // Batch push (disabled by default): reset the shared batch buffer.
                    if (dLedgerConfig.isEnableBatchPush()) {
                        resetBatchAppendEntryRequest();
                    }
                    break;
                case COMPARE:
                    // Only reset the pending state when actually falling back from APPEND to
                    // COMPARE (e.g. after an INCONSISTENT_STATE response or an unexpected error
                    // in doWork()); otherwise leave compareIndex/pending maps untouched.
                    if (this.type.compareAndSet(PushEntryRequest.Type.APPEND, COMPARE)) {
                        compareIndex = -1;
                        if (dLedgerConfig.isEnableBatchPush()) {
                            batchPendingMap.clear();
                        } else {
                            pendingMap.clear();
                        }
                    }
                    break;
                case TRUNCATE:
                    compareIndex = -1;
                    break;
                default:
                    break;
            }
            type.set(target);
        }

        /**
         * Compare the leader's ledger end index with this peer's to find a consistent point.
         *
         * The leader sends COMPARE requests to the peer.
         *
         * If the peer's response is successful (response.getCode() == SUCCESS.getCode()):
         * 1. If the leader's and follower's end index match at compareIndex, switch to APPEND.
         * 2. If the follower has dirty entries, set truncateIndex; when truncateIndex != -1
         *    the state changes to TRUNCATE and a truncate request is sent.
         * 3. After a successful truncation the state becomes APPEND again.
         */
        private void doCompare() throws Exception {
            while (true) {
                if (!checkAndFreshState()) {
                    break;
                }
                if (type.get() != COMPARE && type.get() != TRUNCATE) {
                    break;
                }
                // Nothing to compare yet: comparison not started and the leader's log is empty.
                if (compareIndex == -1 && dLedgerStore.getLedgerEndIndex() == -1) {
                    break;
                }
                //revise the compareIndex
                if (compareIndex == -1) {
                    compareIndex = dLedgerStore.getLedgerEndIndex();
                    logger.info("[Push-{}][DoCompare] compareIndex=-1 means start to compare", peerId);
                } else if (compareIndex > dLedgerStore.getLedgerEndIndex() || compareIndex < dLedgerStore.getLedgerBeginIndex()) {
                    logger.info("[Push-{}][DoCompare] compareIndex={} out of range {}-{}", peerId, compareIndex, dLedgerStore.getLedgerBeginIndex(), dLedgerStore.getLedgerEndIndex());
                    compareIndex = dLedgerStore.getLedgerEndIndex();
                }

                // Fetch the leader's entry at compareIndex.
                DLedgerEntry entry = dLedgerStore.get(compareIndex);
                PreConditions.check(entry != null, DLedgerResponseCode.INTERNAL_ERROR, "compareIndex=%d", compareIndex);

                // Build the COMPARE request.
                PushEntryRequest request = buildPushRequest(entry, COMPARE);
                // Send it to this dispatcher's peer (each EntryDispatcher serves one peer)
                // and wait synchronously for the response.
                CompletableFuture<PushEntryResponse> responseFuture = dLedgerRpcService.push(request);
                PushEntryResponse response = responseFuture.get(3, TimeUnit.SECONDS);

                PreConditions.check(response != null, DLedgerResponseCode.INTERNAL_ERROR, "compareIndex=%d", compareIndex);
                PreConditions.check(response.getCode() == DLedgerResponseCode.INCONSISTENT_STATE.getCode() || response.getCode() == DLedgerResponseCode.SUCCESS.getCode(), DLedgerResponseCode.valueOf(response.getCode()), "compareIndex=%d", compareIndex);

                long truncateIndex = -1;

                if (response.getCode() == DLedgerResponseCode.SUCCESS.getCode()) {
                    /*
                     * The comparison is successful:
                     * 1.Just change to append state, if the follower's end index is equal the compared index.
                     * 2.Truncate the follower, if the follower has some dirty entries.
                     */
                    if (compareIndex == response.getEndIndex()) {
                        changeState(compareIndex, PushEntryRequest.Type.APPEND);
                        break;
                    } else {
                        truncateIndex = compareIndex;
                    }
                } else if (response.getEndIndex() < dLedgerStore.getLedgerBeginIndex()
                      || response.getBeginIndex() > dLedgerStore.getLedgerEndIndex()) {
                    /*
                     The follower's entries does not intersect with the leader.
                     This usually happened when the follower has crashed for a long time while the leader has deleted the expired entries.
                     Just truncate the follower.
                     */
                    truncateIndex = dLedgerStore.getLedgerBeginIndex();
                } else if (compareIndex < response.getBeginIndex()) {
                    /*
                     The compared index is smaller than the follower's begin index.
                     This happened rarely, usually means some disk damage.
                     Just truncate the follower.
                     */
                    truncateIndex = dLedgerStore.getLedgerBeginIndex();
                } else if (compareIndex > response.getEndIndex()) {
                    /*
                     The compared index is bigger than the follower's end index.
                     This happened frequently. For the compared index is usually starting from the end index of the leader.
                     */
                    compareIndex = response.getEndIndex();
                } else {
                    /*
                      Compare failed and the compared index is in the range of follower's entries.
                     */
                    compareIndex--;
                }
                /*
                 The compared index is smaller than the leader's begin index, truncate the follower.
                 */
                if (compareIndex < dLedgerStore.getLedgerBeginIndex()) {
                    truncateIndex = dLedgerStore.getLedgerBeginIndex();
                }
                /*
                 If get value for truncateIndex, do it right now.
                 */
                if (truncateIndex != -1) {
                    // Switch to TRUNCATE state and issue the truncate request.
                    changeState(truncateIndex, TRUNCATE);
                    doTruncate(truncateIndex);
                    break;
                }
            }
        }

        /**
         * Dispatcher loop body. Only a leader makes progress here; depending on the current
         * push type it either replicates entries (APPEND, single or batch) or reconciles
         * the follower's log (COMPARE / TRUNCATE). On any unexpected error it falls back
         * to COMPARE and backs off briefly.
         */
        @Override
        public void doWork() {
            try {
                // Non-leaders (or stale state) simply park for a moment.
                if (!checkAndFreshState()) {
                    waitForRunning(1);
                    return;
                }

                boolean appending = type.get() == PushEntryRequest.Type.APPEND;
                if (!appending) {
                    // Initial state is COMPARE: reconcile end indexes first and truncate
                    // the follower's dirty entries if needed.
                    doCompare();
                } else if (dLedgerConfig.isEnableBatchPush()) {
                    // Batch replication (off by default; batches capped by max push size).
                    doBatchAppend();
                } else {
                    // Single-entry replication.
                    doAppend();
                }

                // Yield the CPU between iterations so other threads can run.
                Thread.yield();
            } catch (Throwable t) {
                DLedgerEntryPusher.logger.error("[Push-{}]Error in {} writeIndex={} compareIndex={}", peerId, getName(), writeIndex, compareIndex, t);
                changeState(-1, COMPARE);
                DLedgerUtils.sleep(500);
            }
        }
    }

    /**
     * This thread will be activated by the follower.
     * Accept the push request and order it by the index, then append to ledger store one by one.
     *
     * On a follower, PUSH requests from the leader are handed to this handler:
     * 1. {@link #handlePush(PushEntryRequest)} parks each request: APPEND requests go into
     *    {@link #writeRequestMap} keyed by their first entry index; COMMIT/COMPARE/TRUNCATE
     *    requests go into {@link #compareOrTruncateRequests}.
     * 2. {@link #doWork()} drains them asynchronously so the RPC layer is never blocked:
     *    COMMIT/COMPARE/TRUNCATE requests are served first, then APPEND requests strictly
     *    in index order — the next index to apply is always ledgerEndIndex + 1; if that
     *    request has not arrived yet the handler waits and checks for abnormal futures.
     */
    private class EntryHandler extends ShutdownAbleThread {

        // Last time the fast-forward/timeout scan over writeRequestMap ran.
        private long lastCheckFastForwardTimeMs = System.currentTimeMillis();

        /**
         * Pending APPEND requests from the leader ({@link DLedgerRequestCode#PUSH}),
         * keyed by first entry index; consumed asynchronously by {@link #doWork()}.
         */
        ConcurrentMap<Long/*index*/, Pair<PushEntryRequest, CompletableFuture<PushEntryResponse>>> writeRequestMap = new ConcurrentHashMap<>();

        /**
         * Queue of COMMIT / COMPARE / TRUNCATE {@link DLedgerRequestCode#PUSH} requests;
         * these are served with priority over APPEND requests.
         */
        BlockingQueue<Pair<PushEntryRequest, CompletableFuture<PushEntryResponse>>> compareOrTruncateRequests = new ArrayBlockingQueue<>(100);

        public EntryHandler(Logger logger) {
            super("EntryHandler-" + memberState.getSelfId(), logger);
        }

        /**
         * Park an incoming push request for asynchronous processing and wake the handler thread.
         *
         * @param request the leader's push request
         * @return a future completed once the request has been handled (or times out)
         */
        public CompletableFuture<PushEntryResponse> handlePush(PushEntryRequest request) throws Exception {
            //The timeout should smaller than the remoting layer's request timeout
            CompletableFuture<PushEntryResponse> future = new TimeoutFuture<>(1000);
            switch (request.getType()) {
                case APPEND:
                    if (request.isBatch()) {
                        PreConditions.check(request.getBatchEntry() != null && request.getCount() > 0, DLedgerResponseCode.UNEXPECTED_ARGUMENT);
                    } else {
                        PreConditions.check(request.getEntry() != null, DLedgerResponseCode.UNEXPECTED_ARGUMENT);
                    }
                    long index = request.getFirstEntryIndex();
                    // Park APPEND requests in the map; the worker thread appends them in order.
                    Pair<PushEntryRequest, CompletableFuture<PushEntryResponse>> old = writeRequestMap.putIfAbsent(index, new Pair<>(request, future));
                    if (old != null) {
                        // A request for this index is already parked: reject the duplicate.
                        logger.warn("[MONITOR]The index {} has already existed with {} and curr is {}", index, old.getKey().baseInfo(), request.baseInfo());
                        future.complete(buildResponse(request, DLedgerResponseCode.REPEATED_PUSH.getCode()));
                    }
                    break;
                case COMMIT:
                    // COMMIT requests are queued and served before APPEND requests.
                    compareOrTruncateRequests.put(new Pair<>(request, future));
                    break;
                case COMPARE:
                case TRUNCATE:
                    PreConditions.check(request.getEntry() != null, DLedgerResponseCode.UNEXPECTED_ARGUMENT);
                    // A COMPARE/TRUNCATE invalidates every parked APPEND request.
                    writeRequestMap.clear();
                    compareOrTruncateRequests.put(new Pair<>(request, future));
                    break;
                default:
                    logger.error("[BUG]Unknown type {} from {}", request.getType(), request.baseInfo());
                    future.complete(buildResponse(request, DLedgerResponseCode.UNEXPECTED_ARGUMENT.getCode()));
                    break;
            }
            // A new request arrived: wake the worker thread (see doWork()).
            wakeup();
            return future;
        }

        /** Build a response echoing the request's group/term; index only for non-COMMIT requests. */
        private PushEntryResponse buildResponse(PushEntryRequest request, int code) {
            PushEntryResponse response = new PushEntryResponse();
            response.setGroup(request.getGroup());
            response.setCode(code);
            response.setTerm(request.getTerm());
            if (request.getType() != PushEntryRequest.Type.COMMIT) {
                response.setIndex(request.getEntry().getIndex());
            }
            response.setBeginIndex(dLedgerStore.getLedgerBeginIndex());
            response.setEndIndex(dLedgerStore.getLedgerEndIndex());
            return response;
        }

        /** Build a response for a batch append; index is the LAST entry index of the batch. */
        private PushEntryResponse buildBatchAppendResponse(PushEntryRequest request, int code) {
            PushEntryResponse response = new PushEntryResponse();
            response.setGroup(request.getGroup());
            response.setCode(code);
            response.setTerm(request.getTerm());
            response.setIndex(request.getLastEntryIndex());
            response.setBeginIndex(dLedgerStore.getLedgerBeginIndex());
            response.setEndIndex(dLedgerStore.getLedgerEndIndex());
            return response;
        }

        /**
         * Append a single entry to the local store as a follower and complete the future;
         * any failure is reported back as INCONSISTENT_STATE.
         */
        private void handleDoAppend(long writeIndex, PushEntryRequest request, CompletableFuture<PushEntryResponse> future) {
            try {
                PreConditions.check(writeIndex == request.getEntry().getIndex(), DLedgerResponseCode.INCONSISTENT_STATE);

                // Append the leader's entry to the follower's store.
                DLedgerEntry entry = dLedgerStore.appendAsFollower(request.getEntry(), request.getTerm(), request.getLeaderId());
                PreConditions.check(entry.getIndex() == writeIndex, DLedgerResponseCode.INCONSISTENT_STATE);
                future.complete(buildResponse(request, DLedgerResponseCode.SUCCESS.getCode()));
                // Advance the follower's committed index from the leader's piggy-backed value.
                dLedgerStore.updateCommittedIndex(request.getTerm(), request.getCommitIndex());
            } catch (Throwable t) {
                logger.error("[HandleDoWrite] writeIndex={}", writeIndex, t);
                future.complete(buildResponse(request, DLedgerResponseCode.INCONSISTENT_STATE.getCode()));
            }
        }

        /**
         * Compare the leader's entry against the local entry at the same index; SUCCESS
         * only if they are identical, otherwise INCONSISTENT_STATE.
         */
        private CompletableFuture<PushEntryResponse> handleDoCompare(long compareIndex, PushEntryRequest request,
            CompletableFuture<PushEntryResponse> future) {
            try {
                PreConditions.check(compareIndex == request.getEntry().getIndex(), DLedgerResponseCode.UNKNOWN);
                PreConditions.check(request.getType() == COMPARE, DLedgerResponseCode.UNKNOWN);
                DLedgerEntry local = dLedgerStore.get(compareIndex);
                PreConditions.check(request.getEntry().equals(local), DLedgerResponseCode.INCONSISTENT_STATE);
                future.complete(buildResponse(request, DLedgerResponseCode.SUCCESS.getCode()));
            } catch (Throwable t) {
                logger.error("[HandleDoCompare] compareIndex={}", compareIndex, t);
                future.complete(buildResponse(request, DLedgerResponseCode.INCONSISTENT_STATE.getCode()));
            }
            return future;
        }

        /** Apply the leader's committed index locally and acknowledge the COMMIT request. */
        private CompletableFuture<PushEntryResponse> handleDoCommit(long committedIndex, PushEntryRequest request,
            CompletableFuture<PushEntryResponse> future) {
            try {
                PreConditions.check(committedIndex == request.getCommitIndex(), DLedgerResponseCode.UNKNOWN);
                PreConditions.check(request.getType() == PushEntryRequest.Type.COMMIT, DLedgerResponseCode.UNKNOWN);
                dLedgerStore.updateCommittedIndex(request.getTerm(), committedIndex);
                future.complete(buildResponse(request, DLedgerResponseCode.SUCCESS.getCode()));
            } catch (Throwable t) {
                logger.error("[HandleDoCommit] committedIndex={}", request.getCommitIndex(), t);
                future.complete(buildResponse(request, DLedgerResponseCode.UNKNOWN.getCode()));
            }
            return future;
        }

        /**
         * Truncate the local store at the requested index (discarding later entries) and
         * acknowledge; failures are reported as INCONSISTENT_STATE.
         */
        private CompletableFuture<PushEntryResponse> handleDoTruncate(long truncateIndex, PushEntryRequest request,
            CompletableFuture<PushEntryResponse> future) {
            try {
                logger.info("[HandleDoTruncate] truncateIndex={} pos={}", truncateIndex, request.getEntry().getPos());
                PreConditions.check(truncateIndex == request.getEntry().getIndex(), DLedgerResponseCode.UNKNOWN);
                PreConditions.check(request.getType() == TRUNCATE, DLedgerResponseCode.UNKNOWN);
                long index = dLedgerStore.truncate(request.getEntry(), request.getTerm(), request.getLeaderId());
                PreConditions.check(index == truncateIndex, DLedgerResponseCode.INCONSISTENT_STATE);
                future.complete(buildResponse(request, DLedgerResponseCode.SUCCESS.getCode()));
                dLedgerStore.updateCommittedIndex(request.getTerm(), request.getCommitIndex());
            } catch (Throwable t) {
                logger.error("[HandleDoTruncate] truncateIndex={}", truncateIndex, t);
                future.complete(buildResponse(request, DLedgerResponseCode.INCONSISTENT_STATE.getCode()));
            }
            return future;
        }

        /**
         * Append a batch of entries to the local store and acknowledge.
         * FIX: on failure the future is now completed with INCONSISTENT_STATE — the
         * previous code only logged, leaving the leader to wait for the TimeoutFuture
         * instead of reacting immediately (inconsistent with handleDoAppend).
         */
        private void handleDoBatchAppend(long writeIndex, PushEntryRequest request,
            CompletableFuture<PushEntryResponse> future) {
            try {
                PreConditions.check(writeIndex == request.getFirstEntryIndex(), DLedgerResponseCode.INCONSISTENT_STATE);
                for (DLedgerEntry entry : request.getBatchEntry()) {
                    dLedgerStore.appendAsFollower(entry, request.getTerm(), request.getLeaderId());
                }
                future.complete(buildBatchAppendResponse(request, DLedgerResponseCode.SUCCESS.getCode()));
                dLedgerStore.updateCommittedIndex(request.getTerm(), request.getCommitIndex());
            } catch (Throwable t) {
                logger.error("[HandleDoBatchAppend]", t);
                future.complete(buildBatchAppendResponse(request, DLedgerResponseCode.INCONSISTENT_STATE.getCode()));
            }
        }

        /**
         * Scan parked APPEND requests against the current ledger end index:
         *  - requests entirely behind the end index are re-acknowledged (a lost ack is assumed);
         *  - if the request for endIndex + 1 is present, everything is in order;
         *  - otherwise the oldest timed-out request is failed with INCONSISTENT_STATE so the
         *    leader falls back to COMPARE (fast-forward recovery).
         */
        private void checkAppendFuture(long endIndex) {
            long minFastForwardIndex = Long.MAX_VALUE;
            for (Pair<PushEntryRequest, CompletableFuture<PushEntryResponse>> pair : writeRequestMap.values()) {
                long firstEntryIndex = pair.getKey().getFirstEntryIndex();
                long lastEntryIndex = pair.getKey().getLastEntryIndex();
                //Fall behind
                if (lastEntryIndex <= endIndex) {
                    try {
                        // Verify the parked entries match what is already stored before re-acking.
                        if (pair.getKey().isBatch()) {
                            for (DLedgerEntry dLedgerEntry : pair.getKey().getBatchEntry()) {
                                PreConditions.check(dLedgerEntry.equals(dLedgerStore.get(dLedgerEntry.getIndex())), DLedgerResponseCode.INCONSISTENT_STATE);
                            }
                        } else {
                            DLedgerEntry dLedgerEntry = pair.getKey().getEntry();
                            PreConditions.check(dLedgerEntry.equals(dLedgerStore.get(dLedgerEntry.getIndex())), DLedgerResponseCode.INCONSISTENT_STATE);
                        }
                        pair.getValue().complete(buildBatchAppendResponse(pair.getKey(), DLedgerResponseCode.SUCCESS.getCode()));
                        logger.warn("[PushFallBehind]The leader pushed an batch append entry last index={} smaller than current ledgerEndIndex={}, maybe the last ack is missed", lastEntryIndex, endIndex);
                    } catch (Throwable t) {
                        logger.error("[PushFallBehind]The leader pushed an batch append entry last index={} smaller than current ledgerEndIndex={}, maybe the last ack is missed", lastEntryIndex, endIndex, t);
                        pair.getValue().complete(buildBatchAppendResponse(pair.getKey(), DLedgerResponseCode.INCONSISTENT_STATE.getCode()));
                    }
                    writeRequestMap.remove(pair.getKey().getFirstEntryIndex());
                    continue;
                }
                // The next expected request is present: nothing abnormal.
                if (firstEntryIndex == endIndex + 1) {
                    return;
                }
                TimeoutFuture<PushEntryResponse> future = (TimeoutFuture<PushEntryResponse>) pair.getValue();
                if (!future.isTimeOut()) {
                    continue;
                }
                if (firstEntryIndex < minFastForwardIndex) {
                    minFastForwardIndex = firstEntryIndex;
                }
            }
            if (minFastForwardIndex == Long.MAX_VALUE) {
                return;
            }
            Pair<PushEntryRequest, CompletableFuture<PushEntryResponse>> pair = writeRequestMap.get(minFastForwardIndex);
            if (pair == null) {
                return;
            }
            // Fail the oldest timed-out request so the leader re-compares.
            logger.warn("[PushFastForward] ledgerEndIndex={} entryIndex={}", endIndex, minFastForwardIndex);
            pair.getValue().complete(buildBatchAppendResponse(pair.getKey(), DLedgerResponseCode.INCONSISTENT_STATE.getCode()));
        }

        /**
         * The leader does push entries to follower, and record the pushed index. But in the following conditions, the push may get stopped.
         *   * If the follower is abnormally shutdown, its ledger end index may be smaller than before. At this time, the leader may push fast-forward entries, and retry all the time.
         *   * If the last ack is missed, and no new message is coming in.The leader may retry push the last message, but the follower will ignore it.
         * @param endIndex the follower's current ledger end index
         */
        private void checkAbnormalFuture(long endIndex) {
            // Throttle the scan to at most once per second.
            if (DLedgerUtils.elapsed(lastCheckFastForwardTimeMs) < 1000) {
                return;
            }
            lastCheckFastForwardTimeMs = System.currentTimeMillis();
            if (writeRequestMap.isEmpty()) {
                return;
            }

            checkAppendFuture(endIndex);
        }

        /**
         * Worker loop, active only on followers. COMMIT/COMPARE/TRUNCATE requests are
         * served first; otherwise the APPEND request for index ledgerEndIndex + 1 is
         * applied, which guarantees in-order application of pushed entries.
         */
        @Override
        public void doWork() {
            try {
                // Only followers process pushed entries.
                if (!memberState.isFollower()) {
                    waitForRunning(1);
                    return;
                }
                // Priority lane: TRUNCATE / COMPARE / COMMIT requests.
                if (compareOrTruncateRequests.peek() != null) {
                    Pair<PushEntryRequest, CompletableFuture<PushEntryResponse>> pair = compareOrTruncateRequests.poll();
                    PreConditions.check(pair != null, DLedgerResponseCode.UNKNOWN);
                    switch (pair.getKey().getType()) {
                        case TRUNCATE:
                            handleDoTruncate(pair.getKey().getEntry().getIndex(), pair.getKey(), pair.getValue());
                            break;
                        case COMPARE:
                            handleDoCompare(pair.getKey().getEntry().getIndex(), pair.getKey(), pair.getValue());
                            break;
                        case COMMIT:
                            handleDoCommit(pair.getKey().getCommitIndex(), pair.getKey(), pair.getValue());
                            break;
                        default:
                            break;
                    }
                } else {
                    // APPEND lane: the only index that may be applied next is endIndex + 1,
                    // which enforces strict in-order application.
                    long nextIndex = dLedgerStore.getLedgerEndIndex() + 1;
                    Pair<PushEntryRequest, CompletableFuture<PushEntryResponse>> pair = writeRequestMap.remove(nextIndex);
                    if (pair == null) {
                        // The expected request has not arrived: look for stuck/stale futures.
                        checkAbnormalFuture(dLedgerStore.getLedgerEndIndex());
                        waitForRunning(1);
                        return;
                    }

                    PushEntryRequest request = pair.getKey();
                    if (request.isBatch()) {
                        handleDoBatchAppend(nextIndex, request, pair.getValue());
                    } else {
                        handleDoAppend(nextIndex, request, pair.getValue());
                    }
                }
            } catch (Throwable t) {
                DLedgerEntryPusher.logger.error("Error in {}", getName(), t);
                DLedgerUtils.sleep(100);
            }
        }
    }
}
