/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.zookeeper.server.quorum;

import static java.nio.charset.StandardCharsets.UTF_8;

import java.io.IOException;
import java.nio.BufferUnderflowException;
import java.nio.ByteBuffer;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.zookeeper.common.Time;
import org.apache.zookeeper.jmx.MBeanRegistry;
import org.apache.zookeeper.server.ZooKeeperThread;
import org.apache.zookeeper.server.quorum.QuorumCnxManager.Message;
import org.apache.zookeeper.server.quorum.QuorumPeer.LearnerType;
import org.apache.zookeeper.server.quorum.QuorumPeer.ServerState;
import org.apache.zookeeper.server.quorum.QuorumPeerConfig.ConfigException;
import org.apache.zookeeper.server.quorum.flexible.QuorumOracleMaj;
import org.apache.zookeeper.server.quorum.flexible.QuorumVerifier;
import org.apache.zookeeper.server.util.ZxidUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Implementation of leader election using TCP. It uses an object of the class
 * QuorumCnxManager to manage connections. Otherwise, the algorithm is push-based
 * as with the other UDP implementations.
 *
 * <p>This class maintains the connections between servers (used to send votes),
 * the send and receive queues, the current proposal (leader id, zxid, epoch),
 * and a flag indicating whether the election has been stopped.
 *
 * <p>There are a few parameters that can be tuned to change its behavior. First,
 * finalizeWait determines the amount of time to wait until deciding upon a leader.
 * This is part of the leader election algorithm.
 */

public class FastLeaderElection implements Election {

    private static final Logger LOG = LoggerFactory.getLogger(FastLeaderElection.class);

    /**
     * Determine how much time a process has to wait
     * once it believes that it has reached the end of
     * leader election.
     */
    static final int finalizeWait = 200;

    /**
     * Upper bound on the amount of time between two consecutive
     * notification checks. This impacts the amount of time to get
     * the system up again after long partitions. Currently 60 seconds.
     */
    private static int maxNotificationInterval = 60000;

    /**
     * Lower bound for the notification check interval. Observers don't need
     * to use the same lower bound as participant members.
     */
    private static int minNotificationInterval = finalizeWait;

    /**
     * Minimum notification interval, default is equal to finalizeWait
     */
    public static final String MIN_NOTIFICATION_INTERVAL = "zookeeper.fastleader.minNotificationInterval";

    /**
     * Maximum notification interval, default is 60s
     */
    public static final String MAX_NOTIFICATION_INTERVAL = "zookeeper.fastleader.maxNotificationInterval";

    static {
        // Both notification-interval bounds can be overridden via system
        // properties at class-load time; the defaults above are kept when the
        // corresponding property is absent.
        minNotificationInterval = Integer.getInteger(MIN_NOTIFICATION_INTERVAL, minNotificationInterval);
        LOG.info("{} = {} ms", MIN_NOTIFICATION_INTERVAL, minNotificationInterval);
        maxNotificationInterval = Integer.getInteger(MAX_NOTIFICATION_INTERVAL, maxNotificationInterval);
        LOG.info("{} = {} ms", MAX_NOTIFICATION_INTERVAL, maxNotificationInterval);
    }

    /**
     * Connection manager. Fast leader election uses TCP for
     * communication between peers, and QuorumCnxManager manages
     * such connections.
     */
    QuorumCnxManager manager;

    private SyncedLearnerTracker leadingVoteSet;

    /**
     * Notifications are messages that let other peers know that
     * a given peer has changed its vote, either because it has
     * joined leader election or because it learned of another
     * peer with higher zxid or same zxid and higher server id.
     * A Notification is the parsed form of a vote received from another
     * server; it carries the proposed leader's id and zxid, the sender's
     * election epoch and state, etc.
     */

    public static class Notification {
        /**
         * Format version, introduced in 3.4.6.
         */
        public static final int CURRENTVERSION = 0x2;

        // Wire-format version the message was encoded with (0x0 for legacy peers).
        int version;

        /**
         * Proposed leader (server id of the candidate being voted for).
         */
        long leader;

        /**
         * zxid of the proposed leader.
         */
        long zxid;

        /**
         * Epoch (election round) of the sender.
         */
        long electionEpoch;

        /**
         * Current state of the sender.
         */
        QuorumPeer.ServerState state;

        /**
         * Address (server id) of the sender.
         */
        long sid;

        // Quorum configuration carried by the message, if the wire version
        // includes one (version > 0x1); otherwise null.
        QuorumVerifier qv;

        /**
         * Epoch of the proposed leader.
         */
        long peerEpoch;

    }

    static byte[] dummyData = new byte[0];

    /**
     * Messages that a peer wants to send to other peers.
     * These messages can be both Notifications and Acks
     * of reception of notification. A ToSend carries the proposed leader's
     * id and zxid, the sender's election epoch and state, etc.
     */
    public static class ToSend {

        enum mType {
            crequest,
            challenge,
            notification,
            ack
        }

        /**
         * @param type          kind of message (currently unused: not stored in any field)
         * @param leader        proposed leader's server id
         * @param zxid          proposed leader's zxid
         * @param electionEpoch sender's election round
         * @param state         sender's server state
         * @param sid           recipient's server id
         * @param peerEpoch     proposed leader's epoch
         * @param configData    serialized quorum configuration to attach
         */
        ToSend(mType type, long leader, long zxid, long electionEpoch, ServerState state, long sid, long peerEpoch, byte[] configData) {
            this.leader = leader;
            this.zxid = zxid;
            this.electionEpoch = electionEpoch;
            this.state = state;
            this.sid = sid;
            this.peerEpoch = peerEpoch;
            this.configData = configData;
        }

        /**
         * Proposed leader in the case of notification.
         */
        long leader;

        /**
         * id contains the tag for acks, and zxid for notifications.
         */
        long zxid;

        /**
         * Epoch (election round) of the sender.
         */
        long electionEpoch;

        /**
         * Current state of the sender.
         */
        QuorumPeer.ServerState state;

        /**
         * Address (server id) of the recipient.
         */
        long sid;

        /**
         * Used to send a QuorumVerifier (configuration info).
         */
        byte[] configData = dummyData;

        /**
         * Epoch of the proposed leader.
         */
        long peerEpoch;

    }

    /** Queue of votes waiting to be sent to other servers (drained by WorkerSender). */
    LinkedBlockingQueue<ToSend> sendqueue;
    /** Queue of votes received from other servers (filled by WorkerReceiver). */
    LinkedBlockingQueue<Notification> recvqueue;

    /**
     * Multi-threaded implementation of message handler. Messenger
     * implements two sub-classes: WorkReceiver and WorkSender. The
     * functionality of each is obvious from the name. Each of these
     * spawns a new thread.
     */
    protected class Messenger {

        /**
         * Vote receiver. Continuously pulls raw election messages from
         * QuorumCnxManager, converts each one into a {@link Notification} and
         * stores it in recvqueue — i.e. it turns QuorumCnxManager.Message into
         * FastLeaderElection.Notification. If an incoming vote belongs to an
         * earlier election round than ours, it is ignored and this server's
         * own vote is sent back immediately.
         * <br/>Receives messages from instance of QuorumCnxManager on method run(), and processes such messages.
         */
        class WorkerReceiver extends ZooKeeperThread {

            // Set by Messenger.halt() to terminate the receive loop.
            volatile boolean stop;
            // Connection manager that supplies raw messages from the other servers.
            QuorumCnxManager manager;

            WorkerReceiver(QuorumCnxManager manager) {
                super("WorkerReceiver");
                this.stop = false;
                this.manager = manager;
            }

            /**
             * Main loop. Polls the manager's receive queue, validates and
             * decodes each message — which must carry at least the sender's
             * state (int), proposed leader (long), zxid (long) and election
             * epoch (long), i.e. 28 bytes — and then:
             * <ul>
             * <li>if the sender is not a voting member (e.g. an observer),
             *     replies right away with this server's current vote;</li>
             * <li>if this server is LOOKING, offers the parsed Notification
             *     to recvqueue, and additionally answers a LOOKING sender
             *     whose logical clock lags behind ours with our own vote;</li>
             * <li>if this server is not LOOKING but the sender is, replies
             *     with what this server believes to be the leader.</li>
             * </ul>
             *
             * NOTE(review): the two reply paths below previously serialized
             * the quorum config with the platform-default charset
             * (getBytes()); they now use UTF-8 like the non-voter path and
             * the decoder (configFromString reads UTF-8).
             */
            @Override
            public void run() {
                Message response;
                while (!stop) {
                    // Sleeps on receive
                    try {
                        // Take the next raw election message sent by another server.
                        response = manager.pollRecvQueue(3000, TimeUnit.MILLISECONDS);
                        if (response == null) {
                            // Nothing arrived within the timeout; poll again.
                            continue;
                        }

                        final int capacity = response.buffer.capacity();

                        // The current protocol and two previous generations all send at least 28 bytes
                        if (capacity < 28) {
                            LOG.error("Got a short response from server {}: {}", response.sid, capacity);
                            continue;
                        }

                        // this is the backwardCompatibility mode in place before ZK-107
                        // It is for a version of the protocol in which we didn't send peer epoch
                        // With peer epoch and version the message became 40 bytes
                        boolean backCompatibility28 = (capacity == 28);

                        // this is the backwardCompatibility mode for no version information
                        boolean backCompatibility40 = (capacity == 40);

                        // Reset position/limit so decoding starts at the beginning.
                        response.buffer.clear();

                        // Instantiate Notification and set its attributes
                        Notification n = new Notification();

                        // State of the server that sent this vote.
                        int rstate = response.buffer.getInt();
                        long rleader = response.buffer.getLong();
                        long rzxid = response.buffer.getLong();
                        long relectionEpoch = response.buffer.getLong();
                        long rpeerepoch;

                        int version = 0x0;
                        QuorumVerifier rqv = null;

                        try {
                            if (!backCompatibility28) {
                                rpeerepoch = response.buffer.getLong();
                                if (!backCompatibility40) {
                                    /*
                                     * Version added in 3.4.6
                                     */
                                    version = response.buffer.getInt();
                                } else {
                                    LOG.info("Backward compatibility mode (36 bits), server id: {}", response.sid);
                                }
                            } else {
                                LOG.info("Backward compatibility mode (28 bits), server id: {}", response.sid);
                                rpeerepoch = ZxidUtils.getEpochFromZxid(rzxid);
                            }

                            // check if we have a version that includes config. If so extract config info from message.
                            if (version > 0x1) {
                                int configLength = response.buffer.getInt();

                                // we want to avoid errors caused by the allocation of a byte array with negative length
                                // (causing NegativeArraySizeException) or huge length (causing e.g. OutOfMemoryError)
                                if (configLength < 0 || configLength > capacity) {
                                    throw new IOException(String.format("Invalid configLength in notification message! sid=%d, capacity=%d, version=%d, configLength=%d", response.sid, capacity, version, configLength));
                                }

                                byte[] b = new byte[configLength];
                                response.buffer.get(b);

                                synchronized (self) {
                                    try {
                                        rqv = self.configFromString(new String(b, UTF_8));
                                        QuorumVerifier curQV = self.getQuorumVerifier();
                                        if (rqv.getVersion() > curQV.getVersion()) {
                                            LOG.info("{} Received version: {} my version: {}",
                                                     self.getId(),
                                                     Long.toHexString(rqv.getVersion()),
                                                     Long.toHexString(self.getQuorumVerifier().getVersion()));
                                            if (self.getPeerState() == ServerState.LOOKING) {
                                                LOG.debug("Invoking processReconfig(), state: {}", self.getServerState());
                                                self.processReconfig(rqv, null, null, false);
                                                if (!rqv.equals(curQV)) {
                                                    LOG.info("restarting leader election");
                                                    self.shuttingDownLE = true;
                                                    self.getElectionAlg().shutdown();

                                                    // Election is restarting; this receiver must exit.
                                                    break;
                                                }
                                            } else {
                                                LOG.debug("Skip processReconfig(), state: {}", self.getServerState());
                                            }
                                        }
                                    } catch (IOException | ConfigException e) {
                                        LOG.error("Something went wrong while processing config received from {}", response.sid);
                                    }
                                }
                            } else {
                                LOG.info("Backward compatibility mode (before reconfig), server id: {}", response.sid);
                            }
                        } catch (BufferUnderflowException | IOException e) {
                            LOG.warn("Skipping the processing of a partial / malformed response message sent by sid={} (message length: {})", response.sid, capacity, e);
                            continue;
                        }
                        /*
                         * If it is from a non-voting server (such as an observer or
                         * a non-voting follower), respond right away.
                         */
                        if (!validVoter(response.sid)) {
                            // Reply with this server's own current vote.
                            Vote current = self.getCurrentVote();
                            QuorumVerifier qv = self.getQuorumVerifier();
                            ToSend notmsg = new ToSend(
                                ToSend.mType.notification,
                                current.getId(),
                                current.getZxid(),
                                logicalclock.get(),
                                self.getPeerState(),
                                response.sid, // recipient server id
                                current.getPeerEpoch(),
                                qv.toString().getBytes(UTF_8));
                            // Queue for asynchronous delivery by WorkerSender.
                            sendqueue.offer(notmsg);
                        } else {
                            // The sender is a voting member: decode its state and
                            // build a Notification from the parsed fields.
                            // Receive new message
                            LOG.debug("Receive new notification message. My id = {}", self.getId());

                            // State of peer that sent this message
                            QuorumPeer.ServerState ackstate = QuorumPeer.ServerState.LOOKING;
                            switch (rstate) {
                            case 0:
                                ackstate = QuorumPeer.ServerState.LOOKING;
                                break;
                            case 1:
                                ackstate = QuorumPeer.ServerState.FOLLOWING;
                                break;
                            case 2:
                                ackstate = QuorumPeer.ServerState.LEADING;
                                break;
                            case 3:
                                ackstate = QuorumPeer.ServerState.OBSERVING;
                                break;
                            default:
                                // Unknown state code: drop the message.
                                continue;
                            }

                            // Leader proposed by the sender.
                            n.leader = rleader;
                            // zxid of the proposed leader.
                            n.zxid = rzxid;
                            // electionEpoch is bumped at the start of each election
                            // round; this is the sender's round.
                            n.electionEpoch = relectionEpoch;
                            n.state = ackstate;
                            n.sid = response.sid;
                            // Epoch of the proposed leader.
                            n.peerEpoch = rpeerepoch;
                            n.version = version;
                            n.qv = rqv;
                            /*
                             * Print notification info
                             */
                            LOG.info(
                                "Notification: my state:{}; n.sid:{}, n.state:{}, n.leader:{}, n.round:0x{}, "
                                    + "n.peerEpoch:0x{}, n.zxid:0x{}, message format version:0x{}, n.config version:0x{}",
                                self.getPeerState(),
                                n.sid,
                                n.state,
                                n.leader,
                                Long.toHexString(n.electionEpoch),
                                Long.toHexString(n.peerEpoch),
                                Long.toHexString(n.zxid),
                                Long.toHexString(n.version),
                                (n.qv != null ? (Long.toHexString(n.qv.getVersion())) : "0"));

                            /*
                             * If this server is looking, then send proposed leader
                             */
                            if (self.getPeerState() == QuorumPeer.ServerState.LOOKING) {
                                // Hand the parsed vote to the election loop.
                                recvqueue.offer(n);

                                /*
                                 * Send a notification back if the peer that sent this
                                 * message is also looking and its logical clock is
                                 * lagging behind.
                                 */
                                if ((ackstate == QuorumPeer.ServerState.LOOKING)
                                    && (n.electionEpoch < logicalclock.get())) {
                                    // Sender is in an older round: push our current vote to it.
                                    Vote v = getVote();
                                    QuorumVerifier qv = self.getQuorumVerifier();
                                    ToSend notmsg = new ToSend(
                                        ToSend.mType.notification,
                                        v.getId(),
                                        v.getZxid(),
                                        logicalclock.get(),
                                        self.getPeerState(),
                                        response.sid,
                                        v.getPeerEpoch(),
                                        // FIX: was getBytes() (platform-default charset);
                                        // use UTF-8 to match the decoder and the other send paths.
                                        qv.toString().getBytes(UTF_8));
                                    sendqueue.offer(notmsg);
                                }
                            } else {
                                /*
                                 * If this server is not looking, but the one that sent the ack
                                 * is looking, then send back what it believes to be the leader.
                                 */
                                // This server already follows/leads; share its view of the leader.
                                Vote current = self.getCurrentVote();
                                if (ackstate == QuorumPeer.ServerState.LOOKING) {
                                    if (self.leader != null) {
                                        if (leadingVoteSet != null) {
                                            self.leader.setLeadingVoteSet(leadingVoteSet);
                                            leadingVoteSet = null;
                                        }
                                        self.leader.reportLookingSid(response.sid);
                                    }


                                    LOG.debug(
                                        "Sending new notification. My id ={} recipient={} zxid=0x{} leader={} config version = {}",
                                        self.getId(),
                                        response.sid,
                                        Long.toHexString(current.getZxid()),
                                        current.getId(),
                                        Long.toHexString(self.getQuorumVerifier().getVersion()));

                                    QuorumVerifier qv = self.getQuorumVerifier();
                                    ToSend notmsg = new ToSend(
                                        ToSend.mType.notification,
                                        current.getId(),
                                        current.getZxid(),
                                        current.getElectionEpoch(),
                                        self.getPeerState(),
                                        response.sid,
                                        current.getPeerEpoch(),
                                        // FIX: was getBytes() (platform-default charset); use UTF-8.
                                        qv.toString().getBytes(UTF_8));
                                    sendqueue.offer(notmsg);
                                }
                            }
                        }
                    } catch (InterruptedException e) {
                        LOG.warn("Interrupted Exception while waiting for new message", e);
                    }
                }
                LOG.info("WorkerReceiver is down");
            }

        }

        /**
         * Vote sender. Continuously takes ToSend messages off sendqueue,
         * serializes each one into the wire format and hands it to
         * QuorumCnxManager for delivery — i.e. it turns
         * FastLeaderElection.ToSend into a QuorumCnxManager message.
         */
        class WorkerSender extends ZooKeeperThread {

            // Set by Messenger.halt() to terminate the send loop.
            volatile boolean stop;
            // Connection manager used to deliver serialized votes.
            QuorumCnxManager manager;

            WorkerSender(QuorumCnxManager manager) {
                super("WorkerSender");
                this.stop = false;
                this.manager = manager;
            }

            /**
             * Polls sendqueue until halted; every dequeued message is
             * serialized and forwarded via {@link #process(ToSend)}.
             */
            @Override
            public void run() {
                while (!stop) {
                    try {
                        ToSend pending = sendqueue.poll(3000, TimeUnit.MILLISECONDS);
                        if (pending != null) {
                            process(pending);
                        }
                    } catch (InterruptedException e) {
                        break;
                    }
                }
                LOG.info("WorkerSender is down");
            }

            /**
             * Called by run() once there is a new message to send.
             *
             * @param m     message to send
             */
            void process(ToSend m) {
                // Serialize into the current wire format, then queue on the manager.
                ByteBuffer wireBuffer = buildMsg(m.state.ordinal(), m.leader, m.zxid, m.electionEpoch, m.peerEpoch, m.configData);
                manager.toSend(m.sid, wireBuffer);
            }

        }

        /** Vote sender worker (drains sendqueue). */
        WorkerSender ws;
        /** Vote receiver worker (fills recvqueue). */
        WorkerReceiver wr;
        Thread wsThread = null;
        Thread wrThread = null;

        /**
         * Constructor of class Messenger.
         *
         * @param manager   Connection manager
         */
        Messenger(QuorumCnxManager manager) {
            // Sender side: drains sendqueue and pushes votes to the cnx manager.
            ws = new WorkerSender(manager);
            wsThread = new Thread(ws, "WorkerSender[myid=" + self.getId() + "]");
            // Daemon threads must not keep the JVM alive on their own.
            wsThread.setDaemon(true);

            // Receiver side: parses raw messages into Notifications.
            wr = new WorkerReceiver(manager);
            wrThread = new Thread(wr, "WorkerReceiver[myid=" + self.getId() + "]");
            wrThread.setDaemon(true);
        }

        /**
         * Starts instances of WorkerSender and WorkerReceiver
         * (i.e. launches wsThread and wrThread).
         */
        void start() {
            this.wsThread.start();
            this.wrThread.start();
        }

        /**
         * Stops instances of WorkerSender and WorkerReceiver
         */
        void halt() {
            // Non-blocking: the workers observe the flag on their next
            // 3-second poll timeout and then exit their loops.
            this.ws.stop = true;
            this.wr.stop = true;
        }

    }

    /** The QuorumPeer (local server) on whose behalf this election instance runs. */
    QuorumPeer self;
    Messenger messenger;
    /**
     * Logical clock, i.e. the election round counter. All votes cast within
     * the same round carry the same value; it is bumped when a new round
     * starts and compared against the epoch of incoming votes.
     */
    AtomicLong logicalclock = new AtomicLong(); /* Election instance */
    /** Server id of the currently proposed leader. */
    long proposedLeader;
    /** zxid of the currently proposed leader. */
    long proposedZxid;
    /** Epoch of the currently proposed leader. */
    long proposedEpoch;

    /**
     * Returns the current value of the logical clock counter (the election
     * round this server is in).
     */
    public long getLogicalClock() {
        return this.logicalclock.get();
    }

    /**
     * Serializes a vote into the legacy 40-byte wire format (version 0x1,
     * no configuration payload). This overload is called directly only in
     * tests.
     *
     * @param state         sender's server state ordinal
     * @param leader        proposed leader's server id
     * @param zxid          proposed leader's zxid
     * @param electionEpoch sender's election round
     * @param epoch         proposed leader's epoch
     * @return an array-backed buffer whose position is at the packet's end
     */
    static ByteBuffer buildMsg(int state, long leader, long zxid, long electionEpoch, long epoch) {
        ByteBuffer packet = ByteBuffer.allocate(40);
        packet.putInt(state)
              .putLong(leader)
              .putLong(zxid)
              .putLong(electionEpoch)
              .putLong(epoch)
              .putInt(0x1);
        return packet;
    }

    /**
     * Serializes a vote into the current wire format: state, proposed leader,
     * zxid, election epoch, peer epoch, format version, then the
     * length-prefixed quorum configuration.
     *
     * @param state         sender's server state ordinal
     * @param leader        proposed leader's server id
     * @param zxid          proposed leader's zxid
     * @param electionEpoch sender's election round
     * @param epoch         proposed leader's epoch
     * @param configData    serialized quorum configuration to append
     * @return an array-backed buffer whose position is at the packet's end
     */
    static ByteBuffer buildMsg(int state, long leader, long zxid, long electionEpoch, long epoch, byte[] configData) {
        ByteBuffer packet = ByteBuffer.allocate(44 + configData.length);
        packet.putInt(state)
              .putLong(leader)
              .putLong(zxid)
              .putLong(electionEpoch)
              .putLong(epoch)
              .putInt(Notification.CURRENTVERSION)
              .putInt(configData.length)
              .put(configData);
        return packet;
    }

    /**
     * Constructor of FastLeaderElection. It takes two parameters, one
     * is the QuorumPeer object that instantiated this object, and the other
     * is the connection manager. Such an object should be created only once
     * by each peer during an instance of the ZooKeeper service.
     *
     * @param self  QuorumPeer that created this object
     * @param manager   Connection manager
     */
    public FastLeaderElection(QuorumPeer self, QuorumCnxManager manager) {
        // The election is live until shutdown() flips this flag.
        this.stop = false;
        this.manager = manager;
        // The rest of the setup (queues, messenger threads) lives in starter().
        starter(self, manager);
    }

    /**
     * This method is invoked by the constructor. Because it is a
     * part of the starting procedure of the object that must be on
     * any constructor of this class, it is probably best to keep as
     * a separate method. As we have a single constructor currently,
     * it is not strictly necessary to have it separate.
     *
     * <p>It creates the send/receive queues and the Messenger; the
     * messenger's WorkerSender/WorkerReceiver threads are created here but
     * only launched by {@link #start()}, so no election is running yet when
     * this returns.
     *
     * @param self      QuorumPeer that created this object
     * @param manager   Connection manager
     */
    private void starter(QuorumPeer self, QuorumCnxManager manager) {
        // Remember the local peer and reset the current proposal.
        this.self = self;
        proposedLeader = -1;
        proposedZxid = -1;

        // Queues linking the election loop to the messenger's worker threads.
        sendqueue = new LinkedBlockingQueue<>();
        recvqueue = new LinkedBlockingQueue<>();
        this.messenger = new Messenger(manager);
    }

    /**
     * Starts the sender and receiver worker threads; this is the point at
     * which the election machinery actually begins exchanging messages.
     */
    public void start() {
        messenger.start();
    }

    /**
     * Called once this peer settles on a final vote: logs the decision and
     * discards any notifications still queued for this election instance.
     *
     * @param v the final vote this peer is leaving the election with
     */
    private void leaveInstance(Vote v) {
        LOG.debug(
            "About to leave FLE instance: leader={}, zxid=0x{}, my id={}, my state={}",
            v.getId(),
            Long.toHexString(v.getZxid()),
            self.getId(),
            self.getPeerState());
        // Stale notifications are irrelevant once this election is decided.
        recvqueue.clear();
    }

    /**
     * @return the connection manager used to exchange election messages
     */
    public QuorumCnxManager getCnxManager() {
        return this.manager;
    }

    /**
     * Flag signalling that the election should stop. Written by shutdown()
     * and read by the election loop and worker threads, hence volatile for
     * cross-thread visibility.
     */
    volatile boolean stop;

    /**
     * Stops the election: raises the stop flag, clears the current proposal
     * and leading vote set, then halts the connection manager and the
     * messenger worker threads.
     */
    @Override
    public void shutdown() {
        stop = true;
        // Reset proposal state so a stale vote cannot leak into a later round.
        proposedLeader = -1;
        proposedZxid = -1;
        leadingVoteSet = null;
        LOG.debug("Shutting down connection manager");
        manager.halt();
        LOG.debug("Shutting down messenger");
        messenger.halt();
        LOG.debug("FLE is down");
    }

    /**
     * Queues a notification carrying our current vote for every voter in the
     * current (and, during reconfiguration, next) configuration. Sending is
     * asynchronous: messages are appended to sendqueue and shipped later by
     * the WorkerSender thread, so this never blocks on the network.
     */
    private void sendNotifications() {
        for (long sid : self.getCurrentAndNextConfigVoters()) {
            // One notification per recipient, all carrying the same proposal
            // plus the serialized quorum-verifier config.
            ToSend notmsg = new ToSend(
                ToSend.mType.notification,
                proposedLeader,
                proposedZxid,
                logicalclock.get(),
                QuorumPeer.ServerState.LOOKING,
                sid,
                proposedEpoch,
                self.getQuorumVerifier().toString().getBytes(UTF_8));

            LOG.debug(
                "Sending Notification: {} (n.leader), 0x{} (n.zxid), 0x{} (n.round), {} (recipient),"
                    + " {} (myid), 0x{} (n.peerEpoch) ",
                proposedLeader,
                Long.toHexString(proposedZxid),
                Long.toHexString(logicalclock.get()),
                sid,
                self.getId(),
                Long.toHexString(proposedEpoch));

            sendqueue.offer(notmsg);
        }
    }

    /**
     * Checks whether a received (server id, zxid, epoch) triple beats our
     * current vote. Precedence is epoch first, then zxid, then server id;
     * a vote proposing a zero-weight server never wins.
     *
     * @return true iff the new vote is strictly better than the current one
     */
    protected boolean totalOrderPredicate(long newId, long newZxid, long newEpoch, long curId, long curZxid, long curEpoch) {
        LOG.debug(
            "id: {}, proposed id: {}, zxid: 0x{}, proposed zxid: 0x{}",
            newId,
            curId,
            Long.toHexString(newZxid),
            Long.toHexString(curZxid));

        // Servers with weight 0 cannot be elected leader.
        if (self.getQuorumVerifier().getWeight(newId) == 0) {
            return false;
        }

        /*
         * The new vote wins if one of the following three cases holds:
         * 1- New epoch is higher
         * 2- Epochs are equal, but new zxid is higher
         * 3- Epochs and zxids are equal, but server id is higher
         */
        if (newEpoch != curEpoch) {
            return newEpoch > curEpoch;
        }
        if (newZxid != curZxid) {
            return newZxid > curZxid;
        }
        return newId > curId;
    }

    /**
     * Given the set of received votes and the vote we just cast, builds a
     * SyncedLearnerTracker recording which peers acked an identical vote.
     * The tracker is later asked whether those acks form a quorum, which
     * determines whether this election round can end.
     *
     * @param votes map of sid to the last vote received from that server
     * @param vote  the vote to match against (this peer's current choice)
     * @return tracker holding an ack for every peer that voted identically
     */
    protected SyncedLearnerTracker getVoteTracker(Map<Long, Vote> votes, Vote vote) {
        SyncedLearnerTracker voteSet = new SyncedLearnerTracker();
        voteSet.addQuorumVerifier(self.getQuorumVerifier());
        // During reconfiguration a pending (newer) configuration must also
        // reach quorum on the same vote.
        QuorumVerifier lastSeen = self.getLastSeenQuorumVerifier();
        if (lastSeen != null && lastSeen.getVersion() > self.getQuorumVerifier().getVersion()) {
            voteSet.addQuorumVerifier(lastSeen);
        }

        /*
         * First make the views consistent. Sometimes peers will have different
         * zxids for a server depending on timing.
         */
        // Record an ack from every server whose vote matches ours exactly.
        for (Map.Entry<Long, Vote> entry : votes.entrySet()) {
            if (vote.equals(entry.getValue())) {
                voteSet.addAck(entry.getKey());
            }
        }

        return voteSet;
    }

    /**
     * In the case there is a leader elected, and a quorum supporting this
     * leader, checks that the leader has voted and acked that it is leading.
     * This avoids peers repeatedly electing a peer that has crashed and is
     * no longer leading.
     *
     * If someone else is proposed as leader, we must have seen a LEADING
     * vote from it; if we ourselves are proposed, our logical clock must
     * match the election epoch ("if everyone else thinks I'm the leader, I
     * must be the leader").
     *
     * @param votes         set of votes (recvset or outofelection)
     * @param leader        proposed leader id
     * @param electionEpoch epoch of the election that produced the proposal
     * @return true iff the proposed leader passes the check
     */
    protected boolean checkLeader(Map<Long, Vote> votes, long leader, long electionEpoch) {
        if (leader == self.getId()) {
            // I am the proposed leader: the proposal must belong to my
            // current election round.
            return logicalclock.get() == electionEpoch;
        }
        // Another peer is proposed: it must have announced itself LEADING.
        Vote leaderVote = votes.get(leader);
        if (leaderVote == null) {
            return false;
        }
        return leaderVote.getState() == ServerState.LEADING;
    }

    /**
     * Atomically replaces this peer's current proposal.
     *
     * @param leader sid of the server we now propose as leader
     * @param zxid   last logged zxid of that server
     * @param epoch  peer epoch of that server
     */
    synchronized void updateProposal(long leader, long zxid, long epoch) {
        LOG.debug(
            "Updating proposal: {} (newleader), 0x{} (newzxid), {} (oldleader), 0x{} (oldzxid)",
            leader,
            Long.toHexString(zxid),
            proposedLeader,
            Long.toHexString(proposedZxid));

        proposedLeader = leader;
        proposedZxid = zxid;
        proposedEpoch = epoch;
    }

    /**
     * @return a snapshot of the current proposal as a Vote
     */
    public synchronized Vote getVote() {
        return new Vote(proposedLeader, proposedZxid, proposedEpoch);
    }

    /**
     * A learning state can be either FOLLOWING or OBSERVING.
     * This method simply decides which one depending on the
     * role of the server.
     *
     * @return FOLLOWING for participants, OBSERVING for observers
     */
    private ServerState learningState() {
        if (self.getLearnerType() != LearnerType.PARTICIPANT) {
            LOG.debug("I am an observer: {}", self.getId());
            return ServerState.OBSERVING;
        }
        LOG.debug("I am a participant: {}", self.getId());
        return ServerState.FOLLOWING;
    }

    /**
     * Returns this server's id if it is a voting member of the current
     * configuration, otherwise Long.MIN_VALUE so that an initial vote for a
     * non-voter can never win a comparison.
     *
     * @return initial vote value of the server identifier
     */
    private long getInitId() {
        boolean isVoter = self.getQuorumVerifier().getVotingMembers().containsKey(self.getId());
        return isVoter ? self.getId() : Long.MIN_VALUE;
    }

    /**
     * Returns the initial last logged zxid: the real value for participants,
     * Long.MIN_VALUE for observers so their votes can never win.
     *
     * @return initial zxid value for this peer's vote
     */
    private long getInitLastLoggedZxid() {
        return (self.getLearnerType() == LearnerType.PARTICIPANT)
                ? self.getLastLoggedZxid()
                : Long.MIN_VALUE;
    }

    /**
     * Returns the initial vote value of the peer epoch: the current epoch
     * for participants, Long.MIN_VALUE for observers so their votes can
     * never win.
     *
     * @return initial peer epoch for this peer's vote
     * @throws RuntimeException if the current epoch cannot be read
     */
    private long getPeerEpoch() {
        if (self.getLearnerType() != LearnerType.PARTICIPANT) {
            return Long.MIN_VALUE;
        }
        try {
            return self.getCurrentEpoch();
        } catch (IOException e) {
            // Wrap with the cause attached. The previous code built a bare
            // RuntimeException and only copied the stack trace, which
            // dropped the cause chain (and any suppressed exceptions).
            throw new RuntimeException(e.getMessage(), e);
        }
    }

    /**
     * Update the peer state based on the given proposedLeader: LEADING if we
     * are the elected leader (also capturing the quorum vote set that
     * elected us), otherwise FOLLOWING or OBSERVING per our learner type.
     */
    private void setPeerState(long proposedLeader, SyncedLearnerTracker voteSet) {
        boolean iAmLeader = proposedLeader == self.getId();
        ServerState ss = iAmLeader ? ServerState.LEADING : learningState();
        self.setPeerState(ss);
        if (iAmLeader) {
            // Remember which votes elected us; used when assuming leadership.
            leadingVoteSet = voteSet;
        }
    }

    /**
     * Starts a new round of leader election. Whenever our QuorumPeer
     * changes its state to LOOKING, this method is invoked, and it
     * sends notifications to all other peers.
     *
     * Outline:
     * 1. Record the election start time, bump the logical clock (the round
     *    counter) and initialize our proposal to vote for ourselves, then
     *    broadcast it to all voters.
     * 2. While still LOOKING, poll the receive queue for external votes.
     *    When none arrive before the timeout, either resend our vote (if
     *    connections exist) or reconnect to all peers, with exponential
     *    backoff on the timeout.
     * 3. For each valid LOOKING vote, compare election rounds: a newer
     *    round resets our clock and collected votes and re-evaluates our
     *    proposal; an older round is ignored; an equal round triggers a
     *    vote comparison (totalOrderPredicate) that may update and rebroadcast
     *    our proposal. Every vote is archived in recvset.
     * 4. Once a quorum agrees with our proposal, keep draining the queue
     *    for finalizeWait ms; if no better vote shows up, commit the result.
     * 5. FOLLOWING/LEADING notifications are handled by the shared helpers
     *    and may end the election immediately by proving an existing leader.
     *
     * @return the final elected Vote, or null if the loop exits without one
     * @throws InterruptedException if interrupted while waiting for votes
     */
    @Override
	public Vote lookForLeader() throws InterruptedException {
        try {
            self.jmxLeaderElectionBean = new LeaderElectionBean();
            MBeanRegistry.getInstance().register(self.jmxLeaderElectionBean, self.jmxLocalPeerBean);
        } catch (Exception e) {
            LOG.warn("Failed to register with JMX", e);
            self.jmxLeaderElectionBean = null;
        }

        self.start_fle = Time.currentElapsedTime();
        try {
            /*
             * The votes from the current leader election are stored in recvset. In other words, a vote v is in recvset
             * if v.electionEpoch == logicalclock. The current participant uses recvset to deduce on whether a majority
             * of participants has voted for it.
             */
            // Ballot box for this round: key = sender sid, value = its vote.
        	Map<Long, Vote> recvset = new HashMap<Long, Vote>();

            /*
             * The votes from previous leader elections, as well as the votes from the current leader election are
             * stored in outofelection. Note that notifications in a LOOKING state are not stored in outofelection.
             * Only FOLLOWING or LEADING notifications are stored in outofelection. The current participant could use
             * outofelection to learn which participant is the leader if it arrives late (i.e., higher logicalclock than
             * the electionEpoch of the received notifications) in a leader election.
             */
            Map<Long, Vote> outofelection = new HashMap<Long, Vote>();

            int notTimeout = minNotificationInterval;

            // 1. Start a new round: increment the logical clock (used to tell
            //    rounds apart) and initialize our proposal from our own
            //    (myid, lastZxid, epoch) so we initially vote for ourselves.
            synchronized (this) {
                logicalclock.incrementAndGet();
                updateProposal(getInitId(), getInitLastLoggedZxid(), getPeerEpoch());
            }

            LOG.info(
                "New election. My id = {}, proposed zxid=0x{}",
                self.getId(),
                Long.toHexString(proposedZxid));
            // Queue ToSend messages on sendqueue; WorkerSender delivers them
            // asynchronously to every voter.
            sendNotifications();

            SyncedLearnerTracker voteSet = null;

            /*
             * Loop in which we exchange notifications until we find a leader
             */
            // Keep going while this server is LOOKING and not shut down.
            while ((self.getPeerState() == ServerState.LOOKING) && (!stop)) {
                /*
                 * Remove next notification from queue, times out after 2 times the termination time
                 */
            	// 2. Pull the next external vote off recvqueue. On timeout we
            	//    check connectivity: if everything queued was delivered we
            	//    resend our vote, otherwise we (re)connect to all peers.
                Notification n = recvqueue.poll(notTimeout, TimeUnit.MILLISECONDS);

                /*
                 * Sends more notifications if haven't received enough.
                 * Otherwise processes new notification.
                 */
                // No vote arrived before the timeout.
                if (n == null) {
                	// We already sent notifications once (voting for ourselves);
                	// undelivered messages suggest connections are not up yet.
                    if (manager.haveDelivered()) {
                    	// All queued messages went out (connections exist):
                    	// resend our current vote.
                        sendNotifications();
                    } else {
                    	// Some messages never left (no connection): connect to
                    	// every peer so the next poll may actually receive.
                        manager.connectAll();
                    }

                    /*
                     * Exponential backoff
                     */
                    notTimeout = Math.min(notTimeout << 1, maxNotificationInterval);

                    /*
                     * When a leader failure happens on a master, the backup will be supposed to receive the honour from
                     * Oracle and become a leader, but the honour is likely to be delay. We do a re-check once timeout happens
                     *
                     * The leader election algorithm does not provide the ability of electing a leader from a single instance
                     * which is in a configuration of 2 instances.
                     * */
                    if (self.getQuorumVerifier() instanceof QuorumOracleMaj
                            && self.getQuorumVerifier().revalidateVoteset(voteSet, notTimeout != minNotificationInterval)) {
                        setPeerState(proposedLeader, voteSet);
                        Vote endVote = new Vote(proposedLeader, proposedZxid, logicalclock.get(), proposedEpoch);
                        leaveInstance(endVote);
                        return endVote;
                    }

                    LOG.info("Notification time out: {} ms", notTimeout);

                } else if (validVoter(n.sid) && validVoter(n.leader)) {
                	// 3. Received a vote from a valid voter proposing a valid
                	//    candidate. Handling depends on election rounds:
                	//    - sender's round newer: adopt its round, clear recvset,
                	//      re-evaluate our proposal against our initial vote,
                	//      then rebroadcast;
                	//    - sender's round older: ignore the vote;
                	//    - same round: compare votes and adopt/rebroadcast the
                	//      better one.
                	//    Either way the vote is archived in recvset; then we
                	//    check whether a quorum agrees with our proposal and, if
                	//    so, do a final sweep for an even better vote before
                	//    committing.

                    /*
                     * Only proceed if the vote comes from a replica in the current or next
                     * voting view for a replica in the current or next voting view.
                     */
                	// Dispatch on the state of the server that sent the vote.
                    switch (n.state) {
                    case LOOKING:
                    	// The sender is itself still electing.
                        if (getInitLastLoggedZxid() == -1) {
                            LOG.debug("Ignoring notification as our zxid is -1");
                            break;
                        }
                        if (n.zxid == -1) {
                            LOG.debug("Ignoring notification from member with -1 zxid {}", n.sid);
                            break;
                        }
                        // If notification > current, replace and send messages out
                        // Sender's election round is newer than ours: we likely
                        // fell behind (e.g. after a pause), so catch up.
                        if (n.electionEpoch > logicalclock.get()) {
                        	// Adopt the newer round as our own.
                            logicalclock.set(n.electionEpoch);
                            // Votes collected in the stale round are now invalid.
                            recvset.clear();

                            // Compare the received vote against our own initial
                            // vote (epoch, then zxid, then sid).
                            if (totalOrderPredicate(n.leader, n.zxid, n.peerEpoch, getInitId(), getInitLastLoggedZxid(), getPeerEpoch())) {
                            	// The received vote is better: adopt it.
                            	updateProposal(n.leader, n.zxid, n.peerEpoch);
                            } else {
                            	// Our own credentials are better: re-propose them.
                                updateProposal(getInitId(), getInitLastLoggedZxid(), getPeerEpoch());
                            }
                            // Broadcast the (possibly changed) proposal.
                            sendNotifications();
                        } else if (n.electionEpoch < logicalclock.get()) {
                        	// Sender's round is older than ours: log and ignore.
                            LOG.debug(
                                "Notification election epoch is smaller than logicalclock. n.electionEpoch = 0x{}, logicalclock=0x{}",
                                Long.toHexString(n.electionEpoch),
                                Long.toHexString(logicalclock.get()));
                            break;
                        } else if (totalOrderPredicate(n.leader, n.zxid, n.peerEpoch, proposedLeader, proposedZxid, proposedEpoch)) {
                        	// Same round and the received vote is better:
                        	// adopt it and broadcast the change.
                            updateProposal(n.leader, n.zxid, n.peerEpoch);
                            sendNotifications();
                        }

                        LOG.debug(
                            "Adding vote: from={}, proposed leader={}, proposed zxid=0x{}, proposed election epoch=0x{}",
                            n.sid,
                            n.leader,
                            Long.toHexString(n.zxid),
                            Long.toHexString(n.electionEpoch));

                        // don't care about the version if it's in LOOKING state
                        // Archive the external vote: recvset records every vote
                        // received by this server in the current round.
                        recvset.put(n.sid, new Vote(n.leader, n.zxid, n.electionEpoch, n.peerEpoch));

                        // Count how many archived votes agree with our proposal.
                        voteSet = getVoteTracker(recvset, new Vote(proposedLeader, proposedZxid, logicalclock.get(), proposedEpoch));

                        // A quorum agrees with our proposal: leader tentatively found.
                        if (voteSet.hasAllQuorums()) {

                            // Verify if there is any change in the proposed leader
                        	// Do not end the election yet: keep draining the queue
                        	// for up to finalizeWait ms in case a better vote is
                        	// still in flight. Non-improving votes are discarded.
                        	while ((n = recvqueue.poll(finalizeWait, TimeUnit.MILLISECONDS)) != null) {
                        		// If a strictly better vote arrives, requeue it and
                        		// restart the outer loop so it gets processed; the
                        		// inner loop ends when no new vote arrives in time.
                                if (totalOrderPredicate(n.leader, n.zxid, n.peerEpoch, proposedLeader, proposedZxid, proposedEpoch)) {
                                	// Push the better vote back for reprocessing.
                                    recvqueue.put(n);
                                    break;
                                }
                            }

                            /*
                             * This predicate is true once we don't read any new relevant message from the reception queue
                             */
                        	// No better vote appeared within the wait window: the
                        	// proposed leader stands. Commit the decision - this
                        	// server becomes leader if it voted for itself,
                        	// otherwise a learner.
                        	if (n == null) {
                                // Transition to LEADING/FOLLOWING/OBSERVING.
                        		setPeerState(proposedLeader, voteSet);
                                // Build the final vote for this election.
                        		Vote endVote = new Vote(proposedLeader, proposedZxid, logicalclock.get(), proposedEpoch);
                                // Clear any leftover notifications.
                        		leaveInstance(endVote);
                        		// Return the final vote to the caller.
                                return endVote;
                            }
                        }
                        break;
                    case OBSERVING:
                        LOG.debug("Notification from observer: {}", n.sid);
                        break;

                        /*
                        * In ZOOKEEPER-3922, we separate the behaviors of FOLLOWING and LEADING.
                        * To avoid the duplication of codes, we create a method called followingBehavior which was used to
                        * shared by FOLLOWING and LEADING. This method returns a Vote. When the returned Vote is null, it follows
                        * the original idea to break swtich statement; otherwise, a valid returned Vote indicates, a leader
                        * is generated.
                        *
                        * The reason why we need to separate these behaviors is to make the algorithm runnable for 2-node
                        * setting. An extra condition for generating leader is needed. Due to the majority rule, only when
                        * there is a majority in the voteset, a leader will be generated. However, in a configuration of 2 nodes,
                        * the number to achieve the majority remains 2, which means a recovered node cannot generate a leader which is
                        * the existed leader. Therefore, we need the Oracle to kick in this situation. In a two-node configuration, the Oracle
                        * only grants the permission to maintain the progress to one node. The oracle either grants the permission to the
                        * remained node and makes it a new leader when there is a faulty machine, which is the case to maintain the progress.
                        * Otherwise, the oracle does not grant the permission to the remained node, which further causes a service down.
                        *
                        * In the former case, when a failed server recovers and participate in the leader election, it would not locate a
                        * new leader because there does not exist a majority in the voteset. It fails on the containAllQuorum() infinitely due to
                        * two facts. First one is the fact that it does do not have a majority in the voteset. The other fact is the fact that
                        * the oracle would not give the permission since the oracle already gave the permission to the existed leader, the healthy machine.
                        * Logically, when the oracle replies with negative, it implies the existed leader which is LEADING notification comes from is a valid leader.
                        * To threat this negative replies as a permission to generate the leader is the purpose to separate these two behaviors.
                        *
                        *
                        * */
                    // 4. Votes from FOLLOWING/LEADING servers: verify the election
                    //    epoch against our logical clock, archive the vote, check
                    //    whether the election is already decided, and if so set
                    //    our state and return the final vote.
                    case FOLLOWING:
                        /*
                        * To avoid duplicate codes
                        * */
                        Vote resultFN = receivedFollowingNotification(recvset, outofelection, voteSet, n);
                        if (resultFN == null) {
                            break;
                        } else {
                            return resultFN;
                        }
                    case LEADING:
                    	// A LEADING vote means a leader already exists; adopt the
                    	// corresponding role and finish the election.
                    	/*
                        * In leadingBehavior(), it performs followingBehvior() first. When followingBehavior() returns
                        * a null pointer, ask Oracle whether to follow this leader.
                        * */
                        Vote resultLN = receivedLeadingNotification(recvset, outofelection, voteSet, n);
                        if (resultLN == null) {
                            break;
                        } else {
                            return resultLN;
                        }
                    default:
                        LOG.warn("Notification state unrecognized: {} (n.state), {}(n.sid)", n.state, n.sid);
                        break;
                    }
                } else {
                    if (!validVoter(n.leader)) {
                        LOG.warn("Ignoring notification for non-cluster member sid {} from sid {}", n.leader, n.sid);
                    }
                    if (!validVoter(n.sid)) {
                        LOG.warn("Ignoring notification for sid {} from non-quorum member sid {}", n.leader, n.sid);
                    }
                }
            }
            return null;
        } finally {
            try {
                if (self.jmxLeaderElectionBean != null) {
                    MBeanRegistry.getInstance().unregister(self.jmxLeaderElectionBean);
                }
            } catch (Exception e) {
                LOG.warn("Failed to unregister with JMX", e);
            }
            self.jmxLeaderElectionBean = null;
            LOG.debug("Number of connection processing threads: {}", manager.getConnectionThreadCount());
        }
    }

    /**
     * Handles a notification from a peer in FOLLOWING or LEADING state.
     *
     * If the notification belongs to our current round, the vote is archived
     * in recvset and, when a quorum agrees AND the announced leader passes
     * checkLeader(), the election ends. Otherwise (or additionally) the vote
     * is archived in outofelection, which also covers votes from earlier
     * rounds, so that a late-arriving peer can still discover an established
     * leader; in that path we also adopt the notification's election epoch.
     *
     * NOTE(review): the voteSet parameter is reassigned locally and the new
     * value is not visible to the caller (Java passes references by value) -
     * the caller's voteSet stays unchanged when this method returns null.
     * Presumably intentional from ZOOKEEPER-3922; confirm before relying on
     * the caller's voteSet after this call.
     *
     * @param recvset       votes from the current round
     * @param outofelection votes from previous and current rounds
     * @param voteSet       caller's tracker (see note above)
     * @param n             the FOLLOWING/LEADING notification to process
     * @return the final vote if the election is decided, otherwise null
     */
    private Vote receivedFollowingNotification(Map<Long, Vote> recvset, Map<Long, Vote> outofelection, SyncedLearnerTracker voteSet, Notification n) {
        /*
         * Consider all notifications from the same epoch together.
         */
    	// The notification belongs to our current election round.
        if (n.electionEpoch == logicalclock.get()) {
        	// Archive the sender's vote (including its state).
        	recvset.put(n.sid, new Vote(n.leader, n.zxid, n.electionEpoch, n.peerEpoch, n.state));
        	// Count how many archived votes match the announced leader; a
        	// quorum of identical votes confirms the sender's result.
            voteSet = getVoteTracker(recvset, new Vote(n.version, n.leader, n.zxid, n.electionEpoch, n.peerEpoch, n.state));
            // Quorum reached and the announced leader is actually leading?
            if (voteSet.hasAllQuorums() && checkLeader(recvset, n.leader, n.electionEpoch)) {
            	// Election decided: adopt the corresponding role.
            	setPeerState(n.leader, voteSet);
            	// Build the final vote.
            	Vote endVote = new Vote(n.leader, n.zxid, n.electionEpoch, n.peerEpoch);
            	// Discard remaining queued notifications.
                leaveInstance(endVote);
                return endVote;
            }
        }

        /*
         * Before joining an established ensemble, verify that
         * a majority are following the same leader.
         *
         * Note that the outofelection map also stores votes from the current leader election.
         * See ZOOKEEPER-1732 for more information.
         */
        outofelection.put(n.sid, new Vote(n.version, n.leader, n.zxid, n.electionEpoch, n.peerEpoch, n.state));
        voteSet = getVoteTracker(outofelection, new Vote(n.version, n.leader, n.zxid, n.electionEpoch, n.peerEpoch, n.state));

        if (voteSet.hasAllQuorums() && checkLeader(outofelection, n.leader, n.electionEpoch)) {
        	// Election decided via the out-of-election view.
            synchronized (this) {
            	// Adopt the epoch of the established election.
                logicalclock.set(n.electionEpoch);
                // Adopt the corresponding role.
                setPeerState(n.leader, voteSet);
            }
            // Build the final vote.
            Vote endVote = new Vote(n.leader, n.zxid, n.electionEpoch, n.peerEpoch);
            // Discard remaining queued notifications.
            leaveInstance(endVote);
            // Return the final vote.
            return endVote;
        }

        return null;
    }

    /**
     * Handles a notification from a peer that claims to be LEADING. First
     * applies the shared FOLLOWING logic; if that does not settle the
     * election and this configuration relies on an Oracle (two-node setup,
     * where a recovering node can never see a majority), asks the Oracle
     * whether to follow the announced leader anyway. A negative Oracle
     * reply implies the existing leader is valid, so we follow it;
     * checkLeader() is unnecessary here because this node cannot itself be
     * a leader candidate.
     *
     * @return the final vote if the election is settled, otherwise null
     */
    private Vote receivedLeadingNotification(Map<Long, Vote> recvset, Map<Long, Vote> outofelection, SyncedLearnerTracker voteSet, Notification n) {
        Vote result = receivedFollowingNotification(recvset, outofelection, voteSet, n);
        if (result != null) {
            return result;
        }

        // Quorum-based handling was inconclusive; consult the Oracle.
        if (!self.getQuorumVerifier().getNeedOracle() || self.getQuorumVerifier().askOracle()) {
            LOG.info("Oracle indicates not to follow");
            return null;
        }

        LOG.info("Oracle indicates to follow");
        setPeerState(n.leader, voteSet);
        Vote endVote = new Vote(n.leader, n.zxid, n.electionEpoch, n.peerEpoch);
        leaveInstance(endVote);
        return endVote;
    }

    /**
     * Check if a given sid is represented in either the current or the next
     * voting view.
     *
     * @param sid server identifier
     * @return true iff sid is a voter in the current or next configuration
     */
    private boolean validVoter(long sid) {
        return self.getCurrentAndNextConfigVoters().contains(sid);
    }

}
