/**
 * Licensed to the Apache Software Foundation (ASF) under one or more contributor license
 * agreements.  See the NOTICE file distributed with this work for additional information regarding
 * copyright ownership.  The ASF licenses this file to you under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with the License.  You may obtain
 * a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License
 * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied. See the License for the specific language governing permissions and limitations under
 * the License.
 */

package org.apache.zookeeper.server;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.Flushable;
import java.io.IOException;
import java.util.LinkedList;
import java.util.Random;
import java.util.concurrent.LinkedBlockingQueue;

/**
 * This RequestProcessor logs requests to disk. It batches the requests to do
 * the io efficiently. The request is not passed to the next RequestProcessor
 * until its log has been synced to disk.
 *
 * SyncRequestProcessor is used in 3 different cases
 * 1. Leader - Sync request to disk and forward it to AckRequestProcessor which
 *             send ack back to itself.
 * 2. Follower - Sync request to disk and forward request to
 *             SendAckRequestProcessor which send the packets to leader.
 *             SendAckRequestProcessor is flushable which allow us to force
 *             push packets to leader.
 * 3. Observer - Sync committed request to disk (received as INFORM packet).
 *             It never send ack back to the leader, so the nextProcessor will
 *             be null. This change the semantic of txnlog on the observer
 *             since it only contains committed txns.
 */
public class SyncRequestProcessor extends ZooKeeperCriticalThread implements RequestProcessor {
    private static final Logger LOG = LoggerFactory.getLogger(SyncRequestProcessor.class);

    /**
     * The ZooKeeper server persists transactions using snapshot files plus a
     * transaction log (similar to a write-ahead log). To keep all members of
     * an ensemble from taking a snapshot at the same moment, each server
     * derives a random offset from snapCount (kept in
     * {@link SyncRequestProcessor#randRoll}); a snapshot is triggered after
     * roughly [snapCount/2+1, snapCount] logged transactions. The default
     * snapCount is 100000.
     *
     * See the zookeeperAdmin.html documentation for details.
     */
    // Was hard-coded to 3 with the real initializer commented out — almost
    // certainly debugging residue; restored to the configured value.
    private static int snapCount = ZooKeeperServer.getSnapCount();

    /**
     * Random offset derived from snapCount that decides how many transactions
     * are logged before a snapshot is taken. Randomized so that the nodes of
     * a multi-node ensemble do not all snapshot at the same time.
     *
     * @see SyncRequestProcessor#snapCount
     */
    private static int randRoll;

    private final ZooKeeperServer zks;

    /**
     * Incoming requests for this processor (think of it as incomingRequests).
     *
     * NOTE(review): the run loop together with shutdown() guarantees that all
     * requests enqueued BEFORE shutdown() was called are fully processed
     * before the thread exits; requests enqueued after the poison pill may
     * only be flushed by shutdown()'s final flush — confirm against callers.
     */
    private final LinkedBlockingQueue<Request> queuedRequests = new LinkedBlockingQueue<Request>();

    private final RequestProcessor nextProcessor;

    /**
     * Requests already appended to the transaction log that are waiting to be
     * flushed to disk and then handed to nextProcessor (think of it as
     * outgoingRequests).
     *
     * Transactions that have been written and are waiting to be flushed to
     * disk. Basically this is the list of SyncItems whose callbacks will be
     * invoked after flush returns successfully.
     */
    private final LinkedList<Request> toFlush = new LinkedList<Request>();

    private final Random r = new Random(System.nanoTime());

    /** Poison pill: once taken from the queue, the run loop terminates. */
    private final Request requestOfDeath = Request.requestOfDeath;

    /**
     * Thread currently taking a snapshot, if any.
     */
    private Thread snapInProcess = null;

    /**
     * Whether the processing thread is still running.
     */
    private volatile boolean running;


    public SyncRequestProcessor(ZooKeeperServer zks, RequestProcessor nextProcessor) {
        super("SyncThread:" + zks.getServerId(), zks.getZooKeeperServerListener());
        this.zks = zks;
        this.nextProcessor = nextProcessor;
        running = true;
    }

    /**
     * used by tests to get the snapcount
     * @return the snapcount
     */
    public static int getSnapCount() {
        return snapCount;
    }

    /**
     * used by tests to check for changing
     * snapcounts
     * @param count
     */
    public static void setSnapCount(int count) {
        snapCount = count;
        randRoll = count;
    }

    /**
     * @see SyncRequestProcessor#randRoll
     *
     * @param roll
     */
    private static void setRandRoll(int roll) {
        randRoll = roll;
    }

    @Override
    public void run() {
        try {
            // Number of transactions logged since this thread started.
            int logCount = 0;

            setRandRoll(r.nextInt(snapCount / 2));

            while (true) {
                Request si;
                // If toFlush is empty there is nothing pending for the next
                // processor, so we can afford to block for the next request.
                if (toFlush.isEmpty()) {
                    si = queuedRequests.take(); // take() blocks
                } else {
                    si = queuedRequests.poll(); // poll() does not block; returns null when empty
                    if (si == null) {
                        // The queue drained while we still have written-but-
                        // unflushed requests — likely an idle period, so flush
                        // what we have before blocking again.
                        flush(toFlush);
                        continue;
                    }
                }

                // Poison pill: exit the loop immediately, regardless of
                // whether more requests remain in the queue.
                if (si == requestOfDeath) {
                    break;
                }

                if (si == null) {
                    continue;
                }

                // Append the request to the transaction log. append() returns
                // false when the request carries no transaction header, i.e.
                // it is a read request.
                if (zks.getZKDatabase().append(si)) {
                    // Successfully appended to the txn log.
                    logCount++;

                    // Decide whether it is time to roll the log and snapshot.
                    if (logCount > (snapCount / 2 + randRoll)) {
                        // Re-roll the random offset for the next cycle.
                        setRandRoll(r.nextInt(snapCount / 2));

                        // Roll over to a new transaction log file.
                        zks.getZKDatabase().rollLog();

                        // Take a snapshot unless one is already in progress.
                        if (snapInProcess != null && snapInProcess.isAlive()) {
                            LOG.warn("Too busy to snap, skipping");
                        } else {
                            // No snapshot running: spawn a one-shot thread.
                            snapInProcess = new ZooKeeperThread("Snapshot Thread") {
                                @Override
                                public void run() {
                                    try {
                                        zks.takeSnapshot(); // thread ends when the snapshot completes
                                    } catch (Exception e) {
                                        LOG.warn("Unexpected exception", e);
                                    }
                                }
                            };
                            snapInProcess.start();
                        }
                        // Reset the per-cycle transaction count.
                        logCount = 0;
                    }

                    // append() returned false: the request has no transaction
                    // header, so it is a read request (see how
                    // PrepRequestProcessor#pRequest handles reads).
                } else if (toFlush.isEmpty()) {

                    // optimization for read heavy workloads iff this is a
                    // read, and there are no pending flushes (writes), then
                    // just pass this to the next processor
                    if (nextProcessor != null) {
                        nextProcessor.processRequest(si);
                        if (nextProcessor instanceof Flushable) {
                            ((Flushable) nextProcessor).flush();
                        }
                    }
                    continue;
                }

                // Writes — and reads queued behind pending writes (ordering
                // must be preserved) — go into the flush list.
                toFlush.add(si);
                // Bound the batch size: flush once we exceed 1000 pending.
                if (toFlush.size() > 1000) {
                    flush(toFlush);
                }
            }
        } catch (Throwable t) {
            handleException(this.getName(), t);
            running = false; // the thread died with an exception
        }
        LOG.info("SyncRequestProcessor exited!");
    }

    /**
     * Commits the transaction log to disk, then hands every pending request
     * to the next processor in order, and finally flushes the next processor
     * if it is Flushable.
     *
     * Never called from more than one thread at a time.
     *
     * @param toFlush requests already appended to the txn log awaiting commit
     * @throws IOException if the commit to disk fails
     * @throws RequestProcessorException if the next processor fails
     */
    private void flush(LinkedList<Request> toFlush) throws IOException, RequestProcessorException {
        if (toFlush.isEmpty()) {
            return;
        }

        // Commit the zk database: effectively an OutputStream#flush of the
        // transaction log.
        zks.getZKDatabase().commit();

        // Hand the requests to the next processor, preserving order.
        while (!toFlush.isEmpty()) {
            Request request = toFlush.remove();
            if (nextProcessor != null) {
                nextProcessor.processRequest(request);
            }
        }

        // Once everything has been forwarded, flush the next processor too.
        if (nextProcessor != null && nextProcessor instanceof Flushable) {
            ((Flushable) nextProcessor).flush();
        }
    }

    @Override
    public void shutdown() {
        LOG.info("Shutting down");
        queuedRequests.add(requestOfDeath);
        try {
            // If the processing thread is still running, wait for it to drain
            // everything enqueued before the poison pill.
            if (running) {
                this.join();
            }
            // Anything left in toFlush was added after the thread exited;
            // flush it here so no accepted request is lost.
            if (!toFlush.isEmpty()) {
                flush(toFlush);
            }
        } catch (InterruptedException e) {
            LOG.warn("Interrupted while waiting for {} to finish", this);
            // Restore the interrupt status so callers can observe it.
            Thread.currentThread().interrupt();
        } catch (IOException e) {
            LOG.warn("Got IO exception during shutdown", e);
        } catch (RequestProcessorException e) {
            LOG.warn("Got request processor exception during shutdown", e);
        }
        if (nextProcessor != null) {
            nextProcessor.shutdown();
        }
    }

    @Override
    public void processRequest(Request request) {
        queuedRequests.add(request);
    }

}
