/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.zookeeper.server;

import java.io.Flushable;
import java.io.IOException;
import java.util.ArrayDeque;
import java.util.Objects;
import java.util.Queue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.Semaphore;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;

import org.apache.zookeeper.common.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Persists transactional requests to disk in batches for efficiency; a request
 * is not forwarded to the next processor until it has been synced.
 * <br/>Like PrepRequestProcessor, SyncRequestProcessor extends Thread and
 * implements RequestProcessor, so it runs as its own thread.
 * <br/>It does not only persist Requests: it also takes snapshots (persisting
 * the DataTree).
 * <br/>Persistence here is not one request at a time: writes are appended to
 * the log file's output stream and collected in the toFlush queue, then
 * flushed (truly persisted to the file) as a batch.
 * <br/>If no new request arrives for a short while after a write, the pending
 * toFlush queue is flushed and its requests are handed to nextProcessor; a
 * flush also happens as soon as the time or batch-size condition is met.
 * <br/>This RequestProcessor logs requests to disk. It batches the requests to do
 * the io efficiently. The request is not passed to the next RequestProcessor
 * until its log has been synced to disk.
 *
 * SyncRequestProcessor is used in 3 different cases
 * 1. Leader - Sync request to disk and forward it to AckRequestProcessor which
 *             send ack back to itself.
 * 2. Follower - Sync request to disk and forward request to
 *             SendAckRequestProcessor which send the packets to leader.
 *             SendAckRequestProcessor is flushable which allow us to force
 *             push packets to leader.
 * 3. Observer - Sync committed request to disk (received as INFORM packet).
 *             It never send ack back to the leader, so the nextProcessor will
 *             be null. This change the semantic of txnlog on the observer
 *             since it only contains committed txns.
 */
public class SyncRequestProcessor extends ZooKeeperCriticalThread implements RequestProcessor {

    private static final Logger LOG = LoggerFactory.getLogger(SyncRequestProcessor.class);

    /**
     * Sentinel request used to signal shutdown; the run loop exits as soon as
     * it dequeues this marker (enqueued by {@link #shutdown()}).
     */
    private static final Request REQUEST_OF_DEATH = Request.requestOfDeath;

    /**
     * The number of log entries to log before starting a snapshot
     */
    private static int snapCount = ZooKeeperServer.getSnapCount();

    /**
     * The total size of log entries before starting a snapshot
     */
    private static long snapSizeInBytes = ZooKeeperServer.getSnapSizeInBytes();

    /**
     * Random numbers used to vary snapshot timing, so that the servers in an
     * ensemble do not all roll their logs and snapshot at the same moment.
     */
    private int randRoll;
    private long randSize;

    /**
     * Incoming requests, enqueued by {@link #processRequest(Request)} and
     * drained by the sync thread's run loop.
     */
    private final BlockingQueue<Request> queuedRequests = new LinkedBlockingQueue<Request>();

    /**
     * Permits at most one background snapshot thread to run at a time.
     */
    private final Semaphore snapThreadMutex = new Semaphore(1);

    /**
     * The server whose transaction log and database this processor persists.
     */
    private final ZooKeeperServer zks;

    /**
     * Next processor in the chain; may be null (observer case), in which case
     * flushed requests are simply discarded after the sync.
     */
    private final RequestProcessor nextProcessor;

    /**
     * Transactions that have been written and are waiting to be flushed to
     * disk. Basically this is the list of SyncItems whose callbacks will be
     * invoked after flush returns successfully.
     */
    private final Queue<Request> toFlush;
    // Elapsed-time timestamp of the last completed flush; used to compute how
    // long the loop may wait before a time-based flush is due.
    private long lastFlushTime;

    /**
     * Creates the processor as a named thread and wires in the next processor
     * of the chain.
     *
     * @param zks the server whose database and txn log are persisted
     * @param nextProcessor processor to hand requests to once they are synced;
     *                      may be null (observer case)
     */
    public SyncRequestProcessor(ZooKeeperServer zks, RequestProcessor nextProcessor) {
        super("SyncThread:" + zks.getServerId(), zks.getZooKeeperServerListener());
        this.zks = zks;
        this.nextProcessor = nextProcessor;
        this.toFlush = new ArrayDeque<>(zks.getMaxBatchSize());
    }

    /**
     * used by tests to check for changing
     * snapcounts
     * @param count
     */
    public static void setSnapCount(int count) {
        snapCount = count;
    }

    /**
     * used by tests to get the snapcount
     * @return the snapcount
     */
    public static int getSnapCount() {
        return snapCount;
    }

    /**
     * Returns the number of milliseconds remaining before the time-based
     * flush is due, or 0 if the flush delay has already elapsed.
     */
    private long getRemainingDelay() {
        long flushDelay = zks.getFlushDelay();
        long duration = Time.currentElapsedTime() - lastFlushTime;
        if (duration < flushDelay) {
            return flushDelay - duration;
        }
        return 0;
    }

    /**
     * If both flushDelay and maxMaxBatchSize are set (bigger than 0), flush whenever either condition is hit.
     * If only one or the other is set, flush only when the relevant condition is hit.
     */
    private boolean shouldFlush() {
        // maximum delay (ms) before batched txns must be flushed to disk
        long flushDelay = zks.getFlushDelay();
        // maximum number of batched txns before a flush is forced
        long maxBatchSize = zks.getMaxBatchSize();
        if ((flushDelay > 0) && (getRemainingDelay() == 0)) {
            return true;
        }
        return (maxBatchSize > 0) && (toFlush.size() >= maxBatchSize);
    }

    /**
     * used by tests to check for changing
     * snapcounts
     * @param size
     */
    public static void setSnapSizeInBytes(long size) {
        snapSizeInBytes = size;
    }

    /**
     * Decides whether it is time to roll the transaction log and take a new
     * snapshot, based on the txn count and log size accumulated since the
     * last roll plus a per-server random offset.
     */
    private boolean shouldSnapshot() {
        // number of transactions appended since the last log roll
        int logCount = zks.getZKDatabase().getTxnCount();
        // total size of the txn log written since the last roll
        long logSize = zks.getZKDatabase().getTxnSize();
        // Roll/snapshot once count or size exceeds half the configured
        // threshold plus a random offset; the randomness staggers snapshots
        // across the ensemble so servers don't all snapshot at the same time.
        return (logCount > (snapCount / 2 + randRoll))
               || (snapSizeInBytes > 0 && logSize > (snapSizeInBytes / 2 + randSize));
    }

    /**
     * Re-draws the random offsets used by {@link #shouldSnapshot()} to vary
     * snapshot timing between servers.
     */
    private void resetSnapshotStats() {
        randRoll = ThreadLocalRandom.current().nextInt(snapCount / 2);
        randSize = Math.abs(ThreadLocalRandom.current().nextLong() % (snapSizeInBytes / 2));
    }

    /**
     * Main loop of the sync thread. Drains {@link #queuedRequests}, appends
     * each transactional request to the txn log (buffered write), batches it
     * in {@link #toFlush}, and flushes — commit to disk plus hand-off to
     * nextProcessor — when the batch-size or time condition is met, or when
     * the queue goes idle. Batching lets many transactions share a single
     * disk sync. Once enough txns/bytes accumulate, it also rolls the log and
     * takes a snapshot on a background thread (at most one at a time, guarded
     * by {@link #snapThreadMutex}). Reads and throttled requests are not
     * logged; when nothing is pending to flush they are passed straight to
     * the next processor as a read-heavy-workload optimization.
     */
    @Override
	public void run() {
        try {
            // we do this in an attempt to ensure that not all of the servers
            // in the ensemble take a snapshot at the same time
            resetSnapshotStats();
            lastFlushTime = Time.currentElapsedTime();
            while (true) {
                ServerMetrics.getMetrics().SYNC_PROCESSOR_QUEUE_SIZE.add(queuedRequests.size());

                long pollTime = Math.min(zks.getMaxWriteQueuePollTime(), getRemainingDelay());
                // Bounded poll so a partially filled batch still gets flushed
                // on time even when no new requests arrive.
                Request si = queuedRequests.poll(pollTime, TimeUnit.MILLISECONDS);
                if (si == null) {
                    /* We timed out looking for more writes to batch, go ahead and flush immediately */
                    flush();
                    // Nothing left to flush; block until the next request
                    // (or the death sentinel) arrives.
                    si = queuedRequests.take();
                }

                // shutdown() enqueues REQUEST_OF_DEATH; stop processing once
                // it is dequeued.
                if (si == REQUEST_OF_DEATH) {
                    break;
                }

                long startProcessTime = Time.currentElapsedTime();
                ServerMetrics.getMetrics().SYNC_PROCESSOR_QUEUE_TIME.add(startProcessTime - si.syncQueueStartTime);

                // track the number of records written to the log
                // append() returns true only for transactional requests that
                // were written to the (buffered) txn log.
                if (!si.isThrottled() && zks.getZKDatabase().append(si)) {
                    // Enough txns/bytes since the last snapshot? Roll the log
                    // and snapshot in the background.
                    if (shouldSnapshot()) {
                        resetSnapshotStats();
                        // roll the txn log to a fresh file
                        zks.getZKDatabase().rollLog();
                        // take a snapshot, unless one is already in progress
                        if (!snapThreadMutex.tryAcquire()) {
                            LOG.warn("Too busy to snap, skipping");
                        } else {
                            // Snapshot on a separate thread so this loop keeps
                            // draining requests meanwhile.
                            new ZooKeeperThread("Snapshot Thread") {
                                @Override
								public void run() {
                                    try {
                                        zks.takeSnapshot();
                                    } catch (Exception e) {
                                        LOG.warn("Unexpected exception", e);
                                    } finally {
                                        // always free the permit, even on failure
                                        snapThreadMutex.release();
                                    }
                                }
                            }.start();
                        }
                    }
                } else if (toFlush.isEmpty()) { // nothing is waiting to be flushed
                    // optimization for read heavy workloads
                    // if this is a read or a throttled request(which doesn't need to be written to the disk),
                    // and there are no pending flushes (writes), then just pass this to the next processor
                	if (nextProcessor != null) {
                        nextProcessor.processRequest(si);
                        // flushable processors (e.g. SendAckRequestProcessor)
                        // get a chance to push their output immediately
                        if (nextProcessor instanceof Flushable) {
                            ((Flushable) nextProcessor).flush();
                        }
                    }
                    // skip the batching below; nothing was logged
                    continue;
                }
                // Transactional request: queue it for the next flush.
                toFlush.add(si);
                if (shouldFlush()) {
                    flush();
                }
                ServerMetrics.getMetrics().SYNC_PROCESS_TIME.add(Time.currentElapsedTime() - startProcessTime);
            }
        } catch (Throwable t) {
            // reported via the server listener; the thread then exits
            handleException(this.getName(), t);
        }
        LOG.info("SyncRequestProcessor exited!");
    }

    /**
     * Commits (syncs) the buffered transaction log to disk, then hands every
     * request batched in {@link #toFlush} to the next processor — or simply
     * clears the batch when there is no next processor (observer case).
     *
     * @throws IOException if the commit to the txn log fails
     * @throws RequestProcessorException if the next processor fails
     */
    private void flush() throws IOException, RequestProcessorException {
        // nothing batched, nothing to do
        if (this.toFlush.isEmpty()) {
            return;
        }

        ServerMetrics.getMetrics().BATCH_SIZE.add(toFlush.size());

        long flushStartTime = Time.currentElapsedTime();
        // sync the buffered txn log entries to disk
        zks.getZKDatabase().commit();
        ServerMetrics.getMetrics().SYNC_PROCESSOR_FLUSH_TIME.add(Time.currentElapsedTime() - flushStartTime);

        if (this.nextProcessor == null) {
            this.toFlush.clear();
        } else {
            while (!this.toFlush.isEmpty()) {
                // drain the batch in FIFO order
                final Request i = this.toFlush.remove();
                long latency = Time.currentElapsedTime() - i.syncQueueStartTime;
                ServerMetrics.getMetrics().SYNC_PROCESSOR_QUEUE_AND_FLUSH_TIME.add(latency);
                // the txn is now durable; pass it downstream
                this.nextProcessor.processRequest(i);
            }
            if (this.nextProcessor instanceof Flushable) {
                // e.g. SendAckRequestProcessor: force its packets out too
                ((Flushable) this.nextProcessor).flush();
            }
        }
        lastFlushTime = Time.currentElapsedTime();
    }

    /**
     * Shuts this processor down: enqueues the death sentinel, waits for the
     * sync thread to drain its queue and exit, flushes anything still
     * batched, then shuts down the next processor in the chain.
     */
    @Override
	public void shutdown() {
        LOG.info("Shutting down");
        // sentinel request; run() breaks out of its loop when it sees it
        queuedRequests.add(REQUEST_OF_DEATH);
        try {
            // wait for the sync thread to terminate
            this.join();
            // flush whatever is still batched
            this.flush();
        } catch (InterruptedException e) {
            LOG.warn("Interrupted while wating for {} to finish", this);
            Thread.currentThread().interrupt();
        } catch (IOException e) {
            LOG.warn("Got IO exception during shutdown");
        } catch (RequestProcessorException e) {
            LOG.warn("Got request processor exception during shutdown");
        }
        if (nextProcessor != null) {
            nextProcessor.shutdown();
        }
    }

    /**
     * Queues a request for the sync thread and returns immediately; the
     * actual disk write happens asynchronously in {@link #run()}.
     *
     * @param request the request to sync; must not be null
     */
    @Override
	public void processRequest(final Request request) {
        Objects.requireNonNull(request, "Request cannot be null");

        request.syncQueueStartTime = Time.currentElapsedTime();
        queuedRequests.add(request);
        ServerMetrics.getMetrics().SYNC_PROCESSOR_QUEUED.add(1);
    }

}
