/*
 * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
 *
 *
 *
 *
 *
 *
 *
 *
 *
 *
 *
 *
 *
 *
 *
 *
 *
 *
 *
 *
 */

/*
 *
 *
 *
 *
 *
 * Written by Doug Lea with assistance from members of JCP JSR-166
 * Expert Group and released to the public domain, as explained at
 * http://creativecommons.org/publicdomain/zero/1.0/
 */

package java.util.concurrent;

import java.security.AccessControlContext;
import java.security.AccessController;
import java.security.PrivilegedAction;
import java.util.concurrent.locks.AbstractQueuedSynchronizer;
import java.util.concurrent.locks.Condition;
import java.util.concurrent.locks.ReentrantLock;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.*;

/**
 * An {@link ExecutorService} that executes each submitted task using
 * one of possibly several pooled threads, normally configured
 * using {@link Executors} factory methods.
 *
 * <p>Thread pools address two different problems: they usually
 * provide improved performance when executing large numbers of
 * asynchronous tasks, due to reduced per-task invocation overhead,
 * and they provide a means of bounding and managing the resources,
 * including threads, consumed when executing a collection of tasks.
 * Each {@code ThreadPoolExecutor} also maintains some basic
 * statistics, such as the number of completed tasks.
 *
 * <p>To be useful across a wide range of contexts, this class
 * provides many adjustable parameters and extensibility
 * hooks. However, programmers are urged to use the more convenient
 * {@link Executors} factory methods {@link
 * Executors#newCachedThreadPool} (unbounded thread pool, with
 * automatic thread reclamation), {@link Executors#newFixedThreadPool}
 * (fixed size thread pool) and {@link
 * Executors#newSingleThreadExecutor} (single background thread), that
 * preconfigure settings for the most common usage
 * scenarios. Otherwise, use the following guide when manually
 * configuring and tuning this class:
 *
 * <dl>
 *
 * <dt>Core and maximum pool sizes</dt>
 *
 * <dd>A {@code ThreadPoolExecutor} will automatically adjust the
 * pool size (see {@link #getPoolSize})
 * according to the bounds set by
 * corePoolSize (see {@link #getCorePoolSize}) and
 * maximumPoolSize (see {@link #getMaximumPoolSize}).
 *
 * When a new task is submitted in method {@link #execute(Runnable)},
 * and fewer than corePoolSize threads are running, a new thread is
 * created to handle the request, even if other worker threads are
 * idle.  If there are more than corePoolSize but less than
 * maximumPoolSize threads running, a new thread will be created only
 * if the queue is full.  By setting corePoolSize and maximumPoolSize
 * the same, you create a fixed-size thread pool. By setting
 * maximumPoolSize to an essentially unbounded value such as {@code
 * Integer.MAX_VALUE}, you allow the pool to accommodate an arbitrary
 * number of concurrent tasks. Most typically, core and maximum pool
 * sizes are set only upon construction, but they may also be changed
 * dynamically using {@link #setCorePoolSize} and {@link
 * #setMaximumPoolSize}. </dd>
 *
 * <dt>On-demand construction</dt>
 *
 * <dd>By default, even core threads are initially created and
 * started only when new tasks arrive, but this can be overridden
 * dynamically using method {@link #prestartCoreThread} or {@link
 * #prestartAllCoreThreads}.  You probably want to prestart threads if
 * you construct the pool with a non-empty queue. </dd>
 *
 * <dt>Creating new threads</dt>
 *
 * <dd>New threads are created using a {@link ThreadFactory}.  If not
 * otherwise specified, a {@link Executors#defaultThreadFactory} is
 * used, that creates threads to all be in the same {@link
 * ThreadGroup} and with the same {@code NORM_PRIORITY} priority and
 * non-daemon status. By supplying a different ThreadFactory, you can
 * alter the thread's name, thread group, priority, daemon status,
 * etc. If a {@code ThreadFactory} fails to create a thread when asked
 * by returning null from {@code newThread}, the executor will
 * continue, but might not be able to execute any tasks. Threads
 * should possess the "modifyThread" {@code RuntimePermission}. If
 * worker threads or other threads using the pool do not possess this
 * permission, service may be degraded: configuration changes may not
 * take effect in a timely manner, and a shutdown pool may remain in a
 * state in which termination is possible but not completed.</dd>
 *
 * <dt>Keep-alive times</dt>
 *
 * <dd>If the pool currently has more than corePoolSize threads,
 * excess threads will be terminated if they have been idle for more
 * than the keepAliveTime (see {@link #getKeepAliveTime(TimeUnit)}).
 * This provides a means of reducing resource consumption when the
 * pool is not being actively used. If the pool becomes more active
 * later, new threads will be constructed. This parameter can also be
 * changed dynamically using method {@link #setKeepAliveTime(long,
 * TimeUnit)}.  Using a value of {@code Long.MAX_VALUE} {@link
 * TimeUnit#NANOSECONDS} effectively disables idle threads from ever
 * terminating prior to shut down. By default, the keep-alive policy
 * applies only when there are more than corePoolSize threads. But
 * method {@link #allowCoreThreadTimeOut(boolean)} can be used to
 * apply this time-out policy to core threads as well, so long as the
 * keepAliveTime value is non-zero. </dd>
 *
 * <dt>Queuing</dt>
 *
 * <dd>Any {@link BlockingQueue} may be used to transfer and hold
 * submitted tasks.  The use of this queue interacts with pool sizing:
 *
 * <ul>
 *
 * <li> If fewer than corePoolSize threads are running, the Executor
 * always prefers adding a new thread
 * rather than queuing.</li>
 *
 * <li> If corePoolSize or more threads are running, the Executor
 * always prefers queuing a request rather than adding a new
 * thread.</li>
 *
 * <li> If a request cannot be queued, a new thread is created unless
 * this would exceed maximumPoolSize, in which case, the task will be
 * rejected.</li>
 *
 * </ul>
 *
 * There are three general strategies for queuing:
 * <ol>
 *
 * <li> <em> Direct handoffs.</em> A good default choice for a work
 * queue is a {@link SynchronousQueue} that hands off tasks to threads
 * without otherwise holding them. Here, an attempt to queue a task
 * will fail if no threads are immediately available to run it, so a
 * new thread will be constructed. This policy avoids lockups when
 * handling sets of requests that might have internal dependencies.
 * Direct handoffs generally require unbounded maximumPoolSizes to
 * avoid rejection of new submitted tasks. This in turn admits the
 * possibility of unbounded thread growth when commands continue to
 * arrive on average faster than they can be processed.  </li>
 *
 * <li><em> Unbounded queues.</em> Using an unbounded queue (for
 * example a {@link LinkedBlockingQueue} without a predefined
 * capacity) will cause new tasks to wait in the queue when all
 * corePoolSize threads are busy. Thus, no more than corePoolSize
 * threads will ever be created. (And the value of the maximumPoolSize
 * therefore doesn't have any effect.)  This may be appropriate when
 * each task is completely independent of others, so tasks cannot
 * affect each others execution; for example, in a web page server.
 * While this style of queuing can be useful in smoothing out
 * transient bursts of requests, it admits the possibility of
 * unbounded work queue growth when commands continue to arrive on
 * average faster than they can be processed.  </li>
 *
 * <li><em>Bounded queues.</em> A bounded queue (for example, an
 * {@link ArrayBlockingQueue}) helps prevent resource exhaustion when
 * used with finite maximumPoolSizes, but can be more difficult to
 * tune and control.  Queue sizes and maximum pool sizes may be traded
 * off for each other: Using large queues and small pools minimizes
 * CPU usage, OS resources, and context-switching overhead, but can
 * lead to artificially low throughput.  If tasks frequently block (for
 * example if they are I/O bound), a system may be able to schedule
 * time for more threads than you otherwise allow. Use of small queues
 * generally requires larger pool sizes, which keeps CPUs busier but
 * may encounter unacceptable scheduling overhead, which also
 * decreases throughput.  </li>
 *
 * </ol>
 *
 * </dd>
 *
 * <dt>Rejected tasks</dt>
 *
 * <dd>New tasks submitted in method {@link #execute(Runnable)} will be
 * <em>rejected</em> when the Executor has been shut down, and also when
 * the Executor uses finite bounds for both maximum threads and work queue
 * capacity, and is saturated.  In either case, the {@code execute} method
 * invokes the {@link
 * RejectedExecutionHandler#rejectedExecution(Runnable, ThreadPoolExecutor)}
 * method of its {@link RejectedExecutionHandler}.  Four predefined handler
 * policies are provided:
 *
 * <ol>
 *
 * <li> In the default {@link ThreadPoolExecutor.AbortPolicy}, the
 * handler throws a runtime {@link RejectedExecutionException} upon
 * rejection. </li>
 *
 * <li> In {@link ThreadPoolExecutor.CallerRunsPolicy}, the thread
 * that invokes {@code execute} itself runs the task. This provides a
 * simple feedback control mechanism that will slow down the rate that
 * new tasks are submitted. </li>
 *
 * <li> In {@link ThreadPoolExecutor.DiscardPolicy}, a task that
 * cannot be executed is simply dropped.  </li>
 *
 * <li>In {@link ThreadPoolExecutor.DiscardOldestPolicy}, if the
 * executor is not shut down, the task at the head of the work queue
 * is dropped, and then execution is retried (which can fail again,
 * causing this to be repeated.) </li>
 *
 * </ol>
 *
 * It is possible to define and use other kinds of {@link
 * RejectedExecutionHandler} classes. Doing so requires some care
 * especially when policies are designed to work only under particular
 * capacity or queuing policies. </dd>
 *
 * <dt>Hook methods</dt>
 *
 * <dd>This class provides {@code protected} overridable
 * {@link #beforeExecute(Thread, Runnable)} and
 * {@link #afterExecute(Runnable, Throwable)} methods that are called
 * before and after execution of each task.  These can be used to
 * manipulate the execution environment; for example, reinitializing
 * ThreadLocals, gathering statistics, or adding log entries.
 * Additionally, method {@link #terminated} can be overridden to perform
 * any special processing that needs to be done once the Executor has
 * fully terminated.
 *
 * <p>If hook or callback methods throw exceptions, internal worker
 * threads may in turn fail and abruptly terminate.</dd>
 *
 * <dt>Queue maintenance</dt>
 *
 * <dd>Method {@link #getQueue()} allows access to the work queue
 * for purposes of monitoring and debugging.  Use of this method for
 * any other purpose is strongly discouraged.  Two supplied methods,
 * {@link #remove(Runnable)} and {@link #purge} are available to
 * assist in storage reclamation when large numbers of queued tasks
 * become cancelled.</dd>
 *
 * <dt>Finalization</dt>
 *
 * <dd>A pool that is no longer referenced in a program <em>AND</em>
 * has no remaining threads will be {@code shutdown} automatically. If
 * you would like to ensure that unreferenced pools are reclaimed even
 * if users forget to call {@link #shutdown}, then you must arrange
 * that unused threads eventually die, by setting appropriate
 * keep-alive times, using a lower bound of zero core threads and/or
 * setting {@link #allowCoreThreadTimeOut(boolean)}.  </dd>
 *
 * </dl>
 *
 * <p><b>Extension example</b>. Most extensions of this class
 * override one or more of the protected hook methods. For example,
 * here is a subclass that adds a simple pause/resume feature:
 *
 *  <pre> {@code
 * class PausableThreadPoolExecutor extends ThreadPoolExecutor {
 *   private boolean isPaused;
 *   private ReentrantLock pauseLock = new ReentrantLock();
 *   private Condition unpaused = pauseLock.newCondition();
 *
 *   public PausableThreadPoolExecutor(...) { super(...); }
 *
 *   protected void beforeExecute(Thread t, Runnable r) {
 *     super.beforeExecute(t, r);
 *     pauseLock.lock();
 *     try {
 *       while (isPaused) unpaused.await();
 *     } catch (InterruptedException ie) {
 *       t.interrupt();
 *     } finally {
 *       pauseLock.unlock();
 *     }
 *   }
 *
 *   public void pause() {
 *     pauseLock.lock();
 *     try {
 *       isPaused = true;
 *     } finally {
 *       pauseLock.unlock();
 *     }
 *   }
 *
 *   public void resume() {
 *     pauseLock.lock();
 *     try {
 *       isPaused = false;
 *       unpaused.signalAll();
 *     } finally {
 *       pauseLock.unlock();
 *     }
 *   }
 * }}</pre>
 *
 * <pre>
 * 【线程池】在内部实际上构建了一个【生产者 - 消费者模型】，将线程和任务两者解耦，从而良好的缓冲任务，以及复用线程。
 * 既然解耦了线程和任务，那么我们能想到的自然就是通过【阻塞队列 {@link BlockingQueue}】来实现。阻塞队列缓存任务，工作线程从阻塞队列中获取任务。
 * 线程池的使用者提交任务（即，将任务交给线程池去执行），线程池消费任务。
 *
 * 1. 线程池的运行主要分为两部分：任务管理、线程管理。
 * 1.1 任务管理
 * 1). 介绍
 *     任务管理充当生产者角色。当任务提交后，线程池会判断该任务的后续流转：
 *     ① 直接申请线程执行该任务；
 *     ② 将任务缓冲到队列中，等待线程执行；
 *     ③ 执行拒绝策略。
 * 2). 任务执行机制
 *     ① 任务调度
 *        所有任务的调度都是由 {@link #execute(Runnable)} 方法完成的。
 *     ② 任务缓冲
 *     ③ 任务申请
 *     ④ 任务拒绝
 *        拒绝策略是线程的保护部分。
 *        线程池有一个最大容量，当线程池的任务缓存队列已满，并且线程池中的线程数达到 {@link #maximumPoolSize} 时，
 *        就需要采取拒绝策略来拒绝掉该任务，保护线程池。
 *        当然，拒绝策略我们也可以自己去实现。有一种实现方案就是把任务放到 mq 中。
 * 1.2 线程管理
 *    线程管理充当消费者角色。它们被统一维护在线程池内部，根据任务请求进行线程的分配，
 *    当线程执行完任务后则会继续取新的任务去执行，最终当线程获取不到任务的时候，线程就会被回收。
 * 2. 线程池的运行机制需要从以下 3 个方面进行研究
 *    1). 线程池如何维护自身状态？
 *    2). 线程池如何管理任务？
 *    3). 线程池如何管理线程？
 * 3. 线程池带来的优点
 *    1). 降低资源消耗
 *        通过重复利用已经创建的线程来降低线程创建和销毁造成的消耗。
 *    2). 提高响应速度
 *        当有任务提交时，任务可以不用等到线程创建就能立即执行。
 *        当然，不管是核心线程还是非核心线程也是在有任务提交时才会创建，所以只能说，后续提交的任务和阻塞队列中的任务【可能】就直接复用已创建的线程来执行任务。
 *    3). 提高线程的可管理性
 * 4. 线程池的核心方法入口: {@link #execute(Runnable)}。
 * 5. 高并发场景下创建多少线程合适？
 *    创建多少线程合适，要看多线程具体的应用场景。一般来说，我们将其分为：CPU密集型任务 和 IO密集型任务。
 * 5.1 CPU密集型任务
 *     对于 CPU密集型任务，多线程本质上 就是要 提升多核CPU的利用率（压榨 CPU）。
 *     比如，一个4核CPU，每个CPU内核上跑一个线程，同一时刻就可以跑4个线程，所以说，理论上创建4个线程就可以了，
 *   再多创建线程也只是增加 线程切换的成本。
 *     结论：针对 CPU密集型任务，理论上【线程池中的线程数 = CPU核数】。但是在实际工作中，一般会设置为【CPU核数+1】，这样的话，
 *   当线程因为偶尔的内存页失效或其他原因导致阻塞时，这个额外的线程就可以顶上去，从而保证 CPU的利用率。
 * 5.2 IO密集型任务
 *     对于I/O密集型任务，最佳的线程数 是与 程序中CPU计算和I/O操作的耗时比例相关。
 *     1). 单核CPU
 *         最佳线程数 = 1 + （I/O耗时 / CPU耗时）
 *         我们令 R = I/O耗时 / CPU耗时，可以这样理解：当线程A执行IO操作时，另外R个线程正好执行完各自的CPU计算，这样CPU的利用率就达到了100%。
 *     2). 多核CPU
 *         多个CPU的最佳线程数 在单核CPU最佳线程数的基础上，乘以 CPU核数就可以了。
 *         最佳线程数 = CPU核数 * [1 + (IO耗时 / CPU耗时)]
 *     总结：上述公式计算的结果是最佳理论值，实际工作中还是要通过实际压测数据来找到最佳线程数，将硬件的性能发挥到极致。
 * 【更多内容也可以参见《002007-线程池.md》】。
 * </pre>
 *
 * @since 1.5
 * @author Doug Lea
 */
public class ThreadPoolExecutor extends AbstractExecutorService {
    /**
     * The main pool control state, ctl, is an atomic integer packing
     * two conceptual fields
     *   workerCount, indicating the effective number of threads
     *   runState,    indicating whether running, shutting down etc
     *
     * In order to pack them into one int, we limit workerCount to
     * (2^29)-1 (about 500 million) threads rather than (2^31)-1 (2
     * billion) otherwise representable. If this is ever an issue in
     * the future, the variable can be changed to be an AtomicLong,
     * and the shift/mask constants below adjusted. But until the need
     * arises, this code is a bit faster and simpler using an int.
     *
     * The workerCount is the number of workers that have been
     * permitted to start and not permitted to stop.  The value may be
     * transiently different from the actual number of live threads,
     * for example when a ThreadFactory fails to create a thread when
     * asked, and when exiting threads are still performing
     * bookkeeping before terminating. The user-visible pool size is
     * reported as the current size of the workers set.
     *
     * The runState provides the main lifecycle control, taking on values:
     *
     *   RUNNING:  Accept new tasks and process queued tasks
     *   SHUTDOWN: Don't accept new tasks, but process queued tasks
     *   STOP:     Don't accept new tasks, don't process queued tasks,
     *             and interrupt in-progress tasks
     *   TIDYING:  All tasks have terminated, workerCount is zero,
     *             the thread transitioning to state TIDYING
     *             will run the terminated() hook method
     *   TERMINATED: terminated() has completed
     *
     * The numerical order among these values matters, to allow
     * ordered comparisons. The runState monotonically increases over
     * time, but need not hit each state. The transitions are:
     *
     * 线程【池】的状态转换：
     * RUNNING -> SHUTDOWN
     *    On invocation of {@link #shutdown()}, perhaps implicitly in finalize()
     * (RUNNING or SHUTDOWN) -> STOP
     *    On invocation of shutdownNow()
     * SHUTDOWN -> TIDYING
     *    When both queue and pool are empty，当任务队列和线程池都清空后，会由 shutdown 转换为 tidying。
     * STOP -> TIDYING
     *    When pool is empty
     * TIDYING -> TERMINATED
     *    When the terminated() hook method has completed
     *
     * Threads waiting in awaitTermination() will return when the
     * state reaches TERMINATED.
     *
     * Detecting the transition from SHUTDOWN to TIDYING is less
     * straightforward than you'd like because the queue may become
     * empty after non-empty and vice versa during SHUTDOWN state, but
     * we can only terminate if, after seeing that it is empty, we see
     * that workerCount is 0 (which sometimes entails a recheck -- see
     * below).
     */
    /**
     * 1. Overview
     *    The pool uses this single variable, ctl, to maintain two values: the run
     *    state (runState) and the worker count (workerCount).
     *    It is 32 bits in total (evident from the initializer; see the notes on
     *    {@link #ctlOf(int, int)}): the high 3 bits hold runState, the low 29 bits
     *    hold workerCount, and the two fields never interfere with each other.
     *
     *    (1) 3 bits can encode 2^3 = 8 combinations (binary 000 through 111), and the
     *        pool has only 5 run states, so 3 bits are sufficient. With only 2 bits
     *        there would be just 4 combinations — not enough for 5 states.
     *    (2) The low 29 bits bound the number of threads the pool can represent,
     *        i.e. maximumPoolSize can never exceed CAPACITY.
     * 2. Initial value
     *    111 0 0000 0000 0000 0000 0000 0000 0000
     *    High 3 bits: the pool state is RUNNING.
     *    Low 29 bits: the pool currently has 0 threads.
     * 3. Rationale
     *    Q: Why keep two values in one variable?
     *    A: If the two values lived in separate variables, decisions that need both
     *       could observe them in an inconsistent state, and keeping them consistent
     *       would then require locking. Packing both into one AtomicInteger lets them
     *       be read and updated together atomically.
     */
    private final AtomicInteger ctl = new AtomicInteger(ctlOf(RUNNING, 0));

    /**
     * COUNT_BITS = 32 - 3 = 29: the high 3 bits of ctl store the pool's run state,
     * and the low 29 bits store the worker count.
     */
    private static final int COUNT_BITS = Integer.SIZE - 3;

    /**
     * <pre>
     * 1 in binary is 0001; shifting it left by COUNT_BITS (29) gives
     * 0010 0000 0000 0000 0000 0000 0000 0000 = 2^29;
     * CAPACITY = 2^29 - 1 = 000 1 1111 1111 1111 1111 1111 1111 1111 = 2^0 + 2^1 + ... + 2^28
     * This is the largest worker count representable in the low 29 bits of the
     * 32-bit ctl value; consequently maximumPoolSize can never exceed CAPACITY.
     * </pre>
     */
    private static final int CAPACITY   = (1 << COUNT_BITS) - 1;

    // The pool's run states
    /**
     * runState is stored in the high-order bits.
     * 1) Derivation: -1 in two's complement is all ones (for +1 = 0001, invert to
     *    get 1110, add 1 to get 1111); shifting left by 29 bits yields:
     *    1110 0000 0000 0000 0000 0000 0000 0000
     * 2) The run state occupies the high 3 bits.
     * 3) RUNNING: normal state; accepts new tasks and processes tasks in workQueue.
     */
    private static final int RUNNING    = -1 << COUNT_BITS;
    // 000 0 0000 0000 0000 0000 0000 0000 0000
    // SHUTDOWN: the pool no longer accepts newly submitted tasks, but it does not
    // interrupt threads running tasks and keeps processing tasks already queued.
    private static final int SHUTDOWN   =  0 << COUNT_BITS;
    // 001 0 0000 0000 0000 0000 0000 0000 0000
    // STOP: the pool accepts no new tasks, no longer processes queued tasks, and
    // interrupts the threads that are currently executing tasks.
    private static final int STOP       =  1 << COUNT_BITS;
    // 010 0 0000 0000 0000 0000 0000 0000 0000
    // TIDYING: all tasks have terminated and workerCount is 0. In other words, once
    // nothing is running, the work queue is empty and the worker count is 0, the
    // pool enters TIDYING and runs the terminated() hook (see tryTerminate()).
    private static final int TIDYING    =  2 << COUNT_BITS;
    // 011 0 0000 0000 0000 0000 0000 0000 0000
    // TERMINATED: entered once the terminated() hook method has completed.
    private static final int TERMINATED =  3 << COUNT_BITS;

    // Packing and unpacking ctl
    /**
     * <pre>
     * Extracts the pool's current run state from a packed ctl value.
     *
     * ~CAPACITY is the bitwise complement: 111 0 0000 0000 0000 0000 0000 0000 0000.
     * AND-ing any int c with ~CAPACITY zeroes the low 29 bits, leaving only the
     * high 3 bits of c — i.e. the run state.
     * </pre>
     *
     * @param c the packed control value, typically obtained by calling
     *          {@link #ctl}.get() immediately before invoking this method.
     */
    private static int runStateOf(int c)     { return c & ~CAPACITY; }
    /**
     * Extracts the current worker count from a packed ctl value: AND-ing with
     * CAPACITY clears the high 3 bits (CAPACITY's high 3 bits are 000), leaving
     * only the low 29 bits — the number of worker threads.
     * @param c the packed control value, typically obtained by calling
     *          {@link #ctl}.get() immediately before invoking this method.
     */
    private static int workerCountOf(int c)  { return c & CAPACITY; }
    /**
     * Packs a run state (rs, high 3 bits) and a worker count (wc, low 29 bits)
     * into a single {@link #ctl} value via bitwise OR.
     */
    private static int ctlOf(int rs, int wc) { return rs | wc; }

    /*
     * Bit field accessors that don't require unpacking ctl.
     * These depend on the bit layout and on workerCount being never negative.
     */

    /**
     * Returns true if the run state packed in c is strictly below s.
     * Comparing the whole ints works because the worker-count bits are
     * non-negative and sit below the state bits.
     */
    private static boolean runStateLessThan(int c, int s) {
        return c < s;
    }

    /**
     * Returns true if the run state packed in c is at least s.
     * Comparing the whole ints works because the worker-count bits are
     * non-negative and sit below the state bits.
     */
    private static boolean runStateAtLeast(int c, int s) {
        return c >= s;
    }

    /**
     * Returns true iff the pool is in the RUNNING state. RUNNING is the only
     * state constant whose value is negative, so a packed ctl value below
     * SHUTDOWN (== 0) necessarily carries the RUNNING state.
     */
    private static boolean isRunning(int c) {
        return runStateLessThan(c, SHUTDOWN);
    }

    /**
     * Attempts to CAS-increment the workerCount field of ctl.
     * Returns false if ctl changed since {@code expect} was read.
     * NOTE(review): adding 1 to the packed value only touches the count bits as
     * long as the count is below CAPACITY — presumably callers check that bound
     * before invoking this; confirm at the call sites.
     */
    private boolean compareAndIncrementWorkerCount(int expect) {
        return ctl.compareAndSet(expect, expect + 1);
    }

    /**
     * Attempts to CAS-decrement the workerCount field of ctl.
     * Returns false if ctl changed since {@code expect} was read; the caller is
     * expected to re-read ctl and retry.
     */
    private boolean compareAndDecrementWorkerCount(int expect) {
        return ctl.compareAndSet(expect, expect - 1);
    }

    /**
     * Decrements the workerCount field of ctl, retrying until the CAS lands.
     * This is called only on abrupt termination of a thread (see
     * processWorkerExit). Other decrements are performed within getTask.
     */
    private void decrementWorkerCount() {
        // Re-read ctl on every attempt; a lost CAS race just means retry.
        while (!compareAndDecrementWorkerCount(ctl.get())) {
            // spin until the decrement succeeds
        }
    }

    /**
     * The queue used for holding tasks and handing off to worker
     * threads.  We do not require that workQueue.poll() returning
     * null necessarily means that workQueue.isEmpty(), so rely
     * solely on isEmpty to see if the queue is empty (which we must
     * do for example when deciding whether to transition from
     * SHUTDOWN to TIDYING).  This accommodates special-purpose
     * queues such as DelayQueues for which poll() is allowed to
     * return null even if it may later return non-null when delays
     * expire.
     * The task (blocking) queue: holds tasks waiting to be executed.
     */
    private final BlockingQueue<Runnable> workQueue;

    /**
     * Lock held on access to workers set and related bookkeeping.
     * While we could use a concurrent set of some sort, it turns out
     * to be generally preferable to use a lock. Among the reasons is
     * that this serializes interruptIdleWorkers, which avoids
     * unnecessary interrupt storms, especially during shutdown.
     * Otherwise exiting threads would concurrently interrupt those
     * that have not yet interrupted. It also simplifies some of the
     * associated statistics bookkeeping of largestPoolSize etc. We
     * also hold mainLock on shutdown and shutdownNow, for the sake of
     * ensuring workers set is stable while separately checking
     * permission to interrupt and actually interrupting.
     * This is the global lock of the entire pool.
     */
    private final ReentrantLock mainLock = new ReentrantLock();

    /**
     * Set containing all worker threads in pool. Accessed only when holding mainLock.
     */
    private final HashSet<Worker> workers = new HashSet<Worker>();

    /**
     * Wait condition to support awaitTermination
     */
    private final Condition termination = mainLock.newCondition();

    /**
     * Tracks largest attained pool size. Accessed only under
     * mainLock.
     */
    private int largestPoolSize;

    /**
     * Counter for completed tasks. Updated only on termination of
     * worker threads. Accessed only under mainLock.
     * Running total of tasks completed by all of the pool's threads; updated
     * only when a worker thread terminates.
     */
    private long completedTaskCount;

    /*
     * All user control parameters are declared as volatiles so that
     * ongoing actions are based on freshest values, but without need
     * for locking, since no internal invariants depend on them
     * changing synchronously with respect to other actions.
     */

    /**
     * Factory for new threads. All threads are created using this
     * factory (via method addWorker).  All callers must be prepared
     * for addWorker to fail, which may reflect a system or user's
     * policy limiting the number of threads.  Even though it is not
     * treated as an error, failure to create threads may result in
     * new tasks being rejected or existing ones remaining stuck in
     * the queue.
     *
     * We go further and preserve pool invariants even in the face of
     * errors such as OutOfMemoryError, that might be thrown while
     * trying to create threads.  Such errors are rather common due to
     * the need to allocate a native stack in Thread.start, and users
     * will want to perform clean pool shutdown to clean up.  There
     * will likely be enough memory available for the cleanup code to
     * complete without encountering yet another OutOfMemoryError.
     * Threads are created from this factory in the {@link Worker} constructor;
     * the default is Executors.defaultThreadFactory().
     */
    private volatile ThreadFactory threadFactory;

    /**
     * Handler called when saturated or shutdown in execute.
     */
    private volatile RejectedExecutionHandler handler;

    /**
     * Timeout in nanoseconds for idle threads waiting for work.
     * Threads use this timeout when there are more than corePoolSize
     * present or if allowCoreThreadTimeOut. Otherwise they wait
     * forever for new work.
     * <pre>
     * Idle-thread timeout: how long an idle non-core thread survives once the
     * pool holds more than corePoolSize threads.
     *  (1) A non-core thread idle longer than keepAliveTime is reclaimed, until
     *      the pool is back down to corePoolSize threads.
     *  (2) If allowCoreThreadTimeOut is true, core threads are subject to the
     *      same keepAliveTime limit.
     * </pre>
     */
    private volatile long keepAliveTime;

    /**
     * If false (default), core threads stay alive even when idle.
     * If true, core threads use keepAliveTime to time out waiting
     * for work.
     * <pre>
     * Whether core threads may time out:
     *  (1) false (default): core threads are never reclaimed, even when idle.
     *  (2) true: core threads behave like non-core ones and are reclaimed after
     *      being idle longer than keepAliveTime.
     * </pre>
     */
    private volatile boolean allowCoreThreadTimeOut;

    /**
     * Core pool size is the minimum number of workers to keep alive
     * (and not allow to time out etc) unless allowCoreThreadTimeOut
     * is set, in which case the minimum is zero.
     * <pre>
     * Core pool size: the minimum number of threads kept alive even when idle.
     *  (1) By default core threads are never reclaimed, which guarantees some
     *      threads stand ready to handle newly arriving tasks.
     *  (2) If allowCoreThreadTimeOut is true, core threads are also bounded by
     *      keepAliveTime and may be reclaimed.
     * </pre>
     */
    private volatile int corePoolSize;

    /**
     * Maximum pool size. Note that the actual maximum is internally bounded by CAPACITY.
     * <pre>
     * Maximum pool size: the largest number of threads (core + non-core) allowed.
     *  (1) When the work queue is full and the current thread count is above
     *      corePoolSize but below maximumPoolSize, a non-core thread is created.
     *  (2) With an unbounded queue (e.g. LinkedBlockingQueue) maximumPoolSize is
     *      effectively moot: tasks keep queuing and non-core threads are never
     *      created.
     *  (3) When the queue is full and the thread count equals maximumPoolSize,
     *      new tasks are rejected by default.
     * The value can never exceed CAPACITY, since only the low 29 bits of
     * {@link #ctl} hold the worker count.
     * </pre>
     */
    private volatile int maximumPoolSize;
    
    /**
     * <pre>
     * 1. How the pool's thread count evolves when a task is submitted:
     *      (1) thread count < corePoolSize: create a core thread to run the task.
     *      (2) thread count >= corePoolSize: add the task to the work queue.
     *      (3) queue full and thread count < maximumPoolSize: create a non-core
     *          thread to run the task.
     *      (4) queue full and thread count >= maximumPoolSize: invoke the
     *          RejectedExecutionHandler.
     * 2. Thread reclamation:
     *      (1) Non-core threads idle longer than keepAliveTime are reclaimed
     *          until the pool is down to corePoolSize threads.
     *      (2) If allowCoreThreadTimeOut is true, core threads obey
     *          keepAliveTime as well.
     * 3. Dynamically adjusting pool behavior:
     *      (1) Default (allowCoreThreadTimeOut == false): core threads live
     *          forever; non-core threads idle past keepAliveTime are reclaimed.
     *      (2) With allowCoreThreadTimeOut == true: every thread (core included)
     *          is reclaimed once idle longer than keepAliveTime.
     * </pre>
     */

    /**
     * The default rejected execution handler.
     * Its rejectedExecution() method is invoked when the pool is saturated
     * or shut down in execute.
     */
    private static final RejectedExecutionHandler defaultHandler =
        new AbortPolicy();

    /**
     * Permission required for callers of shutdown and shutdownNow.
     * We additionally require (see checkShutdownAccess) that callers
     * have permission to actually interrupt threads in the worker set
     * (as governed by Thread.interrupt, which relies on
     * ThreadGroup.checkAccess, which in turn relies on
     * SecurityManager.checkAccess). Shutdowns are attempted only if
     * these checks pass.
     *
     * All actual invocations of Thread.interrupt (see
     * interruptIdleWorkers and interruptWorkers) ignore
     * SecurityExceptions, meaning that the attempted interrupts
     * silently fail. In the case of shutdown, they should not fail
     * unless the SecurityManager has inconsistent policies, sometimes
     * allowing access to a thread and sometimes not. In such cases,
     * failure to actually interrupt threads may disable or delay full
     * termination. Other uses of interruptIdleWorkers are advisory,
     * and failure to actually interrupt will merely delay response to
     * configuration changes so is not handled exceptionally.
     */
    private static final RuntimePermission shutdownPerm =
        new RuntimePermission("modifyThread");

    /* The context to be used when executing the finalizer, or null. */
    private final AccessControlContext acc;

    /**
     * Class Worker mainly maintains interrupt control state for
     * threads running tasks, along with other minor bookkeeping.
     * This class opportunistically extends AbstractQueuedSynchronizer
     * to simplify acquiring and releasing a lock surrounding each
     * task execution.  This protects against interrupts that are
     * intended to wake up a worker thread waiting for a task from
     * instead interrupting a task being run.  We implement a simple
     * non-reentrant mutual exclusion lock rather than use
     * ReentrantLock because we do not want worker tasks to be able to
     * reacquire the lock when they invoke pool control methods like
     * setCorePoolSize.  Additionally, to suppress interrupts until
     * the thread actually starts running tasks, we initialize lock
     * state to a negative value, and clear it upon start (in
     * runWorker).
     * Worker：工作线程
     */
    private final class Worker
        extends AbstractQueuedSynchronizer
        implements Runnable
    {
        /**
         * This class will never be serialized, but we provide a
         * serialVersionUID to suppress a javac warning.
         */
        private static final long serialVersionUID = 6138294804551838833L;

        /**
         * Thread this worker is running in.  Null if factory fails.
         * Each worker is bound to exactly one thread; that thread is what runs
         * the tasks submitted to the pool.
         */
        final Thread thread;
        /**
         * Initial task to run.  Possibly null.
         * If a first task was supplied when this worker's thread was created, it
         * is stored here; otherwise, once started, the thread fetches tasks from
         * the blocking queue itself (via getTask). firstTask is assigned only
         * once — it is the first task this Worker ever executes.
         */
        Runnable firstTask;
        /** Per-thread task counter (number of tasks completed by this worker). */
        volatile long completedTasks;

        /**
         * Creates with given first task and thread from ThreadFactory.
         * @param firstTask the first task (null if none)
         */
        Worker(Runnable firstTask) {
            // inhibit interrupts until runWorker
            /**
             * Set the sync state to -1 first so this worker cannot be interrupted
             * before runWorker() runs. When another thread calls
             * {@link #shutdownNow()} on the pool, a worker whose state is still
             * -1 is skipped; only workers past this sentinel are interrupted
             * (NOTE(review): the exact state check lives in the interrupt code
             * outside this view — confirm there).
             */
            setState(-1);
            this.firstTask = firstTask;
            /**
             * Ask the ThreadFactory for a new thread bound to this worker. Note
             * the call shape: it mirrors the {@link Thread#Thread(Runnable)}
             * constructor, with this worker passed as the Runnable.
             */
            this.thread = getThreadFactory().newThread(this);
        }

        /** Delegates main run loop to outer runWorker  */
        public void run() {
            runWorker(this);
        }

        /**
         * <pre>
         * Lock methods 是否持有锁
         *  The value 0 represents the unlocked state. state = 0 表示 锁没有被获取
         *  The value 1 represents the locked state. state = 1 表示 锁已经被获取了
         * </pre>
         */

        protected boolean isHeldExclusively() {
            return getState() != 0;
        }

        /**
         * 独占式获取同步状态（非阻塞式）
         */
        protected boolean tryAcquire(int unused) {
            // 通过 CAS 设置同步状态（锁），CAS 可以保证原子性。
            // 注意看这里，同步状态的期望值是0，更新值是1，都是固定写死的值，入参 unused没有用到（从入参的取名就可以见名知意）。
            if (compareAndSetState(0, 1)) {
                // 标记获取到同步状态（更新 state 成功）的线程
                setExclusiveOwnerThread(Thread.currentThread());
                // 获取同步状态成功，返回 true
                return true;
            }

            // 获取同步状态失败，返回 false
            return false;
        }

        protected boolean tryRelease(int unused) {
            // 可以释放，清空排他线程标记
            setExclusiveOwnerThread(null);
            // 设置同步状态为 0，表示释放锁
            setState(0);
            return true;
        }

        public void lock() {
            // 【阻塞式】、【独占式】的获取锁，直接调用 AQS 提供的模板方法【独占式】获取同步状态。
            acquire(1);
        }
        public boolean tryLock()  { return tryAcquire(1); }
        public void unlock()      { release(1); }
        public boolean isLocked() { return isHeldExclusively(); }

        void interruptIfStarted() {
            Thread t;
            if (getState() >= 0 && (t = thread) != null && !t.isInterrupted()) {
                try {
                    t.interrupt();
                } catch (SecurityException ignore) {
                }
            }
        }
    }

    /*
     * Methods for setting control state
     */

    /**
     * Transitions runState to given target, or leaves it alone if
     * already at least the given target.
     *
     * @param targetState the desired state, either SHUTDOWN or STOP
     *        (but not TIDYING or TERMINATED -- use tryTerminate for that)
     */
    private void advanceRunState(int targetState) {
        // CAS loop: keep the current worker count, only raise the run state.
        for (;;) {
            int c = ctl.get();
            // Done once the state has already reached (or passed) the target,
            // or our CAS (same worker count, new run state) succeeds;
            // otherwise another thread changed ctl concurrently -- retry.
            if (runStateAtLeast(c, targetState) ||
                ctl.compareAndSet(c, ctlOf(targetState, workerCountOf(c))))
                break;
        }
    }

    /**
     * Transitions to TERMINATED state if either (SHUTDOWN and pool
     * and queue empty) or (STOP and pool empty).  If otherwise
     * eligible to terminate but workerCount is nonzero, interrupts an
     * idle worker to ensure that shutdown signals propagate. This
     * method must be called following any action that might make
     * termination possible -- reducing worker count or removing tasks
     * from the queue during shutdown. The method is non-private to
     * allow access from ScheduledThreadPoolExecutor.
     */
    final void tryTerminate() {
        for (;;) {
            int c = ctl.get();
            // Bail out if the pool is still RUNNING, has already reached
            // TIDYING or beyond, or is SHUTDOWN with tasks still queued
            // (queued tasks must finish before termination).
            if (isRunning(c) ||
                runStateAtLeast(c, TIDYING) ||
                (runStateOf(c) == SHUTDOWN && ! workQueue.isEmpty()))
                return;
            if (workerCountOf(c) != 0) { // Eligible to terminate
                // Workers remain: interrupt one idle worker so the shutdown
                // signal propagates, then let that worker re-invoke us.
                interruptIdleWorkers(ONLY_ONE);
                return;
            }

            final ReentrantLock mainLock = this.mainLock;
            mainLock.lock();
            try {
                // CAS the pool state to TIDYING (worker count 0).
                if (ctl.compareAndSet(c, ctlOf(TIDYING, 0))) {
                    try {
                        terminated();
                    } finally {
                        // Move to the final TERMINATED state ...
                        ctl.set(ctlOf(TERMINATED, 0));
                        // ... and wake every thread blocked in awaitTermination().
                        termination.signalAll();
                    }
                    return;
                }
            } finally {
                mainLock.unlock();
            }
            // else retry on failed CAS
        }
    }

    /*
     * Methods for controlling interrupts to worker threads.
     */

    /**
     * If there is a security manager, makes sure caller has
     * permission to shut down threads in general (see shutdownPerm).
     * If this passes, additionally makes sure the caller is allowed
     * to interrupt each worker thread. This might not be true even if
     * first check passed, if the SecurityManager treats some threads
     * specially.
     */
    private void checkShutdownAccess() {
        SecurityManager security = System.getSecurityManager();
        if (security != null) {
            // First the blanket permission to shut down threads ...
            security.checkPermission(shutdownPerm);
            final ReentrantLock mainLock = this.mainLock;
            mainLock.lock();
            try {
                // ... then a per-thread access check, under mainLock so the
                // workers set cannot change while we iterate it.
                for (Worker w : workers)
                    security.checkAccess(w.thread);
            } finally {
                mainLock.unlock();
            }
        }
    }

    /**
     * Interrupts all threads, even if active. Ignores SecurityExceptions
     * (in which case some threads may remain uninterrupted).
     */
    private void interruptWorkers() {
        final ReentrantLock mainLock = this.mainLock;
        mainLock.lock();
        try {
            // Interrupt every worker that has started (sync state >= 0),
            // whether or not it is currently running a task.
            for (Worker w : workers)
                w.interruptIfStarted();
        } finally {
            mainLock.unlock();
        }
    }

    /**
     * Interrupts threads that might be waiting for tasks (as
     * indicated by not being locked) so they can check for
     * termination or configuration changes. Ignores
     * SecurityExceptions (in which case some threads may remain
     * uninterrupted).
     *
     * @param onlyOne If true, interrupt at most one worker. This is
     * called only from tryTerminate when termination is otherwise
     * enabled but there are still other workers.  In this case, at
     * most one waiting worker is interrupted to propagate shutdown
     * signals in case all threads are currently waiting.
     * Interrupting any arbitrary thread ensures that newly arriving
     * workers since shutdown began will also eventually exit.
     * To guarantee eventual termination, it suffices to always
     * interrupt only one idle worker, but shutdown() interrupts all
     * idle workers so that redundant workers exit promptly, not
     * waiting for a straggler task to finish.
     */
    private void interruptIdleWorkers(boolean onlyOne) {
        final ReentrantLock mainLock = this.mainLock;
        mainLock.lock();
        try {
            // Walk every worker in the pool.
            for (Worker w : workers) {
                Thread t = w.thread;
                /**
                 * The w.tryLock() call is the key behavioral difference
                 * between {@link #shutdown()} and {@link #shutdownNow()}:
                 * shutdown() lets queued tasks run to completion, while
                 * shutdownNow() discards them.  As {@link #runWorker(Worker)}
                 * shows, a worker holds its lock while executing a task, so
                 * tryLock() returns false for a busy worker and only idle
                 * workers get interrupted here.
                 */
                if (!t.isInterrupted() && w.tryLock()) {
                    try {
                        // Not yet interrupted and idle (we hold its lock): interrupt it.
                        t.interrupt();
                    } catch (SecurityException ignore) {
                    } finally {
                        w.unlock();
                    }
                }
                if (onlyOne)
                    break;
            }
        } finally {
            mainLock.unlock();
        }
    }

    /**
     * Common form of interruptIdleWorkers, to avoid having to
     * remember what the boolean argument means.
     */
    private void interruptIdleWorkers() {
        // Equivalent to interruptIdleWorkers(false): interrupt ALL idle workers.
        interruptIdleWorkers(false);
    }

    /** Self-documenting argument for interruptIdleWorkers(boolean). */
    private static final boolean ONLY_ONE = true;

    /*
     * Misc utilities, most of which are also exported to
     * ScheduledThreadPoolExecutor
     */

    /**
     * Invokes the rejected execution handler for the given command.
     * Package-protected for use by ScheduledThreadPoolExecutor.
     */
    final void reject(Runnable command) {
        // Delegate to the pool's configured RejectedExecutionHandler.
        handler.rejectedExecution(command, this);
    }

    /**
     * Performs any further cleanup following run state transition on
     * invocation of shutdown.  A no-op here, but used by
     * ScheduledThreadPoolExecutor to cancel delayed tasks.
     */
    void onShutdown() {
        // Intentionally empty; overridden by ScheduledThreadPoolExecutor
        // (see the method javadoc above).
    }

    /**
     * State check needed by ScheduledThreadPoolExecutor to
     * enable running tasks during shutdown.
     *
     * @param shutdownOK true if should return true if SHUTDOWN
     */
    /**
     * Reports whether the pool is in a state in which tasks may run:
     * RUNNING always qualifies; SHUTDOWN qualifies only when the caller
     * says it should (shutdownOK).
     */
    final boolean isRunningOrShutdown(boolean shutdownOK) {
        // Decode the run state once, then test the two acceptable states.
        final int state = runStateOf(ctl.get());
        if (state == RUNNING)
            return true;
        return shutdownOK && state == SHUTDOWN;
    }

    /**
     * Drains the task queue into a new list, normally using
     * drainTo. But if the queue is a DelayQueue or any other kind of
     * queue for which poll or drainTo may fail to remove some
     * elements, it deletes them one by one.
     */
    /**
     * Moves all tasks out of the work queue into a fresh list.  Normally
     * drainTo suffices, but some queues (e.g. a DelayQueue) may decline to
     * hand over certain elements via poll/drainTo, so any survivors are
     * removed one at a time.
     */
    private List<Runnable> drainQueue() {
        final BlockingQueue<Runnable> queue = workQueue;
        final ArrayList<Runnable> drained = new ArrayList<Runnable>();
        // Bulk transfer first; for most queue types this empties the queue.
        queue.drainTo(drained);
        // Fallback: pick off anything drainTo left behind, element by element.
        if (!queue.isEmpty()) {
            for (Runnable leftover : queue.toArray(new Runnable[0])) {
                // remove() may race with concurrent consumers; only keep
                // tasks we actually succeeded in removing.
                if (queue.remove(leftover))
                    drained.add(leftover);
            }
        }
        return drained;
    }

    /*
     * Methods for creating, running and cleaning up after workers
     */

    /**
     * Checks if a new worker can be added with respect to current
     * pool state and the given bound (either core or maximum). If so,
     * the worker count is adjusted accordingly, and, if possible, a
     * new worker is created and started, running firstTask as its
     * first task. This method returns false if the pool is stopped or
     * eligible to shut down. It also returns false if the thread
     * factory fails to create a thread when asked.  If the thread
     * creation fails, either due to the thread factory returning
     * null, or due to an exception (typically OutOfMemoryError in
     * Thread.start()), we roll back cleanly.
     *
     * @param firstTask the task the new thread should run first (or
     * null if none). Workers are created with an initial first task
     * (in method execute()) to bypass queuing when there are fewer
     * than corePoolSize threads (in which case we always start one),
     * or when the queue is full (in which case we must bypass queue).
     * Initially idle threads are usually created via
     * prestartCoreThread or to replace other dying workers.
     * In short: the first task the new thread executes.  Below
     * corePoolSize a new thread is always started, and firstTask is
     * what it runs first.
     *
     * @param core if true use corePoolSize as bound, else
     * maximumPoolSize. (A boolean indicator is used here rather than a
     * value to ensure reads of fresh values after checking other pool
     * state).
     * true: corePoolSize bounds thread creation -- if the pool already
     *       holds corePoolSize threads, this request is refused.
     * false: maximumPoolSize is the bound instead.
     * Either way, the current thread count must not be
     * >= (core ? corePoolSize : maximumPoolSize).
     *
     * @return true if successful
     */
    private boolean addWorker(Runnable firstTask, boolean core) {
        // Phase 1: bump the pool's worker count via CAS.
        retry:
        for (;;) {
            int c = ctl.get();
            // Current run state of the pool.
            int rs = runStateOf(c);

            // Check if queue empty only if necessary.
            /**
             * 1.1
             * 1). If the pool is shut down (rs >= SHUTDOWN) AND any of the
             *     following holds, no worker should be created:
             *    (a) the state is beyond SHUTDOWN (STOP, TIDYING, TERMINATED);
             *    (b) firstTask != null;
             *    (c) workQueue.isEmpty(): shut down AND nothing left to run.
             * 2). This is fundamentally state control:
             *    at SHUTDOWN no new tasks are accepted but existing ones run;
             *    beyond SHUTDOWN no tasks are accepted and running ones are
             *    interrupted.
             * 3). The condition is equivalent to:
             *     <code>
             *      (rs >= SHUTDOWN &&
             *          (
             *              rs > SHUTDOWN ||
             *              firstTask != null ||
             *              workQueue.isEmpty()
             *          )
             *      )
             *     </code>
             */
            if (rs >= SHUTDOWN &&
                ! (rs == SHUTDOWN &&
                   firstTask == null &&
                   ! workQueue.isEmpty()))
                return false;

            // 1.2 Increment the worker count, implemented as
            //     CAS [compareAndIncrementWorkerCount(c)] + spin [for(;;)].
            for (;;) {
                // 1.2.1 Current number of worker threads.
                int wc = workerCountOf(c);
                // Refuse if the count already hits the hard capacity or the
                // configured (core or maximum) bound.
                if (wc >= CAPACITY ||
                    wc >= (core ? corePoolSize : maximumPoolSize))
                    return false;
                // 1.2.2 CAS the worker count up by one -- the entire point of
                //       these two nested spins.
                if (compareAndIncrementWorkerCount(c))
                    // Success: leave the outer spin entirely.
                    break retry;
                /**
                 * The CAS above failed, which can only mean another thread is
                 * concurrently adding a worker too.  Re-read ctl and retry.
                 */
                c = ctl.get();  // Re-read ctl
                /**
                 * Compare the run state against the value read in the outer loop:
                 * (a) unchanged: keep spinning here to retry the increment;
                 * (b) changed: another thread altered the pool state, so go
                 *     back to the outer loop and redo all the checks.
                 */
                if (runStateOf(c) != rs)
                    continue retry;
                // else CAS failed due to workerCount change; retry inner loop
            }
        }

        /**
         * Reaching this point means:
         *  (a) we may now create a thread to execute tasks;
         *  (b) the pool's worker count has already been incremented.
         */

        // Phase 2: create the new Worker.
        // Has the worker's thread been started?
        boolean workerStarted = false;
        /** Has the worker been successfully added to {@link #workers}? */
        boolean workerAdded = false;
        Worker w = null;
        try {
            // Hand firstTask to the Worker constructor (the task to execute).
            w = new Worker(firstTask);
            // The Worker constructor asked the ThreadFactory for a new thread.
            final Thread t = w.thread;
            if (t != null) {
                /**
                 * mainLock is the pool-wide lock; holding it keeps all
                 * manipulation of the workers set properly synchronized.
                 * (Note: shutting the pool down also takes this lock.)
                 */
                final ReentrantLock mainLock = this.mainLock;
                mainLock.lock();
                try {
                    // Recheck while holding lock.
                    // Back out on ThreadFactory failure or if
                    // shut down before lock acquired.
                    // Another thread may have changed the pool state before we
                    // acquired the lock, so the state is re-read here.
                    int rs = runStateOf(ctl.get());

                    /**
                     * (a) rs < SHUTDOWN: the pool is RUNNING -- the normal case.
                     * (b) rs == SHUTDOWN: no new tasks are accepted, but a
                     *     worker may still be added (firstTask == null) to
                     *     keep draining the queued tasks.
                     */
                    if (rs < SHUTDOWN ||
                        (rs == SHUTDOWN && firstTask == null)) {
                        if (t.isAlive()) // precheck that t is startable
                            throw new IllegalThreadStateException();
                        // Add the worker to the workers HashSet.
                        workers.add(w);
                        int s = workers.size();
                        if (s > largestPoolSize)
                            largestPoolSize = s;
                        workerAdded = true;
                    }
                } finally {
                    mainLock.unlock();
                }

                // The worker made it into the workers set: start its thread.
                if (workerAdded) {
                    /** Starting the thread eventually invokes {@link Worker#run()}. */
                    t.start();
                    workerStarted = true;
                }
            }
        } finally {
            // Did the thread actually start?
            if (! workerStarted)
                // Not started, or failed to start: roll everything back.
                addWorkerFailed(w);
        }
        // Report whether the worker thread started successfully.
        return workerStarted;
    }

    /**
     * Rolls back the worker thread creation.
     * - removes worker from workers, if present
     * - decrements worker count
     * - rechecks for termination, in case the existence of this
     *   worker was holding up termination
     */
    private void addWorkerFailed(Worker w) {
        final ReentrantLock mainLock = this.mainLock;
        mainLock.lock();
        try {
            if (w != null)
                // Remove the worker from the workers set, if it made it in.
                workers.remove(w);
            // CAS the pool's worker count back down by one.
            decrementWorkerCount();
            // This dead-on-arrival worker may have been the last obstacle
            // to termination; recheck.
            tryTerminate();
        } finally {
            mainLock.unlock();
        }
    }

    /**
     * Performs cleanup and bookkeeping for a dying worker. Called
     * only from worker threads. Unless completedAbruptly is set,
     * assumes that workerCount has already been adjusted to account
     * for exit.  This method removes thread from worker set, and
     * possibly terminates the pool or replaces the worker if either
     * it exited due to user task exception or if fewer than
     * corePoolSize workers are running or queue is non-empty but
     * there are no workers.
     *
     * @param w the worker
     * @param completedAbruptly if the worker died due to user exception
     */
    private void processWorkerExit(Worker w, boolean completedAbruptly) {
        if (completedAbruptly) // If abrupt, then workerCount wasn't adjusted
            // The task threw: getTask() never decremented the count, so do it here.
            decrementWorkerCount();

        final ReentrantLock mainLock = this.mainLock;
        mainLock.lock();
        try {
            // Fold this worker's completed-task count into the pool-wide total.
            completedTaskCount += w.completedTasks;
            // Remove the worker from the workers set.
            workers.remove(w);
        } finally {
            mainLock.unlock();
        }

        // The departing worker may enable termination; recheck.
        tryTerminate();

        int c = ctl.get();
        // Only pools below STOP (i.e. RUNNING or SHUTDOWN) replace workers.
        if (runStateLessThan(c, STOP)) {
            if (!completedAbruptly) {
                // Minimum number of threads that must stay alive.
                int min = allowCoreThreadTimeOut ? 0 : corePoolSize;
                // Even with zero core threads, keep one worker while tasks remain queued.
                if (min == 0 && ! workQueue.isEmpty())
                    min = 1;
                if (workerCountOf(c) >= min)
                    return; // replacement not needed
            }
            // Replace the worker: it died abruptly, or the pool is below minimum.
            addWorker(null, false);
        }
    }

    /**
     * Performs blocking or timed wait for a task, depending on
     * current configuration settings, or returns null if this worker
     * must exit because of any of:
     * 1. There are more than maximumPoolSize workers (due to
     *    a call to setMaximumPoolSize).
     * 2. The pool is stopped.
     * 3. The pool is shutdown and the queue is empty.
     * 4. This worker timed out waiting for a task, and timed-out
     *    workers are subject to termination (that is,
     *    {@code allowCoreThreadTimeOut || workerCount > corePoolSize})
     *    both before and after the timed wait, and if the queue is
     *    non-empty, this worker is not the last thread in the pool.
     *
     * <pre>
     * 1. Purpose: a worker fetches a task from the blocking queue.
     *    Note that ALL workers fetch from the same blocking queue, so
     *    multiple threads may contend for it at once.
     *    Q: When many threads request tasks from one blocking queue, the
     *       queue pays a locking cost to hand out tasks -- the classic
     *       contention problem.  See {@link LinkedBlockingQueue#poll()},
     *       which guards itself with {@link LinkedBlockingQueue#takeLock}.
     *    A: The root cause is the blocking-queue structure itself; fixing
     *       it at the root requires a different structure, e.g. the ring
     *       buffer used by LMAX's disruptor framework.
     * 2. Three possible outcomes of this method:
     *    1). Block until a task is obtained.  By default threads within
     *        corePoolSize are not reclaimed; they wait indefinitely.
     *    2). Timed exit: keepAliveTime elapses without a task, so the
     *        worker is shut down.
     *    3). Return null (meaning the worker must exit) when:
     *        (a) more than maximumPoolSize workers exist;
     *        (b) the pool is SHUTDOWN and workQueue is empty -- no new
     *            tasks are accepted and none remain to execute;
     *        (c) the pool is at STOP (no new tasks, and even queued
     *            tasks are no longer executed).
     * </pre>
     *
     * @return task, or null if the worker must exit, in which case
     *         workerCount is decremented
     */
    private Runnable getTask() {
        boolean timedOut = false; // Did the last poll() time out?

        // Spin.
        for (;;) {
            int c = ctl.get();
            // Current run state of the pool.
            int rs = runStateOf(c);

            // 1- Decide whether to return null.
            // Check if queue empty only if necessary.
            /**
             * 1-1 Handles exit cases (b) and (c) above.  The condition is
             * equivalent to:
             * <code>
             *     if ((rs == SHUTDOWN && workQueue.isEmpty()) || rs >= STOP)
             * </code>
             */
            if (rs >= SHUTDOWN && (rs >= STOP || workQueue.isEmpty())) {
                // CAS the pool's worker count down by one.
                decrementWorkerCount();
                return null;
            }

            // Current number of worker threads in the pool.
            int wc = workerCountOf(c);

            // Are workers subject to culling?
            /**
             * A worker MAY be culled when either:
             *      (a) allowCoreThreadTimeOut: even core threads may be reclaimed;
             *      (b) wc > corePoolSize: the pool currently exceeds the core size.
             * Q: Why only "may"?
             * A: Because only when timed is true is the keep-alive timeout
             *    (keepAliveTime) applied, and only a worker that exceeds it
             *    is actually culled.
             */
            boolean timed = allowCoreThreadTimeOut || wc > corePoolSize;

            /**
             * 1-2 Handles exit case (a): return null when workerCount exceeds
             * maximumPoolSize, or when an eligible (timed) worker timed out.
             * The second clause, (wc > 1 || workQueue.isEmpty()), protects the
             * last worker: the pool never drops its only remaining thread
             * while tasks are still queued, so someone stays to run them.
             */
            if ((wc > maximumPoolSize || (timed && timedOut))
                && (wc > 1 || workQueue.isEmpty())) {
                // CAS the pool's worker count down by one.
                if (compareAndDecrementWorkerCount(c))
                    return null;
                continue;
            }

            // 2- None of the exit conditions holds: fetch a task.
            try {
                // Pull a task from workQueue, the pool's blocking queue.
                // Both take() and poll() may throw InterruptedException.
                // (keepAliveTime is stored in nanoseconds, hence NANOSECONDS.)
                Runnable r = timed ?
                    workQueue.poll(keepAliveTime, TimeUnit.NANOSECONDS) :
                    workQueue.take();
                if (r != null)
                    // Got a task: hand it back to runWorker.
                    return r;
                timedOut = true;
            } catch (InterruptedException retry) {
                /**
                 * The interrupt surfaces here as the InterruptedException thrown
                 * by workQueue.take()/poll(); the response is simply to retry.
                 * Q: Why might the worker be interrupted?
                 * A: See {@link #setMaximumPoolSize(int)}: if maximumPoolSize is
                 *    lowered below the current worker count, the surplus threads
                 *    must exit -- re-entering the loop makes some of them hit the
                 *    exit conditions above and return null.
                 */
                timedOut = false;
            }
        }
    }

    /**
     * Main worker run loop.  Repeatedly gets tasks from queue and
     * executes them, while coping with a number of issues:
     *
     * 1. We may start out with an initial task, in which case we
     * don't need to get the first one. Otherwise, as long as pool is
     * running, we get tasks from getTask. If it returns null then the
     * worker exits due to changed pool state or configuration
     * parameters.  Other exits result from exception throws in
     * external code, in which case completedAbruptly holds, which
     * usually leads processWorkerExit to replace this thread.
     *
     * 2. Before running any task, the lock is acquired to prevent
     * other pool interrupts while the task is executing, and then we
     * ensure that unless pool is stopping, this thread does not have
     * its interrupt set.
     *
     * 3. Each task run is preceded by a call to beforeExecute, which
     * might throw an exception, in which case we cause thread to die
     * (breaking loop with completedAbruptly true) without processing
     * the task.
     *
     * 4. Assuming beforeExecute completes normally, we run the task,
     * gathering any of its thrown exceptions to send to afterExecute.
     * We separately handle RuntimeException, Error (both of which the
     * specs guarantee that we trap) and arbitrary Throwables.
     * Because we cannot rethrow Throwables within Runnable.run, we
     * wrap them within Errors on the way out (to the thread's
     * UncaughtExceptionHandler).  Any thrown exception also
     * conservatively causes thread to die.
     *
     * 5. After task.run completes, we call afterExecute, which may
     * also throw an exception, which will also cause thread to
     * die. According to JLS Sec 14.20, this exception is the one that
     * will be in effect even if task.run throws.
     *
     * The net effect of the exception mechanics is that afterExecute
     * and the thread's UncaughtExceptionHandler have as accurate
     * information as we can provide about any problems encountered by
     * user code.
     *
     * This method executes on the {@link Worker#thread} thread, entered
     * after that thread is started via start().
     *
     * @param w the worker
     */
    final void runWorker(Worker w) {
        Thread wt = Thread.currentThread();
        // The worker's first task, if one was supplied at construction.
        Runnable task = w.firstTask;
        w.firstTask = null;
        // Worker.unlock() moves the sync state off -1 to 0; from here on a
        // shutdownNow() issued by another thread CAN interrupt this worker.
        w.unlock(); // allow interrupts
        boolean completedAbruptly = true;
        try {
            /**
             * Keep this worker's thread (one thread per Worker) busy in a loop:
             * run the initial task if present, then keep pulling tasks from the
             * blocking queue {@link #workQueue} until getTask() returns null.
             */
            while (task != null || (task = getTask()) != null) {
                // A worker runs only one task at a time (exclusive acquire).
                w.lock();
                // If pool is stopping, ensure thread is interrupted;
                // if not, ensure thread is not interrupted.  This
                // requires a recheck in second case to deal with
                // shutdownNow race while clearing interrupt
                if ((runStateAtLeast(ctl.get(), STOP) ||
                     (Thread.interrupted() &&
                      runStateAtLeast(ctl.get(), STOP))) &&
                    !wt.isInterrupted())
                    wt.interrupt();
                try {
                    // Extension hook: before-execution callback.
                    beforeExecute(wt, task);
                    Throwable thrown = null;
                    try {
                        /**
                         * Finally execute the user-submitted task.  For tasks
                         * submitted via submit() this is {@link FutureTask#run()}.
                         */
                        task.run();
                    } catch (RuntimeException x) {
                        thrown = x; throw x;
                    } catch (Error x) {
                        thrown = x; throw x;
                    } catch (Throwable x) {
                        thrown = x; throw new Error(x);
                    } finally {
                        // Extension hook: after-execution callback.
                        afterExecute(task, thrown);
                    }
                } finally {
                    // Clear task so the next iteration calls getTask().
                    task = null;
                    // Bump this worker's completed-task statistic.
                    w.completedTasks++;
                    // Release the worker's exclusive lock (sync state back to 0).
                    w.unlock();
                }
            }
            // Not reached if task.run() threw inside the loop above.
            completedAbruptly = false;
        } finally {
            /**
             * Reaching here means either:
             * 1). getTask() returned null -- no runnable work remains; in that
             *     case getTask() already decremented workerCount (the matching
             *     increment happened in the inner spin of
             *     {@link #addWorker(Runnable, boolean)}); or
             * 2). a task threw while executing (completedAbruptly stays true).
             */
            processWorkerExit(w, completedAbruptly);
        }
    }

    // Public constructors and methods

    /**
     * Creates a new {@code ThreadPoolExecutor} with the given initial
     * parameters and default thread factory and rejected execution handler.
     * It may be more convenient to use one of the {@link Executors} factory
     * methods instead of this general purpose constructor.
     *
     * @param corePoolSize the number of threads to keep in the pool, even
     *        if they are idle, unless {@code allowCoreThreadTimeOut} is set
     * @param maximumPoolSize the maximum number of threads to allow in the
     *        pool
     * @param keepAliveTime when the number of threads is greater than
     *        the core, this is the maximum time that excess idle threads
     *        will wait for new tasks before terminating.
     * @param unit the time unit for the {@code keepAliveTime} argument
     * @param workQueue the queue to use for holding tasks before they are
     *        executed.  This queue will hold only the {@code Runnable}
     *        tasks submitted by the {@code execute} method.
     * @throws IllegalArgumentException if one of the following holds:<br>
     *         {@code corePoolSize < 0}<br>
     *         {@code keepAliveTime < 0}<br>
     *         {@code maximumPoolSize <= 0}<br>
     *         {@code maximumPoolSize < corePoolSize}
     * @throws NullPointerException if {@code workQueue} is null
     */
    public ThreadPoolExecutor(int corePoolSize,
                              int maximumPoolSize,
                              long keepAliveTime,
                              TimeUnit unit,
                              BlockingQueue<Runnable> workQueue) {
        // Delegate to the general-purpose constructor, filling in the
        // default thread factory and default rejected-execution handler.
        this(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue,
             Executors.defaultThreadFactory(), defaultHandler);
    }

    /**
     * Creates a new {@code ThreadPoolExecutor} with the given initial
     * parameters and default rejected execution handler.
     *
     * @param corePoolSize the number of threads to keep in the pool, even
     *        if they are idle, unless {@code allowCoreThreadTimeOut} is set
     * @param maximumPoolSize the maximum number of threads to allow in the
     *        pool
     * @param keepAliveTime when the number of threads is greater than
     *        the core, this is the maximum time that excess idle threads
     *        will wait for new tasks before terminating.
     * @param unit the time unit for the {@code keepAliveTime} argument
     * @param workQueue the queue to use for holding tasks before they are
     *        executed.  This queue will hold only the {@code Runnable}
     *        tasks submitted by the {@code execute} method.
     * @param threadFactory the factory to use when the executor
     *        creates a new thread
     * @throws IllegalArgumentException if one of the following holds:<br>
     *         {@code corePoolSize < 0}<br>
     *         {@code keepAliveTime < 0}<br>
     *         {@code maximumPoolSize <= 0}<br>
     *         {@code maximumPoolSize < corePoolSize}
     * @throws NullPointerException if {@code workQueue}
     *         or {@code threadFactory} is null
     */
    public ThreadPoolExecutor(int corePoolSize,
                              int maximumPoolSize,
                              long keepAliveTime,
                              TimeUnit unit,
                              BlockingQueue<Runnable> workQueue,
                              ThreadFactory threadFactory) {
        // Delegate to the general-purpose constructor, supplying only the
        // default rejected-execution handler.
        this(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue,
             threadFactory, defaultHandler);
    }

    /**
     * Creates a new {@code ThreadPoolExecutor} with the given initial
     * parameters and default thread factory.
     *
     * @param corePoolSize the number of threads to keep in the pool, even
     *        if they are idle, unless {@code allowCoreThreadTimeOut} is set
     * @param maximumPoolSize the maximum number of threads to allow in the
     *        pool
     * @param keepAliveTime when the number of threads is greater than
     *        the core, this is the maximum time that excess idle threads
     *        will wait for new tasks before terminating.
     * @param unit the time unit for the {@code keepAliveTime} argument
     * @param workQueue the queue to use for holding tasks before they are
     *        executed.  This queue will hold only the {@code Runnable}
     *        tasks submitted by the {@code execute} method.
     * @param handler the handler to use when execution is blocked
     *        because the thread bounds and queue capacities are reached
     * @throws IllegalArgumentException if one of the following holds:<br>
     *         {@code corePoolSize < 0}<br>
     *         {@code keepAliveTime < 0}<br>
     *         {@code maximumPoolSize <= 0}<br>
     *         {@code maximumPoolSize < corePoolSize}
     * @throws NullPointerException if {@code workQueue}
     *         or {@code handler} is null
     */
    public ThreadPoolExecutor(int corePoolSize,
                              int maximumPoolSize,
                              long keepAliveTime,
                              TimeUnit unit,
                              BlockingQueue<Runnable> workQueue,
                              RejectedExecutionHandler handler) {
        // Delegate to the general-purpose constructor, supplying only the
        // default thread factory.
        this(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue,
             Executors.defaultThreadFactory(), handler);
    }

    /**
     * Creates a new {@code ThreadPoolExecutor} with the given initial
     * parameters.
     *
     * @param corePoolSize the number of threads to keep in the pool, even
     *        if they are idle, unless {@code allowCoreThreadTimeOut} is set
     * @param maximumPoolSize the maximum number of threads to allow in the
     *        pool
     * @param keepAliveTime when the number of threads is greater than
     *        the core, this is the maximum time that excess idle threads
     *        will wait for new tasks before terminating.
     * @param unit the time unit for the {@code keepAliveTime} argument
     * @param workQueue the queue to use for holding tasks before they are
     *        executed.  This queue will hold only the {@code Runnable}
     *        tasks submitted by the {@code execute} method.
     * @param threadFactory the factory to use when the executor
     *        creates a new thread
     * @param handler the handler to use when execution is blocked
     *        because the thread bounds and queue capacities are reached
     * @throws IllegalArgumentException if one of the following holds:<br>
     *         {@code corePoolSize < 0}<br>
     *         {@code keepAliveTime < 0}<br>
     *         {@code maximumPoolSize <= 0}<br>
     *         {@code maximumPoolSize < corePoolSize}
     * @throws NullPointerException if {@code workQueue}
     *         or {@code threadFactory} or {@code handler} is null
     */
    public ThreadPoolExecutor(int corePoolSize,
                              int maximumPoolSize,
                              long keepAliveTime,
                              TimeUnit unit,
                              BlockingQueue<Runnable> workQueue,
                              ThreadFactory threadFactory,
                              RejectedExecutionHandler handler) {
        if (corePoolSize < 0 ||
            maximumPoolSize <= 0 ||
            maximumPoolSize < corePoolSize ||
            keepAliveTime < 0)
            throw new IllegalArgumentException();
        if (workQueue == null || threadFactory == null || handler == null)
            throw new NullPointerException();
        // Capture the caller's access control context only when a security
        // manager is installed (used later, e.g. by finalize(), to run
        // shutdown with the creator's privileges).
        this.acc = System.getSecurityManager() == null ?
                null :
                AccessController.getContext();

        /**
         * Number of resident core threads (think: permanent staff). If it is
         * greater than 0, these threads are not destroyed even after their
         * current tasks finish.
         */
        this.corePoolSize = corePoolSize;

        /**
         * 1) The maximum number of threads the pool may run concurrently.
         * 2) Analogy: when the project gets busy, temporary workers may be
         *    hired. Permanent staff + temporary workers <= maximumPoolSize;
         *    exceeding that bound leads to unmanageable scheduling chaos.
         */
        this.maximumPoolSize = maximumPoolSize;

        /**
         * Once the worker count exceeds {@link #corePoolSize}, submitted
         * tasks go into this blocking queue. Only when the queue is full do
         * additional requests cause "temporary workers" to be hired.
         */
        this.workQueue = workQueue;

        /**
         * 1) How long an idle thread may live; once a thread has been idle
         *    this long it is destroyed, eventually leaving only corePoolSize
         *    threads.
         * 2) Analogy: when a project winds down, fewer people MAY be needed.
         *    "May" is decided by whether a temp worker's idle time exceeds
         *    keepAliveTime — if so, the recent workload no longer justifies
         *    keeping the temporary workers around.
         */
        this.keepAliveTime = unit.toNanos(keepAliveTime);

        /**
         * Thread factory used to produce the pool's threads for a family of
         * related tasks; it can also add a name prefix, which makes thread
         * stack analysis much clearer.
         */
        this.threadFactory = threadFactory;

        /**
         * Rejection policy, applied when the workQueue is full AND the
         * thread count has already reached maximumPoolSize — e.g. abort or
         * discard. This is a throttling/overload-protection measure.
         */
        this.handler = handler;
    }

    /**
     * <pre>
     * Executes the given task sometime in the future.  The task
     * may execute in a new thread or in an existing pooled thread.
     *
     * If the task cannot be submitted for execution, either because this
     * executor has been shutdown or because its capacity has been reached,
     * the task is handled by the current {@code RejectedExecutionHandler}.
     *
     * Entry point for task scheduling. The work done here is:
     *      inspect the pool's current run state, worker count and policy,
     *      then decide the flow for this task — start a new thread for it,
     *      buffer it in the work queue, or reject it outright.
     * </pre>
     *
     * @param command the task to execute
     * @throws RejectedExecutionException at discretion of
     *         {@code RejectedExecutionHandler}, if the task
     *         cannot be accepted for execution
     * @throws NullPointerException if {@code command} is null
     *
     */
    public void execute(Runnable command) {
        if (command == null)
            throw new NullPointerException();
        /*
         * Proceed in 3 steps:
         *
         * 1. If fewer than corePoolSize threads are running, try to
         * start a new thread with the given command as its first
         * task.  The call to addWorker atomically checks runState and
         * workerCount, and so prevents false alarms that would add
         * threads when it shouldn't, by returning false.
         *
         * 2. If a task can be successfully queued, then we still need
         * to double-check whether we should have added a thread
         * (because existing ones died since last checking) or that
         * the pool shut down since entry into this method. So we
         * recheck state and if necessary roll back the enqueuing if
         * stopped, or start a new thread if there are none.
         *
         * 3. If we cannot queue task, then we try to add a new
         * thread.  If it fails, we know we are shut down or saturated
         * and so reject the task.
         */
        int c = ctl.get();
        /**
         * 1. If the current worker count is below corePoolSize, create a
         *    thread (a Worker, which wraps a thread) and run this task on it
         *    — note that pool threads are therefore created lazily.
         *    After finishing its first task a Worker keeps looping, taking
         *    further tasks from the queue; see {@link #runWorker(Worker)}.
         */
        if (workerCountOf(c) < corePoolSize) {
            /**
             * 1) Directly add a worker to run the task. The Worker creates a
             *    thread and takes the current command as that thread's first
             *    task (firstTask).
             * 2) Once the worker is added, submission is complete: the pool
             *    has accepted the task and this method can return. The
             *    eventual result is delivered via the wrapping FutureTask.
             */
            if (addWorker(command, true))
                return;
            c = ctl.get();
        }
        /**
         * 2. Reaching here means the worker count is >= corePoolSize, or the
         *    addWorker call above failed. The task may be placed into the
         *    workQueue only while the pool is still in the RUNNING state.
         */
        if (isRunning(c) && workQueue.offer(command)) {
            /**
             * The task was queued successfully; decide whether a new thread
             * is still needed:
             * ① below corePoolSize, new threads are started unconditionally;
             * ② at or above corePoolSize, tasks simply wait in the queue.
             */

            int recheck = ctl.get();
            /**
             * If the pool is no longer RUNNING, remove the task we just
             * queued and apply the rejection policy. The re-check is needed
             * because another thread may have changed the pool's state
             * between workQueue.offer(command) above and this point.
             */
            if (! isRunning(recheck) && remove(command))
                reject(command);
            // If the pool is still RUNNING but currently has zero worker
            // threads, start one so the queued task can actually run.
            else if (workerCountOf(recheck) == 0)
                addWorker(null, false);

            // The code above guards against the task sitting in the queue
            // while every worker thread has already terminated.
        }
        /**
         * 3. This branch means the pool is not RUNNING, or the workQueue is
         *    full. Try to add a new worker bounded by maximumPoolSize
         *    (i.e. hire a "temporary worker").
         */
        else if (!addWorker(command, false))
            // 4. If adding the worker fails, the thread count has reached
            //    maximumPoolSize (or the pool is shut down); apply the
            //    rejection policy.
            reject(command);
    }

    /**
     * <pre>
     * Initiates an orderly shutdown in which previously submitted
     * tasks are executed, but no new tasks will be accepted.
     * Invocation has no additional effect if already shut down.
     *
     * This method does not wait for previously submitted tasks to
     * complete execution.  Use {@link #awaitTermination awaitTermination}
     * to do that.
     *
     * 1. Purpose: shut the pool down while letting in-flight work finish.
     *    After shutdown() is called, the pool accepts no new tasks, but
     *    tasks placed into the pool (i.e. into the work queue) before the
     *    call continue to execute.
     * 2. Background: this is typically invoked from a JVM shutdown hook,
     *    so a program does not stop abruptly — the pool's outstanding
     *    tasks are drained before the process truly exits and stops
     *    serving. New requests arriving during shutdown are not handled.
     * For an example of shutdown-hook usage, see Alibaba's distributed
     * transaction framework Seata:
     *      module: seata-core
     *      class : io.seata.core.rpc.ShutdownHook
     * NOTE:
     *      Shutdown hooks do NOT run when the service is stopped with
     *      `kill -9 pid` — the process is killed immediately with no chance
     *      to react. Prefer plain `kill pid`, which lets the JVM run its
     *      hooks upon receiving the termination signal.
     * </pre>
     *
     * @throws SecurityException {@inheritDoc}
     */
    public void shutdown() {
        // Acquire the pool's global lock.
        final ReentrantLock mainLock = this.mainLock;
        mainLock.lock();
        try {
            // Check whether the caller has permission to shut the pool down.
            checkShutdownAccess();
            // Advance the pool's run state to SHUTDOWN.
            advanceRunState(SHUTDOWN);
            // Interrupt idle Worker threads; workers still draining the task
            // queue finish that work before terminating.
            interruptIdleWorkers();
            // hook for ScheduledThreadPoolExecutor
            onShutdown();
        } finally {
            mainLock.unlock();
        }
        // Try to advance the pool's state to TERMINATED.
        tryTerminate();
    }

    /**
     * Attempts to stop all actively executing tasks, halts the
     * processing of waiting tasks, and returns a list of the tasks
     * that were awaiting execution. These tasks are drained (removed)
     * from the task queue upon return from this method.
     *
     * <p>This method does not wait for actively executing tasks to
     * terminate.  Use {@link #awaitTermination awaitTermination} to
     * do that.
     *
     * <p>There are no guarantees beyond best-effort attempts to stop
     * processing actively executing tasks.  This implementation
     * cancels tasks via {@link Thread#interrupt}, so any task that
     * fails to respond to interrupts may never terminate.
     *
     * <pre>
     * Unlike {@link #shutdown()} above:
     *    after shutdownNow() the pool accepts no new tasks, the tasks still
     *  waiting in the work queue are discarded, and actively running Worker
     *  threads are interrupted immediately. The method returns at once; its
     *  return value is the list of tasks that were dropped from the queue.
     * </pre>
     *
     * @throws SecurityException {@inheritDoc}
     */
    public List<Runnable> shutdownNow() {
        List<Runnable> tasks;
        final ReentrantLock mainLock = this.mainLock;
        mainLock.lock();
        try {
            // Check whether the caller has permission to shut the pool down.
            checkShutdownAccess();
            // Advance the pool's run state to STOP.
            advanceRunState(STOP);
            // Interrupt ALL Worker threads, idle or not.
            interruptWorkers();
            // Move the tasks remaining in the work queue into `tasks`.
            tasks = drainQueue();
        } finally {
            mainLock.unlock();
        }
        tryTerminate();
        return tasks;
    }

    public boolean isShutdown() {
        // The pool counts as shut down once it has left the RUNNING state.
        final int c = ctl.get();
        return !isRunning(c);
    }

    /**
     * Returns true if this executor is in the process of terminating after
     * a call to {@link #shutdown} or {@link #shutdownNow} but has not yet
     * completely terminated. Mainly useful for debugging: a {@code true}
     * result reported long after shutdown suggests that submitted tasks
     * have ignored or suppressed interruption, preventing proper
     * termination.
     *
     * @return {@code true} if terminating but not yet terminated
     */
    public boolean isTerminating() {
        final int state = ctl.get();
        // No longer RUNNING, but TERMINATED has not been reached yet.
        return !isRunning(state) && runStateLessThan(state, TERMINATED);
    }

    public boolean isTerminated() {
        // Terminated once the run state has advanced to TERMINATED.
        final int state = ctl.get();
        return runStateAtLeast(state, TERMINATED);
    }

    /**
     * Blocks the calling thread until the pool's state becomes TERMINATED,
     * the timeout elapses, or the thread is interrupted — whichever comes
     * first.
     *
     * @param timeout the maximum time to wait
     * @param unit the time unit of the timeout argument
     * @return {@code true} if this executor terminated and {@code false}
     *         if the timeout elapsed before termination
     * @throws InterruptedException if interrupted while waiting
     */
    public boolean awaitTermination(long timeout, TimeUnit unit)
        throws InterruptedException {
        // Remaining time until the deadline, in nanoseconds.
        long nanos = unit.toNanos(timeout);
        // Acquire the pool's global lock (required by the condition below).
        final ReentrantLock mainLock = this.mainLock;
        mainLock.lock();
        try {
            // Spin until terminated or timed out.
            for (;;) {
                // The pool state becomes TERMINATED inside tryTerminate();
                // once it does, report success.
                if (runStateAtLeast(ctl.get(), TERMINATED))
                    return true;
                // Deadline reached without termination: report failure.
                if (nanos <= 0)
                    return false;
                // Wait on the termination condition; awaitNanos returns the
                // remaining time, re-arming the deadline for the next loop.
                nanos = termination.awaitNanos(nanos);
            }
        } finally {
            mainLock.unlock();
        }
    }

    /**
     * Invokes {@code shutdown} when this executor is no longer
     * referenced and it has no threads.
     */
    protected void finalize() {
        SecurityManager sm = System.getSecurityManager();
        if (sm == null || acc == null) {
            // No security manager, or no context was captured at
            // construction: a plain shutdown suffices.
            shutdown();
        } else {
            // Run shutdown under the access control context captured in the
            // constructor, so finalization succeeds even if the finalizer
            // thread itself lacks the required permissions.
            PrivilegedAction<Void> pa = () -> { shutdown(); return null; };
            AccessController.doPrivileged(pa, acc);
        }
    }

    /**
     * Sets the thread factory used to create new threads.
     *
     * @param threadFactory the new thread factory
     * @throws NullPointerException if threadFactory is null
     * @see #getThreadFactory
     */
    public void setThreadFactory(ThreadFactory threadFactory) {
        // Reject null up front; otherwise simply replace the factory.
        this.threadFactory = Objects.requireNonNull(threadFactory);
    }

    /**
     * Returns the thread factory used to create new threads.
     *
     * @return the current thread factory
     * @see #setThreadFactory(ThreadFactory)
     */
    public ThreadFactory getThreadFactory() {
        return this.threadFactory;
    }

    /**
     * Sets a new handler for unexecutable tasks.
     *
     * @param handler the new handler
     * @throws NullPointerException if handler is null
     * @see #getRejectedExecutionHandler
     */
    public void setRejectedExecutionHandler(RejectedExecutionHandler handler) {
        // Reject null up front; otherwise simply replace the handler.
        this.handler = Objects.requireNonNull(handler);
    }

    /**
     * Returns the current handler for unexecutable tasks.
     *
     * @return the current handler
     * @see #setRejectedExecutionHandler(RejectedExecutionHandler)
     */
    public RejectedExecutionHandler getRejectedExecutionHandler() {
        return this.handler;
    }

    /**
     * Sets the core number of threads.  This overrides any value set
     * in the constructor.  If the new value is smaller than the
     * current value, excess existing threads will be terminated when
     * they next become idle.  If larger, new threads will, if needed,
     * be started to execute any queued tasks.
     *
     * @param corePoolSize the new core size
     * @throws IllegalArgumentException if {@code corePoolSize < 0}
     * @see #getCorePoolSize
     */
    public void setCorePoolSize(int corePoolSize) {
        if (corePoolSize < 0)
            throw new IllegalArgumentException();
        // Difference between the new and old core size
        // (new corePoolSize - old corePoolSize).
        int delta = corePoolSize - this.corePoolSize;
        // Replace the pool's corePoolSize with the new value immediately.
        this.corePoolSize = corePoolSize;
        // If the pool currently holds more threads than the new core size,
        // interrupt the surplus idle threads so they can terminate.
        if (workerCountOf(ctl.get()) > corePoolSize)
            interruptIdleWorkers();
        // If delta > 0 (the core size grew), new core threads may be needed.
        else if (delta > 0) {
            // We don't really know how many new threads are "needed".
            // As a heuristic, prestart enough new workers (up to new
            // core size) to handle the current number of tasks in
            // queue, but stop if queue becomes empty while doing so.
            /**
             * Math#min(a, b) returns the smaller of the two values.
             * ① If workQueue.size() == 0, then k == 0: the queue is empty
             *    (no backlogged tasks), `k-- > 0` never holds, and no
             *    addWorker call is made.
             * ② If 0 < workQueue.size() < delta, there are pending tasks;
             *    `k-- > 0` holds and, typically, workQueue.size() new core
             *    threads are started.
             * ③ If workQueue.size() > delta > 0, at most delta new core
             *    threads are started.
             */
            int k = Math.min(delta, workQueue.size());
            while (k-- > 0 && addWorker(null, true)) {
                if (workQueue.isEmpty())
                    break;
            }
        }
    }

    /**
     * Returns the core number of threads.
     *
     * @return the core number of threads
     * @see #setCorePoolSize
     */
    public int getCorePoolSize() {
        return this.corePoolSize;
    }

    /**
     * Starts a core thread, causing it to idly wait for work. This
     * overrides the default policy of starting core threads only when
     * new tasks are executed. This method will return {@code false}
     * if all core threads have already been started.
     *
     * @return {@code true} if a thread was started
     */
    public boolean prestartCoreThread() {
        // Nothing to do once the core capacity is already filled.
        if (workerCountOf(ctl.get()) >= corePoolSize)
            return false;
        // Try to start one idle core worker (no first task).
        return addWorker(null, true);
    }

    /**
     * Same as prestartCoreThread except arranges that at least one
     * thread is started even if corePoolSize is 0.
     */
    void ensurePrestart() {
        // Current number of worker threads in the pool.
        final int count = workerCountOf(ctl.get());
        if (count < corePoolSize)
            addWorker(null, true);   // room below core: start a core worker
        else if (count == 0)
            addWorker(null, false);  // corePoolSize == 0: still guarantee one thread
    }

    /**
     * Starts all core threads, causing them to idly wait for work. This
     * overrides the default policy of starting core threads only when
     * new tasks are executed.
     *
     * @return the number of threads started
     */
    public int prestartAllCoreThreads() {
        // Keep adding idle core workers until addWorker refuses (core full).
        int started = 0;
        for (; addWorker(null, true); ++started)
            ;
        return started;
    }

    /**
     * Returns true if this pool allows core threads to time out and
     * terminate if no tasks arrive within the keepAlive time, being
     * replaced if needed when new tasks arrive. When true, the same
     * keep-alive policy applying to non-core threads applies also to
     * core threads. When false (the default), core threads are never
     * terminated due to lack of incoming tasks.
     *
     * @return {@code true} if core threads are allowed to time out,
     *         else {@code false}
     *
     * @since 1.6
     */
    public boolean allowsCoreThreadTimeOut() {
        return this.allowCoreThreadTimeOut;
    }

    /**
     * Sets the policy governing whether core threads may time out and
     * terminate if no tasks arrive within the keep-alive time, being
     * replaced if needed when new tasks arrive. When false, core
     * threads are never terminated due to lack of incoming
     * tasks. When true, the same keep-alive policy applying to
     * non-core threads applies also to core threads. To avoid
     * continual thread replacement, the keep-alive time must be
     * greater than zero when setting {@code true}. This method
     * should in general be called before the pool is actively used.
     *
     * @param value {@code true} if should time out, else {@code false}
     * @throws IllegalArgumentException if value is {@code true}
     *         and the current keep-alive time is not greater than zero
     *
     * @since 1.6
     */
    public void allowCoreThreadTimeOut(boolean value) {
        // A zero keep-alive with time-out enabled would cause continual
        // thread churn, so it is rejected.
        if (value && keepAliveTime <= 0)
            throw new IllegalArgumentException("Core threads must have nonzero keep alive times");
        if (value == allowCoreThreadTimeOut)
            return;                        // unchanged: nothing to do
        allowCoreThreadTimeOut = value;
        if (value)
            interruptIdleWorkers();        // wake idle core threads so they can begin timing out
    }

    /**
     * Sets the maximum allowed number of threads. This overrides any
     * value set in the constructor. If the new value is smaller than
     * the current value, excess existing threads will be
     * terminated when they next become idle.
     *
     * @param maximumPoolSize the new maximum
     * @throws IllegalArgumentException if the new maximum is
     *         less than or equal to zero, or
     *         less than the {@linkplain #getCorePoolSize core pool size}
     * @see #getMaximumPoolSize
     */
    public void setMaximumPoolSize(int maximumPoolSize) {
        if (maximumPoolSize <= 0 || maximumPoolSize < corePoolSize)
            throw new IllegalArgumentException();
        this.maximumPoolSize = maximumPoolSize;
        // If the pool currently holds more worker threads than the new
        // bound, nudge the surplus idle workers toward termination.
        final int currentWorkers = workerCountOf(ctl.get());
        if (currentWorkers > maximumPoolSize)
            interruptIdleWorkers();
    }

    /**
     * Returns the maximum allowed number of threads.
     *
     * @return the maximum allowed number of threads
     * @see #setMaximumPoolSize
     */
    public int getMaximumPoolSize() {
        return this.maximumPoolSize;
    }

    /**
     * Sets the time limit for which threads may remain idle before
     * being terminated.  If there are more than the core number of
     * threads currently in the pool, after waiting this amount of
     * time without processing a task, excess threads will be
     * terminated.  This overrides any value set in the constructor.
     *
     * @param time the time to wait.  A time value of zero will cause
     *        excess threads to terminate immediately after executing tasks.
     * @param unit the time unit of the {@code time} argument
     * @throws IllegalArgumentException if {@code time} less than zero or
     *         if {@code time} is zero and {@code allowsCoreThreadTimeOut}
     * @see #getKeepAliveTime(TimeUnit)
     */
    public void setKeepAliveTime(long time, TimeUnit unit) {
        if (time < 0)
            throw new IllegalArgumentException();
        // A zero keep-alive is incompatible with core-thread time-out.
        if (time == 0 && allowsCoreThreadTimeOut())
            throw new IllegalArgumentException("Core threads must have nonzero keep alive times");
        final long nanos = unit.toNanos(time);
        final boolean shortened = nanos < this.keepAliveTime;
        this.keepAliveTime = nanos;
        if (shortened)
            interruptIdleWorkers(); // wake idle workers so the shorter timeout takes effect now
    }

    /**
     * Returns the thread keep-alive time, which is the amount of time
     * that threads in excess of the core pool size may remain
     * idle before being terminated.
     *
     * @param unit the desired time unit of the result
     * @return the time limit
     * @see #setKeepAliveTime(long, TimeUnit)
     */
    public long getKeepAliveTime(TimeUnit unit) {
        // The field is stored in nanoseconds; convert on the way out.
        return unit.convert(this.keepAliveTime, TimeUnit.NANOSECONDS);
    }

    /* User-level queue utilities */

    /**
     * Returns the task queue used by this executor. Access to the
     * task queue is intended primarily for debugging and monitoring.
     * This queue may be in active use.  Retrieving the task queue
     * does not prevent queued tasks from executing.
     *
     * @return the task queue
     */
    public BlockingQueue<Runnable> getQueue() {
        return this.workQueue;
    }

    /**
     * Removes this task from the executor's internal queue if it is
     * present, thus causing it not to be run if it has not already
     * started.
     *
     * <p>This method may be useful as one part of a cancellation
     * scheme.  It may fail to remove tasks that have been converted
     * into other forms before being placed on the internal queue. For
     * example, a task entered using {@code submit} might be
     * converted into a form that maintains {@code Future} status.
     * However, in such cases, method {@link #purge} may be used to
     * remove those Futures that have been cancelled.
     *
     * @param task the task to remove
     * @return {@code true} if the task was removed
     */
    public boolean remove(Runnable task) {
        final boolean removed = workQueue.remove(task);
        // Removal may leave a SHUTDOWN pool with an empty queue, in which
        // case termination can now proceed.
        tryTerminate();
        return removed;
    }

    /**
     * Tries to remove from the work queue all {@link Future}
     * tasks that have been cancelled. This method can be useful as a
     * storage reclamation operation, that has no other impact on
     * functionality. Cancelled tasks are never executed, but may
     * accumulate in work queues until worker threads can actively
     * remove them. Invoking this method instead tries to remove them now.
     * However, this method may fail to remove tasks in
     * the presence of interference by other threads.
     */
    public void purge() {
        final BlockingQueue<Runnable> q = workQueue;
        try {
            // Fast path: iterate once, removing cancelled Futures in place.
            Iterator<Runnable> it = q.iterator();
            while (it.hasNext()) {
                Runnable r = it.next();
                if (r instanceof Future<?> && ((Future<?>)r).isCancelled())
                    it.remove();
            }
        } catch (ConcurrentModificationException fallThrough) {
            // Take slow path if we encounter interference during traversal.
            // Make copy for traversal and call remove for cancelled entries.
            // The slow path is more likely to be O(N*N).
            for (Object r : q.toArray())
                if (r instanceof Future<?> && ((Future<?>)r).isCancelled())
                    q.remove(r);
        }

        tryTerminate(); // In case SHUTDOWN and now empty
    }

    /* Statistics */

    /**
     * Returns the current number of threads in the pool.
     *
     * @return the number of threads
     */
    public int getPoolSize() {
        final ReentrantLock lock = this.mainLock;
        lock.lock();
        try {
            // Report 0 once the state has reached TIDYING, ruling out the
            // rare, surprising combination isTerminated() && getPoolSize() > 0.
            if (runStateAtLeast(ctl.get(), TIDYING))
                return 0;
            return workers.size();
        } finally {
            lock.unlock();
        }
    }

    /**
     * Returns the approximate number of threads that are actively
     * executing tasks.
     *
     * @return the number of threads
     */
    public int getActiveCount() {
        final ReentrantLock lock = this.mainLock;
        lock.lock();
        try {
            // A worker holds its own lock exactly while running a task, so
            // "locked" is used as the proxy for "active".
            int active = 0;
            for (Worker w : workers) {
                if (w.isLocked())
                    active++;
            }
            return active;
        } finally {
            lock.unlock();
        }
    }

    /**
     * Returns the largest number of threads that have ever
     * simultaneously been in the pool.
     *
     * @return the number of threads
     */
    public int getLargestPoolSize() {
        // The lock guarantees a consistent read of the statistic.
        final ReentrantLock lock = this.mainLock;
        lock.lock();
        try {
            return largestPoolSize;
        } finally {
            lock.unlock();
        }
    }

    /**
     * Returns the approximate total number of tasks that have ever been
     * scheduled for execution. Because the states of tasks and
     * threads may change dynamically during computation, the returned
     * value is only an approximation.
     *
     * @return the number of tasks
     */
    public long getTaskCount() {
        final ReentrantLock lock = this.mainLock;
        lock.lock();
        try {
            // Tasks finished by workers that have already exited...
            long total = completedTaskCount;
            for (Worker w : workers) {
                // ...plus each live worker's completed tasks...
                total += w.completedTasks;
                // ...plus its in-flight task, if it is running one.
                if (w.isLocked())
                    total++;
            }
            // ...plus the tasks still waiting in the queue.
            return total + workQueue.size();
        } finally {
            lock.unlock();
        }
    }

    /**
     * Returns the approximate total number of tasks that have
     * completed execution. Because the states of tasks and threads
     * may change dynamically during computation, the returned value
     * is only an approximation, but one that does not ever decrease
     * across successive calls.
     *
     * @return the number of tasks
     */
    public long getCompletedTaskCount() {
        final ReentrantLock lock = this.mainLock;
        lock.lock();
        try {
            // Base count from workers that already exited, plus each live
            // worker's own tally.
            long total = completedTaskCount;
            for (Worker w : workers)
                total += w.completedTasks;
            return total;
        } finally {
            lock.unlock();
        }
    }

    /**
     * Returns a string identifying this pool, as well as its state,
     * including indications of run state and estimated worker and
     * task counts.
     *
     * @return a string identifying this pool, as well as its state
     */
    public String toString() {
        long completed;
        int poolSize, active;
        final ReentrantLock lock = this.mainLock;
        lock.lock();
        try {
            // Snapshot the statistics under the lock for consistency.
            completed = completedTaskCount;
            active = 0;
            poolSize = workers.size();
            for (Worker w : workers) {
                completed += w.completedTasks;
                if (w.isLocked())
                    ++active;
            }
        } finally {
            lock.unlock();
        }
        final int c = ctl.get();
        final String runState;
        if (runStateLessThan(c, SHUTDOWN))
            runState = "Running";
        else if (runStateAtLeast(c, TERMINATED))
            runState = "Terminated";
        else
            runState = "Shutting down";
        return new StringBuilder(super.toString())
            .append('[').append(runState)
            .append(", pool size = ").append(poolSize)
            .append(", active threads = ").append(active)
            .append(", queued tasks = ").append(workQueue.size())
            .append(", completed tasks = ").append(completed)
            .append(']')
            .toString();
    }

    /* Extension hooks */

    /**
     * Method invoked prior to executing the given Runnable in the
     * given thread.  This method is invoked by thread {@code t} that
     * will execute task {@code r}, and may be used to re-initialize
     * ThreadLocals, or to perform logging.
     *
     * <p>This implementation does nothing, but may be customized in
     * subclasses. Note: To properly nest multiple overridings, subclasses
     * should generally invoke {@code super.beforeExecute} at the end of
     * this method.
     *
     * @param t the thread that will run task {@code r}
     * @param r the task that will be executed
     */
    protected void beforeExecute(Thread t, Runnable r) { /* default: intentional no-op extension hook */ }

    /**
     * Method invoked upon completion of execution of the given Runnable.
     * This method is invoked by the thread that executed the task. If
     * non-null, the Throwable is the uncaught {@code RuntimeException}
     * or {@code Error} that caused execution to terminate abruptly.
     *
     * <p>This implementation does nothing, but may be customized in
     * subclasses. Note: To properly nest multiple overridings, subclasses
     * should generally invoke {@code super.afterExecute} at the
     * beginning of this method.
     *
     * <p><b>Note:</b> When actions are enclosed in tasks (such as
     * {@link FutureTask}) either explicitly or via methods such as
     * {@code submit}, these task objects catch and maintain
     * computational exceptions, and so they do not cause abrupt
     * termination, and the internal exceptions are <em>not</em>
     * passed to this method. If you would like to trap both kinds of
     * failures in this method, you can further probe for such cases,
     * as in this sample subclass that prints either the direct cause
     * or the underlying exception if a task has been aborted (note
     * that the example only reassigns the local parameter {@code t};
     * it relies on the {@code Future} already being done when this
     * method runs):
     *
     *  <pre> {@code
     * class ExtendedExecutor extends ThreadPoolExecutor {
     *   // ...
     *   protected void afterExecute(Runnable r, Throwable t) {
     *     super.afterExecute(r, t);
     *     if (t == null && r instanceof Future<?>) {
     *       try {
     *         Object result = ((Future<?>) r).get();
     *       } catch (CancellationException ce) {
     *           t = ce;
     *       } catch (ExecutionException ee) {
     *           t = ee.getCause();
     *       } catch (InterruptedException ie) {
     *           Thread.currentThread().interrupt(); // ignore/reset
     *       }
     *     }
     *     if (t != null)
     *       System.out.println(t);
     *   }
     * }}</pre>
     *
     * @param r the runnable that has completed
     * @param t the exception that caused termination, or null if
     * execution completed normally
     */
    protected void afterExecute(Runnable r, Throwable t) { }

    /**
     * Method invoked when the Executor has terminated (all tasks have
     * completed following shut down).  Default implementation does
     * nothing. Note: To properly nest multiple overridings, subclasses
     * should generally invoke {@code super.terminated} within this
     * method.
     */
    protected void terminated() { }

    /* Predefined RejectedExecutionHandlers */

    /**
     * A handler for rejected tasks that runs the rejected task
     * directly in the calling thread of the {@code execute} method,
     * unless the executor has been shut down, in which case the task
     * is discarded.
     *
     * <p>Whoever submitted the task runs it themselves, which also
     * provides simple feedback throttling of the submission rate.
     */
    public static class CallerRunsPolicy implements RejectedExecutionHandler {
        /**
         * Creates a {@code CallerRunsPolicy}.
         */
        public CallerRunsPolicy() { }

        /**
         * Executes task r in the caller's thread, unless the executor
         * has been shut down, in which case the task is discarded.
         *
         * @param r the runnable task requested to be executed
         * @param e the executor attempting to execute this task
         */
        public void rejectedExecution(Runnable r, ThreadPoolExecutor e) {
            // Once the executor is shut down, silently drop the task.
            if (e.isShutdown())
                return;
            r.run();
        }
    }

    /**
     * A handler for rejected tasks that throws a
     * {@code RejectedExecutionException}.
     *
     * <p>"Abort" means the rejection interrupts the caller's current
     * flow of execution by raising an exception.
     */
    public static class AbortPolicy implements RejectedExecutionHandler {
        /**
         * Creates an {@code AbortPolicy}.
         */
        public AbortPolicy() { }

        /**
         * Always throws RejectedExecutionException.
         *
         * @param r the runnable task requested to be executed
         * @param e the executor attempting to execute this task
         * @throws RejectedExecutionException always
         */
        public void rejectedExecution(Runnable r, ThreadPoolExecutor e) {
            // Build the diagnostic message, then propagate the rejection.
            String message = "Task " + r.toString() +
                " rejected from " +
                e.toString();
            throw new RejectedExecutionException(message);
        }
    }

    /**
     * A handler for rejected tasks that silently discards the
     * rejected task.
     *
     * <p>The submitted task is simply dropped, with no notification
     * to the caller.
     */
    public static class DiscardPolicy implements RejectedExecutionHandler {
        /**
         * Creates a {@code DiscardPolicy}.
         */
        public DiscardPolicy() { }

        /**
         * Does nothing, which has the effect of discarding task r.
         *
         * @param r the runnable task requested to be executed
         * @param e the executor attempting to execute this task
         */
        public void rejectedExecution(Runnable r, ThreadPoolExecutor e) {
            // Intentionally empty: the task is dropped on the floor.
        }
    }

    /**
     * A handler for rejected tasks that discards the oldest unhandled
     * request and then retries {@code execute}, unless the executor
     * is shut down, in which case the task is discarded.
     *
     * <p>Drops the oldest queued task to make room, then tries to run
     * the new one.
     */
    public static class DiscardOldestPolicy implements RejectedExecutionHandler {
        /**
         * Creates a {@code DiscardOldestPolicy} for the given executor.
         */
        public DiscardOldestPolicy() { }

        /**
         * Obtains and ignores the next task that the executor
         * would otherwise execute, if one is immediately available,
         * and then retries execution of task r, unless the executor
         * is shut down, in which case task r is instead discarded.
         *
         * @param r the runnable task requested to be executed
         * @param e the executor attempting to execute this task
         */
        public void rejectedExecution(Runnable r, ThreadPoolExecutor e) {
            // A shut-down executor discards the new task as well.
            if (e.isShutdown())
                return;
            // Evict the head of the queue (the oldest waiting task),
            // then resubmit the rejected task.
            e.getQueue().poll();
            e.execute(r);
        }
    }
}
