/*
 *  Licensed to the Apache Software Foundation (ASF) under one or more
 *  contributor license agreements.  See the NOTICE file distributed with
 *  this work for additional information regarding copyright ownership.
 *  The ASF licenses this file to You under the Apache License, Version 2.0
 *  (the "License"); you may not use this file except in compliance with
 *  the License.  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */
package org.apache.tomcat.util.net;

import java.io.EOFException;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.net.SocketTimeoutException;
import java.nio.ByteBuffer;
import java.nio.channels.CancelledKeyException;
import java.nio.channels.Channel;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.CompletionHandler;
import java.nio.channels.FileChannel;
import java.nio.channels.NetworkChannel;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.nio.channels.ServerSocketChannel;
import java.nio.channels.SocketChannel;
import java.nio.channels.WritableByteChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.attribute.FileAttribute;
import java.nio.file.attribute.PosixFilePermission;
import java.nio.file.attribute.PosixFilePermissions;
import java.util.ConcurrentModificationException;
import java.util.Iterator;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

import javax.net.ssl.SSLEngine;

import org.apache.juli.logging.Log;
import org.apache.juli.logging.LogFactory;
import org.apache.tomcat.util.ExceptionUtils;
import org.apache.tomcat.util.collections.SynchronizedQueue;
import org.apache.tomcat.util.collections.SynchronizedStack;
import org.apache.tomcat.util.compat.JreCompat;
import org.apache.tomcat.util.compat.JrePlatform;
import org.apache.tomcat.util.net.AbstractEndpoint.Handler.SocketState;
import org.apache.tomcat.util.net.Acceptor.AcceptorState;
import org.apache.tomcat.util.net.jsse.JSSESupport;

/**
 * NIO tailored thread pool, providing the following services:
 * <ul>
 * <li>Socket acceptor thread</li>
 * <li>Socket poller thread</li>
 * <li>Worker threads pool</li>
 * </ul>
 *
 * TODO: Consider using the virtual machine's thread pool.
 *
 * @author Mladen Turk
 * @author Remy Maucherat
 */
// I/O multiplexing, synchronous non-blocking
public class NioEndpoint extends AbstractJsseEndpoint<NioChannel,SocketChannel> {


    // -------------------------------------------------------------- Constants


    private static final Log log = LogFactory.getLog(NioEndpoint.class);
    // Dedicated logger for TLS handshake diagnostics.
    private static final Log logHandshake = LogFactory.getLog(NioEndpoint.class.getName() + ".handshake");


    // Custom interest op (outside the SelectionKey.OP_* range) used to request
    // first-time registration of a channel with the Poller's selector.
    public static final int OP_REGISTER = 0x100; //register interest op

    // ----------------------------------------------------------------- Fields

    /**
     * Server socket "pointer".
     */
    private volatile ServerSocketChannel serverSock = null;

    /**
     * Stop latch used to wait for poller stop.
     */
    private volatile CountDownLatch stopLatch = null;

    /**
     * Cache for poller events. Used PollerEvent instances are recycled through
     * this stack to reduce allocation and GC pressure.
     */
    private SynchronizedStack<PollerEvent> eventCache;

    /**
     * Bytebuffer cache, each channel holds a set of buffers (two, except for SSL holds four)
     */
    private SynchronizedStack<NioChannel> nioChannels;

    // Remote address and timestamp of the most recently accepted connection.
    // Used by serverSocketAccept() to detect spurious duplicate accepts.
    private SocketAddress previousAcceptedSocketRemoteAddress = null;
    private long previousAcceptedSocketNanoTime = 0;


    // ------------------------------------------------------------- Properties

    /**
     * Use System.inheritableChannel to obtain channel from stdin/stdout.
     */
    private boolean useInheritedChannel = false;
    public void setUseInheritedChannel(boolean useInheritedChannel) { this.useInheritedChannel = useInheritedChannel; }
    public boolean getUseInheritedChannel() { return useInheritedChannel; }


    /**
     * Path for the Unix domain socket, used to create the socket address.
     */
    private String unixDomainSocketPath = null;
    public String getUnixDomainSocketPath() { return this.unixDomainSocketPath; }
    public void setUnixDomainSocketPath(String unixDomainSocketPath) {
        this.unixDomainSocketPath = unixDomainSocketPath;
    }


    /**
     * Permissions which will be set on the Unix domain socket if it is created.
     */
    private String unixDomainSocketPathPermissions = null;
    public String getUnixDomainSocketPathPermissions() { return this.unixDomainSocketPathPermissions; }
    public void setUnixDomainSocketPathPermissions(String unixDomainSocketPathPermissions) {
        this.unixDomainSocketPathPermissions = unixDomainSocketPathPermissions;
    }


    /**
     * Priority of the poller thread.
     */
    private int pollerThreadPriority = Thread.NORM_PRIORITY;
    public void setPollerThreadPriority(int pollerThreadPriority) { this.pollerThreadPriority = pollerThreadPriority; }
    public int getPollerThreadPriority() { return pollerThreadPriority; }


    // Selector timeout in milliseconds (presumably the Poller's blocking select
    // timeout — the run loop is outside this view; confirm against Poller.run()).
    private long selectorTimeout = 1000;
    public void setSelectorTimeout(long timeout) { this.selectorTimeout = timeout;}
    public long getSelectorTimeout() { return this.selectorTimeout; }

    /**
     * The socket poller.
     */
    private Poller poller = null;


    // --------------------------------------------------------- Public Methods

    /**
     * Number of keep-alive sockets.
     *
     * @return The number of sockets currently in the keep-alive state waiting
     *         for the next request to be received on the socket
     */
    public int getKeepAliveCount() {
        // Snapshot the field: the poller may be torn down concurrently in stopInternal().
        Poller currentPoller = poller;
        return (currentPoller == null) ? 0 : currentPoller.getKeyCount();
    }


    @Override
    public String getId() {
        // An inherited channel gets a fixed identifier; a UDS endpoint is
        // identified by its socket path; a plain TCP endpoint has none (null).
        if (getUseInheritedChannel()) {
            return "JVMInheritedChannel";
        }
        return getUnixDomainSocketPath();
    }


    // ----------------------------------------------- Public Lifecycle Methods

    /**
     * Initialize the endpoint.
     */
    @Override
    public void bind() throws Exception {
        // Create and bind the server socket (inherited channel, UDS or plain TCP).
        initServerSocket();
        // Latch used to wait for the poller to stop; awaited in stopInternal().
        setStopLatch(new CountDownLatch(1));

        // Initialize SSL if needed
        initialiseSsl();
    }

    // Separated out to make it easier for folks that extend NioEndpoint to
    // implement custom [server]sockets
    protected void initServerSocket() throws Exception {
        // Three mutually exclusive sources for the server socket:
        // 1) a channel inherited from the launching process,
        // 2) a Unix domain socket,
        // 3) a regular TCP server socket (the default).
        if (getUseInheritedChannel()) {
            // Retrieve the channel provided by the OS
            Channel ic = System.inheritedChannel();
            if (ic instanceof ServerSocketChannel) {
                serverSock = (ServerSocketChannel) ic;
            }
            // Inherited channel was absent or of the wrong type.
            if (serverSock == null) {
                throw new IllegalArgumentException(sm.getString("endpoint.init.bind.inherited"));
            }
        } else if (getUnixDomainSocketPath() != null) {
            // Unix domain sockets avoid the TCP/IP stack for same-host clients.
            // Binding creates a socket file at the configured path; binding fails
            // if a file already exists at that path.
            SocketAddress sa = JreCompat.getInstance().getUnixDomainSocketAddress(getUnixDomainSocketPath());
            serverSock = JreCompat.getInstance().openUnixDomainServerSocketChannel();
            serverSock.bind(sa, getAcceptCount());
            if (getUnixDomainSocketPathPermissions() != null) {
                // Apply the configured POSIX permissions to the socket file,
                // falling back to the java.io.File API on non-POSIX file systems.
                Path path = Paths.get(getUnixDomainSocketPath());
                Set<PosixFilePermission> permissions =
                        PosixFilePermissions.fromString(getUnixDomainSocketPathPermissions());
                if (path.getFileSystem().supportedFileAttributeViews().contains("posix")) {
                    FileAttribute<Set<PosixFilePermission>> attrs = PosixFilePermissions.asFileAttribute(permissions);
                    Files.setAttribute(path, attrs.name(), attrs.value());
                } else {
                    java.io.File file = path.toFile();
                    if (permissions.contains(PosixFilePermission.OTHERS_READ) && !file.setReadable(true, false)) {
                        log.warn(sm.getString("endpoint.nio.perms.readFail", file.getPath()));
                    }
                    if (permissions.contains(PosixFilePermission.OTHERS_WRITE) && !file.setWritable(true, false)) {
                        log.warn(sm.getString("endpoint.nio.perms.writeFail", file.getPath()));
                    }
                }
            }
        } else {
            // Plain TCP: open the channel, apply configured socket options, then
            // bind with the configured accept backlog (getAcceptCount()).
            serverSock = ServerSocketChannel.open();
            socketProperties.setProperties(serverSock.socket());
            InetSocketAddress addr = new InetSocketAddress(getAddress(), getPortWithOffset());
            serverSock.bind(addr, getAcceptCount());
        }
        // Blocking mode so the Acceptor thread can simply block in accept();
        // each accepted SocketChannel is switched to non-blocking individually
        // in setSocketOptions() before being handed to the Poller.
        serverSock.configureBlocking(true); //mimic APR behavior
    }


    /**
     * Start the NIO endpoint, creating acceptor, poller threads.
     */
    @Override
    public void startInternal() throws Exception {
        // Guard against being started twice.
        if (!running) {
            running = true;
            paused = false;

            // Object caches: SocketProcessor, PollerEvent and NioChannel instances
            // are recycled to reduce allocation and GC pressure under load. A
            // configured size of 0 disables the corresponding cache.
            if (socketProperties.getProcessorCache() != 0) {
                processorCache = new SynchronizedStack<>(SynchronizedStack.DEFAULT_SIZE,
                        socketProperties.getProcessorCache());
            }
            if (socketProperties.getEventCache() != 0) {
                eventCache = new SynchronizedStack<>(SynchronizedStack.DEFAULT_SIZE,
                        socketProperties.getEventCache());
            }
            // Upper bound on pooled NioChannels, derived from the configured
            // buffer pool size (SSL reserves extra space for SNI parsing).
            int actualBufferPool =
                    socketProperties.getActualBufferPool(isSSLEnabled() ? getSniParseLimit() * 2 : 0);
            if (actualBufferPool != 0) {
                nioChannels = new SynchronizedStack<>(SynchronizedStack.DEFAULT_SIZE,
                        actualBufferPool);
            }

            // Create worker collection
            if (getExecutor() == null) {
                createExecutor();
            }
            // Initialise the latch limiting concurrent connections; it is
            // acquired/released by the Acceptor as connections come and go.
            initializeConnectionLatch();

            // Start poller thread. The Poller owns a Selector and hands ready
            // sockets to the executor as SocketProcessor tasks.
            poller = new Poller();
            Thread pollerThread = new Thread(poller, getName() + "-Poller");
            // FIX: use the dedicated pollerThreadPriority property. Previously the
            // generic threadPriority was applied here, which left the configured
            // pollerThreadPriority (see setPollerThreadPriority) as dead config.
            pollerThread.setPriority(getPollerThreadPriority());
            pollerThread.setDaemon(true);
            pollerThread.start();
            // Acceptor thread: accepts connections only, never processes them.
            startAcceptorThread();
        }
    }


    /**
     * Stop the endpoint. This will cause all processing threads to stop.
     */
    @Override
    public void stopInternal() {
        if (!paused) {
            pause();
        }
        if (running) {
            running = false;
            // Stop accepting new connections (argument semantics defined by Acceptor.stop).
            acceptor.stop(10);
            if (poller != null) {
                poller.destroy();
                poller = null;
            }
            try {
                // Wait slightly longer than one selector timeout for the poller
                // thread to exit. Assumes the poller run loop counts the stop
                // latch down on exit — confirm against Poller.run().
                if (!getStopLatch().await(selectorTimeout + 100, TimeUnit.MILLISECONDS)) {
                    log.warn(sm.getString("endpoint.nio.stopLatchAwaitFail"));
                }
            } catch (InterruptedException e) {
                log.warn(sm.getString("endpoint.nio.stopLatchAwaitInterrupted"), e);
            }
            shutdownExecutor();
            // Drop the object caches; pooled channels free their buffers explicitly.
            if (eventCache != null) {
                eventCache.clear();
                eventCache = null;
            }
            if (nioChannels != null) {
                NioChannel socket;
                while ((socket = nioChannels.pop()) != null) {
                    socket.free();
                }
                nioChannels = null;
            }
            if (processorCache != null) {
                processorCache.clear();
                processorCache = null;
            }
        }
    }


    /**
     * Deallocate NIO memory pools, and close server socket.
     */
    @Override
    public void unbind() throws Exception {
        if (log.isDebugEnabled()) {
            log.debug("Destroy initiated for " +
                    new InetSocketAddress(getAddress(),getPortWithOffset()));
        }
        // Make sure the endpoint is fully stopped before releasing resources.
        if (running) {
            stop();
        }
        try {
            doCloseServerSocket();
        } catch (IOException ioe) {
            getLog().warn(sm.getString("endpoint.serverSocket.closeFailed", getName()), ioe);
        }
        destroySsl();
        super.unbind();
        // Give the protocol handler a chance to release per-connection state.
        if (getHandler() != null ) {
            getHandler().recycle();
        }
        if (log.isDebugEnabled()) {
            log.debug("Destroy completed for " +
                    new InetSocketAddress(getAddress(), getPortWithOffset()));
        }
    }


    @Override
    protected void doCloseServerSocket() throws IOException {
        try {
            // An inherited channel is owned by the launching process: leave it open.
            if (serverSock != null && !getUseInheritedChannel()) {
                serverSock.close();
            }
            serverSock = null;
        } finally {
            // A bound Unix domain socket leaves a file behind; remove it even if
            // closing the channel above failed.
            String udsPath = getUnixDomainSocketPath();
            if (udsPath != null && getBindState().wasBound()) {
                Files.delete(Paths.get(udsPath));
            }
        }
    }


    // ------------------------------------------------------ Protected Methods


    @Override
    protected void unlockAccept() {
        if (getUnixDomainSocketPath() == null) {
            super.unlockAccept();
        } else {
            // Only try to unlock the acceptor if it is necessary
            if (acceptor == null || acceptor.getState() != AcceptorState.RUNNING) {
                return;
            }
            try {
                // Unblock the acceptor's pending accept() by making a short-lived
                // connection to our own Unix domain socket.
                SocketAddress sa = JreCompat.getInstance().getUnixDomainSocketAddress(getUnixDomainSocketPath());
                try (SocketChannel socket = JreCompat.getInstance().openUnixDomainSocketChannel()) {
                    // With a UDS, expect no delay connecting and no defer accept
                    socket.connect(sa);
                }
                // Wait for up to 1000ms acceptor threads to unlock
                long waitLeft = 1000;
                while (waitLeft > 0 &&
                        acceptor.getState() == AcceptorState.RUNNING) {
                    Thread.sleep(5);
                    waitLeft -= 5;
                }
            } catch(Throwable t) {
                // Unlocking is best-effort; swallow after the standard fatal-error check.
                ExceptionUtils.handleThrowable(t);
                if (getLog().isDebugEnabled()) {
                    getLog().debug(sm.getString(
                            "endpoint.debug.unlock.fail", String.valueOf(getPortWithOffset())), t);
                }
            }
        }
    }


    // Pool of recyclable NioChannels (may be null if the cache is disabled).
    protected SynchronizedStack<NioChannel> getNioChannels() {
        return nioChannels;
    }


    // The single Poller owned by this endpoint (null before start / after stop).
    protected Poller getPoller() {
        return poller;
    }


    // Latch counted down when the poller stops; awaited during shutdown.
    protected CountDownLatch getStopLatch() {
        return stopLatch;
    }


    protected void setStopLatch(CountDownLatch stopLatch) {
        this.stopLatch = stopLatch;
    }


    /**
     * Process the specified connection.
     * @param socket The socket channel
     * @return <code>true</code> if the socket was correctly configured
     *  and processing may continue, <code>false</code> if the socket needs to be
     *  close immediately
     */
    // Called by the Acceptor for each accepted connection: wraps the channel and
    // hands it to the Poller via its event queue (Acceptor and Poller communicate
    // only through that SynchronizedQueue).
    @Override
    protected boolean setSocketOptions(SocketChannel socket) {
        NioSocketWrapper socketWrapper = null;
        try {
            // Allocate channel and wrapper
            NioChannel channel = null;
            // Reuse a pooled NioChannel (a SocketChannel wrapper) when available.
            if (nioChannels != null) {
                channel = nioChannels.pop();
            }
            if (channel == null) {
                // Nothing pooled: build fresh read/write buffers for the channel.
                SocketBufferHandler bufhandler = new SocketBufferHandler(
                        socketProperties.getAppReadBufSize(),
                        socketProperties.getAppWriteBufSize(),
                        socketProperties.getDirectBuffer());
                // SSL connections need the SecureNioChannel variant; both expose
                // the same API to the rest of the endpoint.
                if (isSSLEnabled()) {
                    channel = new SecureNioChannel(bufhandler, this);
                } else {
                    channel = new NioChannel(bufhandler);
                }
            }
            // Tie the channel and this endpoint together in a NioSocketWrapper.
            NioSocketWrapper newWrapper = new NioSocketWrapper(channel, this);
            // Attach the freshly accepted SocketChannel (also resets pooled state).
            channel.reset(socket, newWrapper);
            // Track the live connection, keyed by its SocketChannel.
            connections.put(socket, newWrapper);
            socketWrapper = newWrapper;

            // Set socket properties
            // Disable blocking, polling will be used
            // (a channel must be non-blocking before it can be registered with a
            // Selector).
            socket.configureBlocking(false);
            if (getUnixDomainSocketPath() == null) {
                // Apply configured socket options (buffer sizes, keep-alive, ...).
                socketProperties.setProperties(socket.socket());
            }

            socketWrapper.setReadTimeout(getConnectionTimeout());
            socketWrapper.setWriteTimeout(getConnectionTimeout());
            socketWrapper.setKeepAliveLeft(NioEndpoint.this.getMaxKeepAliveRequests());
            // Queue an OP_REGISTER PollerEvent so the Poller picks the socket up.
            poller.register(socketWrapper);
            return true;
        } catch (Throwable t) {
            ExceptionUtils.handleThrowable(t);
            try {
                log.error(sm.getString("endpoint.socketOptionsError"), t);
            } catch (Throwable tt) {
                ExceptionUtils.handleThrowable(tt);
            }
            // Only destroy here if no wrapper was created; otherwise the caller's
            // close path is responsible for the wrapper.
            if (socketWrapper == null) {
                destroySocket(socket);
            }
        }
        // Tell to close the socket if needed
        return false;
    }


    @Override
    protected void destroySocket(SocketChannel socket) {
        // Release this connection's slot in the connection limit latch.
        countDownConnection();
        try {
            socket.close();
        } catch (IOException ioe) {
            // Best-effort close; failure is only of interest when debugging.
            if (log.isDebugEnabled()) {
                log.debug(sm.getString("endpoint.err.close"), ioe);
            }
        }
    }


    // The bound server channel (inherited, UDS or TCP — see initServerSocket()).
    @Override
    protected NetworkChannel getServerSocket() {
        return serverSock;
    }


    @Override
    protected SocketChannel serverSocketAccept() throws Exception {
        // serverSock was configured blocking in initServerSocket(), so the
        // Acceptor thread blocks here until a new connection arrives; each
        // accepted connection yields its own SocketChannel.
        SocketChannel result = serverSock.accept();

        // Bug does not affect Windows platform and Unix Domain Socket. Skip the check.
        if (!JrePlatform.IS_WINDOWS && getUnixDomainSocketPath() == null) {
            SocketAddress currentRemoteAddress = result.getRemoteAddress();
            long currentNanoTime = System.nanoTime();
            // Reject a second accept of the same remote address within 1000
            // nanoseconds as a duplicate accept (works around an OS-level bug).
            if (currentRemoteAddress.equals(previousAcceptedSocketRemoteAddress) &&
                    currentNanoTime - previousAcceptedSocketNanoTime < 1000) {
                throw new IOException(sm.getString("endpoint.err.duplicateAccept"));
            }
            previousAcceptedSocketRemoteAddress = currentRemoteAddress;
            previousAcceptedSocketNanoTime = currentNanoTime;
        }

        return result;
    }


    // Endpoint-level logger used by the abstract base class.
    @Override
    protected Log getLog() {
        return log;
    }


    // Wraps a ready socket and its event in a runnable task for the worker executor.
    @Override
    protected SocketProcessorBase<NioChannel> createSocketProcessor(
            SocketWrapperBase<NioChannel> socketWrapper, SocketEvent event) {
        return new SocketProcessor(socketWrapper, event);
    }

    // ----------------------------------------------------- Poller Inner Classes

    /**
     * PollerEvent, cacheable object for poller events to avoid GC. Describes one
     * pending operation for the Poller: either a first-time channel registration
     * (NioEndpoint.OP_REGISTER) or an interest-ops update (SelectionKey.OP_READ /
     * SelectionKey.OP_WRITE) for an already registered channel.
     */
    public static class PollerEvent {

        // Socket wrapper this event applies to.
        private NioSocketWrapper socketWrapper;
        // Requested operation (see class comment).
        private int interestOps;

        public PollerEvent(NioSocketWrapper socketWrapper, int intOps) {
            reset(socketWrapper, intOps);
        }

        public void reset(NioSocketWrapper socketWrapper, int intOps) {
            this.socketWrapper = socketWrapper;
            this.interestOps = intOps;
        }

        /** Clear the event so it can be returned to the cache for reuse. */
        public void reset() {
            reset(null, 0);
        }

        public NioSocketWrapper getSocketWrapper() {
            return socketWrapper;
        }

        public int getInterestOps() {
            return interestOps;
        }

        @Override
        public String toString() {
            StringBuilder sb = new StringBuilder("Poller event: socket [");
            sb.append(socketWrapper.getSocket());
            sb.append("], socketWrapper [").append(socketWrapper);
            sb.append("], interestOps [").append(interestOps).append(']');
            return sb.toString();
        }
    }

    /**
     * Poller class.
     */
    public class Poller implements Runnable {

        private Selector selector;
        // Queue of pending PollerEvents. A synchronized queue is required because
        // producers (e.g. the Acceptor thread registering accepted connections)
        // and the consumer (this Poller's own thread) are different threads.
        private final SynchronizedQueue<PollerEvent> events =
                new SynchronizedQueue<>();
        // Set to true when this poller is asked to shut down (see destroy()).
        private volatile boolean close = false;
        // Optimize expiration handling
        // Next point in time at which socket timeouts will be processed.
        private long nextExpiration = 0;
        // Counts events added since the last select. Per addEvent(): presumably
        // set to -1 while the poller thread is inside a blocking select (done in
        // Poller.run(), outside this view — confirm); incrementing it to exactly 0
        // signals that the selector must be woken up.
        private AtomicLong wakeupCounter = new AtomicLong(0);
        // Number of ready keys returned by the most recent select.
        private volatile int keyCount = 0;

        public Poller() throws IOException {
            // Each Poller owns its own Selector for the lifetime of the poller.
            this.selector = Selector.open();
        }

        // Number of ready keys from the most recent select (see keyCount).
        public int getKeyCount() { return keyCount; }

        public Selector getSelector() { return selector; }

        /**
         * Destroy the poller. Called by the owning NioEndpoint when stopping.
         */
        protected void destroy() {
            // Wait for polltime before doing anything, so that the poller threads
            // exit, otherwise parallel closure of sockets which are still
            // in the poller can cause problems
            close = true;
            // Wake the selector so the run loop observes the close flag promptly.
            selector.wakeup();
        }
        // While the Poller thread is inside a blocking select, wakeupCounter is
        // -1. Producers increment it when adding an event; hitting exactly 0
        // therefore means the poller is (or may be) blocked and must be woken.
        // (During selectNow the counter is also -1; a spurious wakeup then is
        // harmless.)
        private void addEvent(PollerEvent event) {
            events.offer(event);
            // Incrementing -1 to 0: the poller may be blocked in select(), so
            // wake the selector to get the event processed promptly.
            if (wakeupCounter.incrementAndGet() == 0) {
                selector.wakeup();
            }
        }

        /**
         * Obtain a PollerEvent for the given wrapper and ops, reusing a recycled
         * instance from the endpoint-level event cache when one is available
         * (reduces allocation and GC pressure).
         */
        private PollerEvent createPollerEvent(NioSocketWrapper socketWrapper, int interestOps) {
            PollerEvent event = (eventCache != null) ? eventCache.pop() : null;
            if (event == null) {
                // Cache disabled or empty: allocate a fresh event.
                event = new PollerEvent(socketWrapper, interestOps);
            } else {
                // Repurpose the recycled event with the current wrapper and ops.
                event.reset(socketWrapper, interestOps);
            }
            return event;
        }

        /**
         * Add specified socket and associated pool to the poller. The socket will
         * be added to a temporary array, and polled first after a maximum amount
         * of time equal to pollTime (in most cases, latency will be much lower,
         * however).
         *
         * @param socketWrapper to add to the poller
         * @param interestOps Operations for which to register this socket with
         *                    the Poller
         */
        public void add(NioSocketWrapper socketWrapper, int interestOps) {
            PollerEvent pollerEvent = createPollerEvent(socketWrapper, interestOps);
            // Queue the event for the poller thread to consume.
            addEvent(pollerEvent);
            // If the poller is shutting down, stop processing for this socket.
            if (close) {
                processSocket(socketWrapper, SocketEvent.STOP, false);
            }
        }

        /**
         * Processes events in the event queue of the Poller: drains queued
         * PollerEvents and either registers the channel with this poller's
         * selector (OP_REGISTER) or merges additional interest ops into the
         * channel's existing SelectionKey.
         *
         * @return <code>true</code> if some events were processed,
         *   <code>false</code> if queue was empty
         */
        public boolean events() {
            // True once at least one event has been taken from the queue.
            boolean result = false;
            PollerEvent pe = null;
            // Drain at most the number of events present when the loop starts;
            // events added concurrently are handled on the next call.
            for (int i = 0, size = events.size(); i < size && (pe = events.poll()) != null; i++ ) {
                result = true;
                NioSocketWrapper socketWrapper = pe.getSocketWrapper();
                // The raw SocketChannel lives inside the wrapper's NioChannel.
                SocketChannel sc = socketWrapper.getSocket().getIOChannel();
                int interestOps = pe.getInterestOps();
                if (sc == null) {
                    // Channel already gone: close the wrapper as well.
                    log.warn(sm.getString("endpoint.nio.nullSocketChannel"));
                    socketWrapper.close();
                } else if (interestOps == OP_REGISTER) {
                    // First registration of this socket with the selector: start
                    // by listening for reads, with the wrapper as the attachment.
                    try {
                        sc.register(getSelector(), SelectionKey.OP_READ, socketWrapper);
                    } catch (Exception x) {
                        log.error(sm.getString("endpoint.nio.registerFail"), x);
                    }
                } else {
                    // Already registered: look up the existing SelectionKey for
                    // this channel/selector pair and merge in the requested ops.
                    final SelectionKey key = sc.keyFor(getSelector());
                    if (key == null) {
                        // The key was cancelled (e.g. due to socket closure)
                        // and removed from the selector while it was being
                        // processed. Count down the connections at this point
                        // since it won't have been counted down when the socket
                        // closed.
                        socketWrapper.close();
                    } else {
                        final NioSocketWrapper attachment = (NioSocketWrapper) key.attachment();
                        if (attachment != null) {
                            // We are registering the key to start with, reset the fairness counter.
                            try {
                                // Combine the new ops with those already registered,
                                // keeping the wrapper's view in sync with the key.
                                int ops = key.interestOps() | interestOps;
                                attachment.interestOps(ops);
                                key.interestOps(ops);
                            } catch (CancelledKeyException ckx) {
                                socketWrapper.close();
                            }
                        } else {
                            socketWrapper.close();
                        }
                    }
                }
                // Recycle the processed event through the cache (if enabled).
                if (running && eventCache != null) {
                    pe.reset();
                    eventCache.push(pe);
                }
            }

            return result;
        }

        /**
         * Registers a newly created socket with the poller.
         *
         * @param socketWrapper The socket wrapper
         */
        public void register(final NioSocketWrapper socketWrapper) {
            // The wrapper's interest starts out as OP_READ - that is what the
            // synthetic OP_REGISTER event below turns into once the poller thread
            // performs the actual Selector registration (see events()).
            socketWrapper.interestOps(SelectionKey.OP_READ);
            // Queue an OP_REGISTER event; the poller's events() loop picks it up
            // and registers the channel with the selector on the poller thread.
            addEvent(createPollerEvent(socketWrapper, OP_REGISTER));
        }

        /**
         * The background thread that adds sockets to the Poller, checks the
         * poller for triggered events and hands the associated socket off to an
         * appropriate processor as events occur.
         */
        // The Poller never processes a request itself: for each ready key it
        // dispatches a SocketProcessor task (via processSocket()) to the worker
        // pool. NOTE(review): per the original notes, the protocol processor
        // (e.g. Http11Processor) reads through the NioSocketWrapper abstraction
        // rather than the raw Channel, so the same processing code works for
        // both SocketChannel and AsynchronousSocketChannel - confirm against
        // the processor implementation.
        //
        // Each loop iteration also calls timeout() so sockets that exceeded
        // their read/write timeout can be reported back to the client.
        @Override
        public void run() {
            // Loop until destroy() is called
            while (true) {

                boolean hasEvents = false;
                // Why a timed select(selectorTimeout) instead of spinning on selectNow():
                // 1. Balanced scheduling - a timed select yields the CPU when no
                //    channel is ready instead of polling at 100% CPU.
                // 2. No busy-waiting - selectNow() returns immediately (0 when
                //    nothing is ready), so a tight loop over it would burn CPU.
                // 3. Fewer context switches - a modest block reduces the churn
                //    caused by constant non-blocking polls.
                try {
                    if (!close) {
                        // Drain the PollerEvent queue first: events() registers each
                        // queued NioSocketWrapper with this poller's selector (or
                        // updates its interest ops) and reports whether anything
                        // was handled.
                        hasEvents = events();
                        // wakeupCounter protocol (producers increment in addEvent()):
                        //  >0 : other threads queued events since the last loop - do
                        //       a non-blocking selectNow() so those events get
                        //       handled on the next iteration without a 1s delay.
                        //   0 : nothing pending - safe to block in select().
                        //  -1 : sentinel set here meaning "poller is (about to be)
                        //       blocked in select()"; a producer that sees -1 calls
                        //       selector.wakeup() to make the blocking select return
                        //       immediately.
                        // Note: getAndSet(-1) > 0 does not guarantee events remain
                        // (they may already have been drained above); it only
                        // guarantees no event is left waiting while the poller
                        // blocks.
                        if (wakeupCounter.getAndSet(-1) > 0) {
                            // If we are here, means we have other stuff to do
                            // Do a non blocking select
                            keyCount = selector.selectNow();
                        } else {
                            // Nothing queued: block for up to selectorTimeout (1s by
                            // default) waiting for ready channels. A producer that
                            // adds an event meanwhile sees the -1 sentinel and wakes
                            // us early, so new registrations are not delayed.
                            keyCount = selector.select(selectorTimeout);
                        }
                        // Select returned: reset the counter to 0 ("not blocked, no
                        // pending events"). From here on, addEvent() increments it
                        // rather than waking the selector.
                        wakeupCounter.set(0);
                    }
                    if (close) {
                        // Shutting down: drain any remaining PollerEvents, run a
                        // final timeout pass, close the selector and exit the loop.
                        events();
                        timeout(0, false);
                        try {
                            selector.close();
                        } catch (IOException ioe) {
                            log.error(sm.getString("endpoint.nio.selectorCloseFail"), ioe);
                        }
                        break;
                    }
                    // Either we timed out or we woke up, process events first
                    // No ready channels: opportunistically drain the event queue
                    // again so the timeout() call below sees an up-to-date
                    // hasEvents flag.
                    if (keyCount == 0) {
                        hasEvents = (hasEvents | events());
                    }
                } catch (Throwable x) {
                    ExceptionUtils.handleThrowable(x);
                    log.error(sm.getString("endpoint.nio.selectorLoopError"), x);
                    continue;
                }
                // keyCount > 0 means selectedKeys() is non-empty; otherwise skip
                // the dispatch loop entirely.
                Iterator<SelectionKey> iterator =
                    keyCount > 0 ? selector.selectedKeys().iterator() : null;
                // Walk through the collection of ready keys and dispatch
                // any active event.
                while (iterator != null && iterator.hasNext()) {
                    SelectionKey sk = iterator.next();
                    // Remove the key from selectedKeys so the same readiness is
                    // not processed twice.
                    iterator.remove();
                    // The wrapper was attached when the channel was registered
                    // (see events()).
                    NioSocketWrapper socketWrapper = (NioSocketWrapper) sk.attachment();
                    // Attachment may be null if another thread has called
                    // cancelledKey()
                    if (socketWrapper != null) {
                        // Dispatch; the real work is delegated to a worker thread.
                        processKey(sk, socketWrapper);
                    }
                }

                // Process timeouts (keyCount = ready channels, hasEvents = whether
                // any queued PollerEvent was handled this iteration)
                timeout(keyCount,hasEvents);
            }

            getStopLatch().countDown();
        }

        protected void processKey(SelectionKey sk, NioSocketWrapper socketWrapper) {
            // Handles one ready SelectionKey: continues an in-progress sendfile,
            // or deregisters the ready ops and dispatches read/write processing
            // (normally to a worker thread via processSocket()).
            try {
                if (close) {
                    // Poller is shutting down - just close the socket.
                    socketWrapper.close();
                } else if (sk.isValid()) {
                    // Only readable/writable keys carry work for us here.
                    if (sk.isReadable() || sk.isWritable()) {
                        // A non-null SendfileData means a zero-copy file send is in
                        // progress for this socket (set up during response
                        // processing, per the original notes in
                        // Http11Processor#prepareSendfile); continue it instead of
                        // normal dispatch.
                        if (socketWrapper.getSendfileData() != null) {
                            processSendfile(sk, socketWrapper, false);
                        } else {
                            // Remove the just-fired ready ops from the interest set
                            // first so the selector cannot report the same event
                            // again while a worker thread owns this socket.
                            unreg(sk, socketWrapper, sk.readyOps());
                            boolean closeSocket = false;
                            // Read goes before write
                            if (sk.isReadable()) {
                                // An in-flight asynchronous read operation takes
                                // priority: let it continue processing.
                                if (socketWrapper.readOperation != null) {
                                    if (!socketWrapper.readOperation.process()) {
                                        closeSocket = true;
                                    }
                                } else if (socketWrapper.readBlocking) {
                                    // A thread is parked in a simulated blocking
                                    // read (see fillReadBuffer): clear the flag and
                                    // wake it instead of dispatching a new task.
                                    synchronized (socketWrapper.readLock) {
                                        socketWrapper.readBlocking = false;
                                        socketWrapper.readLock.notify();
                                    }
                                    // processSocket() (AbstractEndpoint) hands the
                                    // work to a SocketProcessor, run on the worker
                                    // pool when one is configured, otherwise on the
                                    // calling thread.
                                } else if (!processSocket(socketWrapper, SocketEvent.OPEN_READ, true)) {
                                    // Dispatch failed: the socket must be closed.
                                    closeSocket = true;
                                }
                            }
                            // Then handle write readiness, unless read processing
                            // already decided to close the socket.
                            if (!closeSocket && sk.isWritable()) {
                                // Mirror of the read handling above.
                                if (socketWrapper.writeOperation != null) {
                                    if (!socketWrapper.writeOperation.process()) {
                                        closeSocket = true;
                                    }
                                } else if (socketWrapper.writeBlocking) {
                                    // Wake a thread parked in a simulated blocking
                                    // write.
                                    synchronized (socketWrapper.writeLock) {
                                        socketWrapper.writeBlocking = false;
                                        socketWrapper.writeLock.notify();
                                    }
                                } else if (!processSocket(socketWrapper, SocketEvent.OPEN_WRITE, true)) {
                                    // Dispatch failed: the socket must be closed.
                                    closeSocket = true;
                                }
                            }
                            if (closeSocket) {
                                // Processing failed somewhere above - close the
                                // socket (which also cancels the key).
                                socketWrapper.close();
                            }
                        }
                    }
                } else {
                    // Invalid key - cancel it by closing the socket.
                    socketWrapper.close();
                }
            } catch (CancelledKeyException ckx) {
                // Key was cancelled concurrently - treat as closed.
                socketWrapper.close();
            } catch (Throwable t) {
                ExceptionUtils.handleThrowable(t);
                log.error(sm.getString("endpoint.nio.keyProcessingError"), t);
            }
        }

        public SendfileState processSendfile(SelectionKey sk, NioSocketWrapper socketWrapper,
                boolean calledByProcessor) {
            // Sends a static file to the client using FileChannel.transferTo() so
            // the kernel can use its zero-copy path. The SendfileData (file name,
            // position, remaining length, keep-alive state) was attached to the
            // wrapper earlier during response processing.
            // Returns DONE when the file (and any buffered TLS output) is fully
            // written, PENDING when OP_WRITE was (re)registered to continue later,
            // or ERROR on failure.
            NioChannel sc = null;
            try {
                // Stop the selector from re-reporting the ops we are handling.
                unreg(sk, socketWrapper, sk.readyOps());
                SendfileData sd = socketWrapper.getSendfileData();

                if (log.isTraceEnabled()) {
                    log.trace("Processing send file for: " + sd.fileName);
                }

                if (sd.fchannel == null) {
                    // Setup the file channel
                    File f = new File(sd.fileName);
                    @SuppressWarnings("resource") // Closed when channel is closed
                    FileInputStream fis = new FileInputStream(f);
                    sd.fchannel = fis.getChannel();
                }

                // Configure output channel
                sc = socketWrapper.getSocket();
                // TLS/SSL channel is slightly different
                WritableByteChannel wc = ((sc instanceof SecureNioChannel) ? sc : sc.getIOChannel());

                // We still have data in the buffer
                // (e.g. TLS output that could not be flushed on a previous pass)
                if (sc.getOutboundRemaining() > 0) {
                    if (sc.flushOutbound()) {
                        socketWrapper.updateLastWrite();
                    }
                } else {
                    // Core zero-copy step: transfer file bytes straight to the
                    // socket channel without copying through user space.
                    long written = sd.fchannel.transferTo(sd.pos, sd.length, wc);
                    if (written > 0) {
                        sd.pos += written;
                        sd.length -= written;
                        socketWrapper.updateLastWrite();
                    } else {
                        // Unusual not to be able to transfer any bytes
                        // Check the length was set correctly
                        if (sd.fchannel.size() <= sd.pos) {
                            throw new IOException(sm.getString("endpoint.sendfile.tooMuchData"));
                        }
                    }
                }
                if (sd.length <= 0 && sc.getOutboundRemaining()<=0) {
                    // Everything written: clean up the sendfile state.
                    if (log.isDebugEnabled()) {
                        log.debug("Send file complete for: " + sd.fileName);
                    }
                    socketWrapper.setSendfileData(null);
                    try {
                        sd.fchannel.close();
                    } catch (Exception ignore) {
                        // Best-effort close; nothing useful to do on failure.
                    }
                    // For calls from outside the Poller, the caller is
                    // responsible for registering the socket for the
                    // appropriate event(s) if sendfile completes.
                    if (!calledByProcessor) {
                        switch (sd.keepAliveState) {
                        case NONE: {
                            // Connection: close - shut the socket down.
                            if (log.isDebugEnabled()) {
                                log.debug("Send file connection is being closed");
                            }
                            socketWrapper.close();
                            break;
                        }
                        case PIPELINED: {
                            // More request data already buffered - process it now.
                            if (log.isDebugEnabled()) {
                                log.debug("Connection is keep alive, processing pipe-lined data");
                            }
                            if (!processSocket(socketWrapper, SocketEvent.OPEN_READ, true)) {
                                socketWrapper.close();
                            }
                            break;
                        }
                        case OPEN: {
                            // Keep-alive: wait for the next request.
                            if (log.isDebugEnabled()) {
                                log.debug("Connection is keep alive, registering back for OP_READ");
                            }
                            reg(sk, socketWrapper, SelectionKey.OP_READ);
                            break;
                        }
                        }
                    }
                    return SendfileState.DONE;
                } else {
                    // Not done yet: arrange to be called again when writable.
                    if (log.isDebugEnabled()) {
                        log.debug("OP_WRITE for sendfile: " + sd.fileName);
                    }
                    if (calledByProcessor) {
                        add(socketWrapper, SelectionKey.OP_WRITE);
                    } else {
                        reg(sk, socketWrapper, SelectionKey.OP_WRITE);
                    }
                    return SendfileState.PENDING;
                }
            } catch (IOException e) {
                if (log.isDebugEnabled()) {
                    log.debug("Unable to complete sendfile request:", e);
                }
                // When called by a processor, the processor owns socket cleanup.
                if (!calledByProcessor && sc != null) {
                    socketWrapper.close();
                }
                return SendfileState.ERROR;
            } catch (Throwable t) {
                log.error(sm.getString("endpoint.sendfile.error"), t);
                if (!calledByProcessor && sc != null) {
                    socketWrapper.close();
                }
                return SendfileState.ERROR;
            }
        }

        protected void unreg(SelectionKey sk, NioSocketWrapper socketWrapper, int readyOps) {
            // This is a must, so that we don't have multiple threads messing with the socket
            // Drop the just-fired ready operations from the interest set so the
            // selector does not report them again while a worker owns the socket.
            final int remainingOps = sk.interestOps() & ~readyOps;
            reg(sk, socketWrapper, remainingOps);
        }

        protected void reg(SelectionKey sk, NioSocketWrapper socketWrapper, int intops) {
            // Apply the new interest set to the SelectionKey first (this may throw
            // CancelledKeyException), then mirror it on the wrapper so both views
            // of the interest ops stay in sync.
            sk.interestOps(intops);
            socketWrapper.interestOps(intops);
        }

        protected void timeout(int keyCount, boolean hasEvents) {
            // Scans every key registered with the selector and errors out sockets
            // whose read/write inactivity exceeded their configured timeout.
            long now = System.currentTimeMillis();
            // This method is called on every loop of the Poller. Don't process
            // timeouts on every loop of the Poller since that would create too
            // much load and timeouts can afford to wait a few seconds.
            // However, do process timeouts if any of the following are true:
            // - the selector simply timed out (suggests there isn't much load)
            // - the nextExpiration time has passed
            // - the server socket is being closed
            if (nextExpiration > 0 && (keyCount > 0 || hasEvents) && (now < nextExpiration) && !close) {
                // None of the above hold: skip this pass.
                return;
            }
            // A timeout pass is due.
            int keycount = 0;
            try {
                for (SelectionKey key : selector.keys()) {
                    keycount++;
                    NioSocketWrapper socketWrapper = (NioSocketWrapper) key.attachment();
                    try {
                        if (socketWrapper == null) {
                            // We don't support any keys without attachments
                            if (key.isValid()) {
                                key.cancel();
                            }
                        } else if (close) {
                            // Poller is shutting down: clear interest on both the
                            // key and the wrapper, then close the socket.
                            key.interestOps(0);
                            // Avoid duplicate stop calls
                            socketWrapper.interestOps(0);
                            socketWrapper.close();
                        } else if (socketWrapper.interestOpsHas(SelectionKey.OP_READ) ||
                                  socketWrapper.interestOpsHas(SelectionKey.OP_WRITE)) {
                            boolean readTimeout = false;
                            boolean writeTimeout = false;
                            // Check for read timeout
                            // (time since last successful read vs configured limit;
                            // a timeout of <= 0 means "no limit")
                            if (socketWrapper.interestOpsHas(SelectionKey.OP_READ)) {
                                long delta = now - socketWrapper.getLastRead();
                                long timeout = socketWrapper.getReadTimeout();
                                if (timeout > 0 && delta > timeout) {
                                    readTimeout = true;
                                }
                            }
                            // Check for write timeout
                            if (!readTimeout && socketWrapper.interestOpsHas(SelectionKey.OP_WRITE)) {
                                long delta = now - socketWrapper.getLastWrite();
                                long timeout = socketWrapper.getWriteTimeout();
                                if (timeout > 0 && delta > timeout) {
                                    writeTimeout = true;
                                }
                            }
                            // On either timeout: clear interest ops (so the key is
                            // effectively removed from dispatch), record the error
                            // and notify whoever owns the in-flight operation.
                            if (readTimeout || writeTimeout) {
                                key.interestOps(0);
                                // Avoid duplicate timeout calls
                                socketWrapper.interestOps(0);
                                socketWrapper.setError(new SocketTimeoutException());
                                if (readTimeout && socketWrapper.readOperation != null) {
                                    if (!socketWrapper.readOperation.process()) {
                                        socketWrapper.close();
                                    }
                                } else if (writeTimeout && socketWrapper.writeOperation != null) {
                                    if (!socketWrapper.writeOperation.process()) {
                                        socketWrapper.close();
                                    }
                                } else if (!processSocket(socketWrapper, SocketEvent.ERROR, true)) {
                                    socketWrapper.close();
                                }
                            }
                        }
                    } catch (CancelledKeyException ckx) {
                        if (socketWrapper != null) {
                            socketWrapper.close();
                        }
                    }
                }
            } catch (ConcurrentModificationException cme) {
                // See https://bz.apache.org/bugzilla/show_bug.cgi?id=57943
                log.warn(sm.getString("endpoint.nio.timeoutCme"), cme);
            }
            // For logging purposes only
            long prevExp = nextExpiration;
            nextExpiration = System.currentTimeMillis() +
                    socketProperties.getTimeoutInterval();
            if (log.isTraceEnabled()) {
                log.trace("timeout completed: keys processed=" + keycount +
                        "; now=" + now + "; nextExpiration=" + prevExp +
                        "; keyCount=" + keyCount + "; hasEvents=" + hasEvents +
                        "; eval=" + ((now < prevExp) && (keyCount>0 || hasEvents) && (!close) ));
            }

        }
    }

    // --------------------------------------------------- Socket Wrapper Class

    public static class NioSocketWrapper extends SocketWrapperBase<NioChannel> {

        // Cache of NioChannel objects for reuse, shared with the owning endpoint
        // (see doClose(), which pushes the channel back on close).
        private final SynchronizedStack<NioChannel> nioChannels;
        // The poller this socket is (or will be) registered with.
        private final Poller poller;

        // Mirror of the SelectionKey's interest ops; kept in sync with the key
        // by Poller.reg().
        private int interestOps = 0;
        // Non-null while a sendfile (zero-copy file send) is in progress for
        // this socket (see Poller.processSendfile()).
        private volatile SendfileData sendfileData = null;
        // Timestamps of the last successful read/write; consulted by
        // Poller.timeout() for inactivity detection.
        private volatile long lastRead = System.currentTimeMillis();
        private volatile long lastWrite = lastRead;

        // Monitor objects and flags used to simulate blocking reads/writes on
        // the non-blocking channel: a reader/writer parks on the lock with the
        // flag set, and Poller.processKey() wakes it when the channel is ready.
        private final Object readLock;
        private volatile boolean readBlocking = false;
        private final Object writeLock;
        private volatile boolean writeBlocking = false;

        /**
         * Creates a wrapper for the given channel, caching references to the
         * endpoint's poller and NioChannel cache and wiring up the read/write
         * monitors used to simulate blocking I/O on the non-blocking channel.
         *
         * @param channel  the (possibly TLS) channel to wrap
         * @param endpoint the owning endpoint
         */
        public NioSocketWrapper(NioChannel channel, NioEndpoint endpoint) {
            super(channel, endpoint);
            if (endpoint.getUnixDomainSocketPath() != null) {
                // Pretend localhost for easy compatibility
                localAddr = "127.0.0.1";
                localName = "localhost";
                localPort = 0;
                remoteAddr = "127.0.0.1";
                remoteHost = "localhost";
                remotePort = 0;
            }
            nioChannels = endpoint.getNioChannels();
            poller = endpoint.getPoller();
            socketBufferHandler = channel.getBufHandler();
            // Reuse the pending-operation objects from the superclass as the
            // wait/notify monitors when present; fall back to plain objects.
            readLock = (readPending == null) ? new Object() : readPending;
            writeLock = (writePending == null) ? new Object() : writePending;
        }

        public Poller getPoller() { return poller; }
        // Snapshot of the cached interest ops (not read from the SelectionKey).
        public int interestOps() { return interestOps; }
        public int interestOps(int ops) { this.interestOps  = ops; return ops; }
        // True if every bit of targetOp is present in the cached interest ops.
        public boolean interestOpsHas(int targetOp) {
            return (this.interestOps() & targetOp) == targetOp;
        }

        // Sendfile state for this socket; non-null only while a file send is in
        // progress (see Poller.processSendfile()).
        public void setSendfileData(SendfileData sf) { this.sendfileData = sf;}
        public SendfileData getSendfileData() { return this.sendfileData; }

        // Refresh / read the activity timestamps consulted by Poller.timeout().
        public void updateLastWrite() { lastWrite = System.currentTimeMillis(); }
        public long getLastWrite() { return lastWrite; }
        public void updateLastRead() { lastRead = System.currentTimeMillis(); }
        public long getLastRead() { return lastRead; }

        @Override
        public boolean isReadyForRead() throws IOException {
            // Data already buffered means we are trivially ready.
            socketBufferHandler.configureReadBufferForRead();
            if (socketBufferHandler.getReadBuffer().remaining() > 0) {
                return true;
            }

            // Otherwise attempt a non-blocking fill; any bytes read leave the
            // (write-configured) buffer with a non-zero position.
            fillReadBuffer(false);
            return socketBufferHandler.getReadBuffer().position() > 0;
        }


        @Override
        public int read(boolean block, byte[] b, int off, int len) throws IOException {
            // First serve the caller from any bytes already buffered.
            int bytesRead = populateReadBuffer(b, off, len);
            if (bytesRead > 0) {
                /*
                 * Since more bytes may have arrived since the buffer was last
                 * filled, it is an option at this point to perform a
                 * non-blocking read. However correctly handling the case if
                 * that read returns end of stream adds complexity. Therefore,
                 * at the moment, the preference is for simplicity.
                 */
                return bytesRead;
            }

            // The buffer was empty: fill it from the socket.
            bytesRead = fillReadBuffer(block);
            updateLastRead();
            if (bytesRead <= 0) {
                // Nothing read (0) or end of stream (-1): report it as-is.
                return bytesRead;
            }

            // Copy as much of the freshly read data as fits in the caller's
            // array.
            socketBufferHandler.configureReadBufferForRead();
            int toCopy = Math.min(bytesRead, len);
            socketBufferHandler.getReadBuffer().get(b, off, toCopy);
            return toCopy;
        }


        @Override
        public int read(boolean block, ByteBuffer to) throws IOException {
            // First serve the caller from any bytes already buffered.
            int bytesRead = populateReadBuffer(to);
            if (bytesRead > 0) {
                /*
                 * Since more bytes may have arrived since the buffer was last
                 * filled, it is an option at this point to perform a
                 * non-blocking read. However correctly handling the case if
                 * that read returns end of stream adds complexity. Therefore,
                 * at the moment, the preference is for simplicity.
                 */
                return bytesRead;
            }

            // The socket read buffer capacity is socket.appReadBufSize
            int bufferCapacity = socketBufferHandler.getReadBuffer().capacity();
            if (to.remaining() >= bufferCapacity) {
                // The destination is at least as large as the internal buffer:
                // read straight into it, capped at the internal buffer's size.
                to.limit(to.position() + bufferCapacity);
                bytesRead = fillReadBuffer(block, to);
                if (log.isDebugEnabled()) {
                    log.debug("Socket: [" + this + "], Read direct from socket: [" + bytesRead + "]");
                }
                updateLastRead();
            } else {
                // Destination too small for a direct read: fill the internal
                // buffer first, then copy what fits.
                bytesRead = fillReadBuffer(block);
                if (log.isDebugEnabled()) {
                    log.debug("Socket: [" + this + "], Read into buffer: [" + bytesRead + "]");
                }
                updateLastRead();
                if (bytesRead > 0) {
                    bytesRead = populateReadBuffer(to);
                }
            }
            return bytesRead;
        }


        @Override
        protected void doClose() {
            // Closes the wrapped channel and releases associated resources.
            // Order matters: remove this connection from the endpoint's map
            // first, then close the channel, then (only while the endpoint is
            // still running) return the NioChannel to the cache for reuse.
            if (log.isDebugEnabled()) {
                log.debug("Calling [" + getEndpoint() + "].closeSocket([" + this + "])");
            }
            try {
                getEndpoint().connections.remove(getSocket().getIOChannel());
                if (getSocket().isOpen()) {
                    getSocket().close(true);
                }
                if (getEndpoint().running) {
                    // Recycle the channel; free it if the cache is absent or full.
                    if (nioChannels == null || !nioChannels.push(getSocket())) {
                        getSocket().free();
                    }
                }
            } catch (Throwable e) {
                ExceptionUtils.handleThrowable(e);
                if (log.isDebugEnabled()) {
                    log.error(sm.getString("endpoint.debug.channelCloseFail"), e);
                }
            } finally {
                // Always detach buffers and swap in the closed sentinel channel
                // so late callers fail fast rather than touching freed state.
                socketBufferHandler = SocketBufferHandler.EMPTY;
                nonBlockingWriteBuffer.clear();
                reset(NioChannel.CLOSED_NIO_CHANNEL);
            }
            try {
                // If a sendfile was in progress, close its file channel too.
                SendfileData data = getSendfileData();
                if (data != null && data.fchannel != null && data.fchannel.isOpen()) {
                    data.fchannel.close();
                }
            } catch (Throwable e) {
                ExceptionUtils.handleThrowable(e);
                if (log.isDebugEnabled()) {
                    log.error(sm.getString("endpoint.sendfile.closeError"), e);
                }
            }
        }

        // Fills the internal read buffer from the socket, delegating to the
        // two-argument overload after switching the buffer into write mode.
        private int fillReadBuffer(boolean block) throws IOException {
            socketBufferHandler.configureReadBufferForWrite();
            ByteBuffer readBuffer = socketBufferHandler.getReadBuffer();
            return fillReadBuffer(block, readBuffer);
        }


        /**
         * Reads from the socket into the given buffer.
         *
         * @param block {@code true} to wait (via the poller and {@code readLock})
         *              for data up to the read timeout, {@code false} to return
         *              immediately with whatever is available
         * @param buffer the destination buffer
         * @return the number of bytes read (always &gt; 0 for a blocking read)
         * @throws EOFException if the peer has closed the connection
         * @throws SocketTimeoutException if a blocking read times out
         * @throws ClosedChannelException if this wrapper's channel is closed
         * @throws IOException for other I/O failures
         */
        private int fillReadBuffer(boolean block, ByteBuffer buffer) throws IOException {
            int n = 0;
            if (getSocket() == NioChannel.CLOSED_NIO_CHANNEL) {
                throw new ClosedChannelException();
            }
            if (block) {
                long timeout = getReadTimeout();
                long startNanos = 0;
                do {
                    if (startNanos > 0) {
                        // A previous iteration waited: reduce the remaining
                        // timeout by the elapsed time (minimum 1ms so the
                        // remaining timeout always decreases)
                        long elapsedMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
                        if (elapsedMillis == 0) {
                            elapsedMillis = 1;
                        }
                        timeout -= elapsedMillis;
                        if (timeout <= 0) {
                            throw new SocketTimeoutException();
                        }
                    }
                    n = getSocket().read(buffer);
                    if (n == -1) {
                        throw new EOFException();
                    } else if (n == 0) {
                        // No data available yet: register for OP_READ with the
                        // poller and wait on readLock for its notification
                        if (!readBlocking) {
                            readBlocking = true;
                            registerReadInterest();
                        }
                        synchronized (readLock) {
                            // Re-check under the lock: the poller may already
                            // have signalled and cleared readBlocking
                            if (readBlocking) {
                                try {
                                    if (timeout > 0) {
                                        startNanos = System.nanoTime();
                                        readLock.wait(timeout);
                                    } else {
                                        readLock.wait();
                                    }
                                } catch (InterruptedException e) {
                                    // Continue
                                }
                            }
                        }
                    }
                } while (n == 0); // TLS needs to loop as reading zero application bytes is possible
            } else {
                // Non-blocking read: channel.read returns 0 immediately when
                // no data is available instead of waiting
                n = getSocket().read(buffer);
                if (n == -1) {
                    throw new EOFException();
                }
            }
            return n;
        }


        /**
         * Attempts a non-blocking flush of pending write data: first the
         * socket/network buffers, then (only once those drain) the queue of
         * buffered non-blocking writes, flushing again if that refills them.
         *
         * @return {@code true} if data remains to be flushed after this call
         * @throws IOException if a write to the socket fails
         */
        @Override
        protected boolean flushNonBlocking() throws IOException {
            boolean dataLeft = socketOrNetworkBufferHasDataLeft();

            // Write to the socket, if there is anything to write
            if (dataLeft) {
                doWrite(false);
                dataLeft = socketOrNetworkBufferHasDataLeft();
            }

            if (!dataLeft && !nonBlockingWriteBuffer.isEmpty()) {
                dataLeft = nonBlockingWriteBuffer.write(this, false);

                // Writing the queued data may have refilled the socket/network
                // buffers - flush once more before reporting state
                if (!dataLeft && socketOrNetworkBufferHasDataLeft()) {
                    doWrite(false);
                    dataLeft = socketOrNetworkBufferHasDataLeft();
                }
            }

            return dataLeft;
        }


        /*
         * https://bz.apache.org/bugzilla/show_bug.cgi?id=66076
         *
         * TLS connections place encrypted data in an additional network output
         * buffer before it is written to the wire, so that buffer can still
         * hold bytes after the socket write buffer has been drained.
         *
         * Non-blocking NIO flushes must therefore only report "nothing left"
         * when both buffers are empty.
         */
        private boolean socketOrNetworkBufferHasDataLeft() {
            if (!socketBufferHandler.isWriteBufferEmpty()) {
                return true;
            }
            return getSocket().getOutboundRemaining() > 0;
        }


        /**
         * Writes the given buffer to the socket.
         *
         * @param block {@code true} to block until the buffer (and any TLS
         *              outbound data) has been completely written or the write
         *              timeout expires, {@code false} to write only what the
         *              socket accepts without blocking
         * @param buffer the data to write
         * @throws IOException if the channel is closed, a previous blocking
         *              write already timed out, the write fails, or a blocking
         *              write times out ({@link SocketTimeoutException})
         */
        @Override
        protected void doWrite(boolean block, ByteBuffer buffer) throws IOException {
            int n = 0;
            if (getSocket() == NioChannel.CLOSED_NIO_CHANNEL) {
                throw new ClosedChannelException();
            }
            if (block) {
                if (previousIOException != null) {
                    /*
                     * Socket has previously timed out.
                     *
                     * Blocking writes assume that buffer is always fully
                     * written so there is no code checking for incomplete
                     * writes, retaining the unwritten data and attempting to
                     * write it as part of a subsequent write call.
                     *
                     * Because of the above, when a timeout is triggered we need
                     * to skip subsequent attempts to write as otherwise it will
                     * appear to the client as if some data was dropped just
                     * before the connection is lost. It is better if the client
                     * just sees the dropped connection.
                     */
                    throw new IOException(previousIOException);
                }
                long timeout = getWriteTimeout();
                long startNanos = 0;
                do {
                    if (startNanos > 0) {
                        // A previous iteration waited: reduce the remaining
                        // timeout by the elapsed time (minimum 1ms so the
                        // remaining timeout always decreases)
                        long elapsedMillis = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startNanos);
                        if (elapsedMillis == 0) {
                            elapsedMillis = 1;
                        }
                        timeout -= elapsedMillis;
                        if (timeout <= 0) {
                            // Remember the timeout so later blocking writes
                            // fail fast (see the comment above)
                            previousIOException = new SocketTimeoutException();
                            throw previousIOException;
                        }
                    }
                    n = getSocket().write(buffer);
                    if (n == 0 && (buffer.hasRemaining() || getSocket().getOutboundRemaining() > 0)) {
                        // n == 0 could be an incomplete write but it could also
                        // indicate that a previous incomplete write of the
                        // outbound buffer (for TLS) has now completed. Only
                        // block if there is still data to write.
                        writeBlocking = true;
                        registerWriteInterest();
                        synchronized (writeLock) {
                            if (writeBlocking) {
                                try {
                                    if (timeout > 0) {
                                        startNanos = System.nanoTime();
                                        writeLock.wait(timeout);
                                    } else {
                                        writeLock.wait();
                                    }
                                } catch (InterruptedException e) {
                                    // Continue
                                }
                                writeBlocking = false;
                            }
                        }
                    } else if (startNanos > 0) {
                        // If something was written, reset timeout
                        timeout = getWriteTimeout();
                        startNanos = 0;
                    }
                } while (buffer.hasRemaining() || getSocket().getOutboundRemaining() > 0);
            } else {
                do {
                    n = getSocket().write(buffer);
                } while (n > 0 && buffer.hasRemaining());
                // If there is data left in the buffer the socket will be registered for
                // write further up the stack. This is to ensure the socket is only
                // registered for write once as both container and user code can trigger
                // write registration.
            }
            updateLastWrite();
        }


        /**
         * Registers this connection for read notifications.
         */
        @Override
        public void registerReadInterest() {
            if (log.isDebugEnabled()) {
                log.debug(sm.getString("endpoint.debug.registerRead", this));
            }
            // Queue an OP_READ registration for this wrapper on the poller
            // thread's event queue so the poller resumes watching the client
            // connection for readable data
            getPoller().add(this, SelectionKey.OP_READ);
        }


        /**
         * Registers this connection for write notifications.
         */
        @Override
        public void registerWriteInterest() {
            // Hand this wrapper to the Poller so OP_WRITE is added to the
            // connection's interest set on the poller thread.
            Poller poller = getPoller();
            if (log.isDebugEnabled()) {
                log.debug(sm.getString("endpoint.debug.registerWrite", this));
            }
            poller.add(this, SelectionKey.OP_WRITE);
        }


        /**
         * Creates the NIO-specific sendfile state holder for the given file
         * region.
         */
        @Override
        public SendfileDataBase createSendfileData(String filename, long pos, long length) {
            SendfileData data = new SendfileData(filename, pos, length);
            return data;
        }


        /**
         * Starts (or continues) a sendfile operation for this connection,
         * attempting the first write on the current thread before handing the
         * remainder to the poller.
         *
         * @param sendfileData describes the file region to send
         * @return the sendfile state reported by the poller
         */
        @Override
        public SendfileState processSendfile(SendfileDataBase sendfileData) {
            setSendfileData((SendfileData) sendfileData);
            SelectionKey key = getSocket().getIOChannel().keyFor(getPoller().getSelector());
            // Might as well do the first write on this thread
            return getPoller().processSendfile(key, this, true);
        }


        /**
         * Caches the textual form of the peer's IP address, if the underlying
         * channel is still available.
         */
        @Override
        protected void populateRemoteAddr() {
            SocketChannel channel = getSocket().getIOChannel();
            if (channel == null) {
                return;
            }
            InetAddress peer = channel.socket().getInetAddress();
            if (peer != null) {
                remoteAddr = peer.getHostAddress();
            }
        }


        /**
         * Caches the peer's host name and, if not yet captured, its address
         * as well.
         */
        @Override
        protected void populateRemoteHost() {
            SocketChannel channel = getSocket().getIOChannel();
            if (channel == null) {
                return;
            }
            InetAddress peer = channel.socket().getInetAddress();
            if (peer == null) {
                return;
            }
            remoteHost = peer.getHostName();
            if (remoteAddr == null) {
                remoteAddr = peer.getHostAddress();
            }
        }


        /**
         * Caches the peer's port number, if the underlying channel is still
         * available.
         */
        @Override
        protected void populateRemotePort() {
            SocketChannel channel = getSocket().getIOChannel();
            if (channel == null) {
                return;
            }
            remotePort = channel.socket().getPort();
        }


        /**
         * Caches the host name of the local interface this connection uses.
         */
        @Override
        protected void populateLocalName() {
            SocketChannel channel = getSocket().getIOChannel();
            if (channel == null) {
                return;
            }
            InetAddress local = channel.socket().getLocalAddress();
            if (local != null) {
                localName = local.getHostName();
            }
        }


        /**
         * Caches the textual form of the local IP address this connection
         * uses.
         */
        @Override
        protected void populateLocalAddr() {
            SocketChannel channel = getSocket().getIOChannel();
            if (channel == null) {
                return;
            }
            InetAddress local = channel.socket().getLocalAddress();
            if (local != null) {
                localAddr = local.getHostAddress();
            }
        }


        /**
         * Caches the local port number this connection uses.
         */
        @Override
        protected void populateLocalPort() {
            SocketChannel channel = getSocket().getIOChannel();
            if (channel == null) {
                return;
            }
            localPort = channel.socket().getLocalPort();
        }


        /**
         * Returns the SSL support object for this connection.
         *
         * @return the SSL support for TLS connections, or {@code null} for
         *         plain connections
         */
        @Override
        public SSLSupport getSslSupport() {
            // Only TLS connections (SecureNioChannel) can provide SSL details
            NioChannel channel = getSocket();
            if (!(channel instanceof SecureNioChannel)) {
                return null;
            }
            return ((SecureNioChannel) channel).getSSLSupport();
        }


        /**
         * Forces a TLS renegotiation that requires a client certificate. The
         * cast assumes this is only invoked for TLS connections backed by a
         * {@code SecureNioChannel}.
         *
         * @param sslSupport updated with the new SSL session once the
         *              re-handshake completes
         * @throws IOException if the re-handshake fails
         */
        @Override
        public void doClientAuth(SSLSupport sslSupport) throws IOException {
            SecureNioChannel sslChannel = (SecureNioChannel) getSocket();
            SSLEngine engine = sslChannel.getSslEngine();
            if (!engine.getNeedClientAuth()) {
                // Need to re-negotiate SSL connection
                engine.setNeedClientAuth(true);
                sslChannel.rehandshake(getEndpoint().getConnectionTimeout());
                ((JSSESupport) sslSupport).setSession(engine.getSession());
            }
        }


        /**
         * Delegates the application read buffer handler to the wrapped
         * channel.
         */
        @Override
        public void setAppReadBufHandler(ApplicationBufferHandler handler) {
            getSocket().setAppReadBufHandler(handler);
        }

        /**
         * Creates the NIO-specific state object for a vectored asynchronous
         * read or write operation.
         */
        @Override
        protected <A> OperationState<A> newOperationState(boolean read,
                ByteBuffer[] buffers, int offset, int length,
                BlockingMode block, long timeout, TimeUnit unit, A attachment,
                CompletionCheck check, CompletionHandler<Long, ? super A> handler,
                Semaphore semaphore, VectoredIOCompletionHandler<A> completion) {
            return new NioOperationState<>(read, buffers, offset, length, block,
                    timeout, unit, attachment, check, handler, semaphore, completion);
        }

        /**
         * State holder for a vectored asynchronous I/O operation. {@link #run}
         * performs the actual transfer, either inline on the initiating thread
         * or later from the poller when the socket was not immediately ready.
         */
        private class NioOperationState<A> extends OperationState<A> {
            // true while the operation has not needed the poller yet; cleared
            // the first time a poller registration is required
            private volatile boolean inline = true;
            private NioOperationState(boolean read, ByteBuffer[] buffers, int offset, int length,
                    BlockingMode block, long timeout, TimeUnit unit, A attachment, CompletionCheck check,
                    CompletionHandler<Long, ? super A> handler, Semaphore semaphore,
                    VectoredIOCompletionHandler<A> completion) {
                super(read, buffers, offset, length, block,
                        timeout, unit, attachment, check, handler, semaphore, completion);
            }

            @Override
            protected boolean isInline() {
                return inline;
            }

            @Override
            protected boolean hasOutboundRemaining() {
                return getSocket().getOutboundRemaining() > 0;
            }

            @Override
            public void run() {
                // Perform the IO operation
                // Called from the poller to continue the IO operation
                long nBytes = 0;
                if (getError() == null) {
                    try {
                        synchronized (this) {
                            if (!completionDone) {
                                // This filters out same notification until processing
                                // of the current one is done
                                if (log.isDebugEnabled()) {
                                    log.debug("Skip concurrent " + (read ? "read" : "write") + " notification");
                                }
                                return;
                            }
                            if (read) {
                                // Read from main buffer first
                                if (!socketBufferHandler.isReadBufferEmpty()) {
                                    // There is still data inside the main read buffer, it needs to be read first
                                    socketBufferHandler.configureReadBufferForRead();
                                    for (int i = 0; i < length && !socketBufferHandler.isReadBufferEmpty(); i++) {
                                        nBytes += transfer(socketBufferHandler.getReadBuffer(), buffers[offset + i]);
                                    }
                                }
                                // Only hit the socket if the main buffer had
                                // nothing to offer
                                if (nBytes == 0) {
                                    nBytes = getSocket().read(buffers, offset, length);
                                    updateLastRead();
                                }
                            } else {
                                boolean doWrite = true;
                                // Write from main buffer first
                                if (socketOrNetworkBufferHasDataLeft()) {
                                    // There is still data inside the main write buffer, it needs to be written first
                                    socketBufferHandler.configureWriteBufferForRead();
                                    do {
                                        nBytes = getSocket().write(socketBufferHandler.getWriteBuffer());
                                    } while (socketOrNetworkBufferHasDataLeft() && nBytes > 0);
                                    // If the main buffer could not be drained,
                                    // the operation's own buffers must wait
                                    if (socketOrNetworkBufferHasDataLeft()) {
                                        doWrite = false;
                                    }
                                    // Preserve a negative value since it is an error
                                    if (nBytes > 0) {
                                        nBytes = 0;
                                    }
                                }
                                if (doWrite) {
                                    long n = 0;
                                    do {
                                        n = getSocket().write(buffers, offset, length);
                                        if (n == -1) {
                                            nBytes = n;
                                        } else {
                                            nBytes += n;
                                        }
                                    } while (n > 0);
                                    updateLastWrite();
                                }
                            }
                            // Progress was made, or nothing remains to
                            // transfer: allow the next notification through
                            if (nBytes != 0 || (!buffersArrayHasRemaining(buffers, offset, length) &&
                                    (read || !socketOrNetworkBufferHasDataLeft()))) {
                                completionDone = false;
                            }
                        }
                    } catch (IOException e) {
                        setError(e);
                    }
                }
                if (nBytes > 0 || (nBytes == 0 && !buffersArrayHasRemaining(buffers, offset, length) &&
                        (read || !socketOrNetworkBufferHasDataLeft()))) {
                    // The bytes processed are only updated in the completion handler
                    completion.completed(Long.valueOf(nBytes), this);
                } else if (nBytes < 0 || getError() != null) {
                    IOException error = getError();
                    if (error == null) {
                        error = new EOFException();
                    }
                    completion.failed(error, this);
                } else {
                    // As soon as the operation uses the poller, it is no longer inline
                    inline = false;
                    if (read) {
                        registerReadInterest();
                    } else {
                        registerWriteInterest();
                    }
                }
            }
        }
    }


    // ---------------------------------------------- SocketProcessor Inner Class

    /**
     * This class is the equivalent of the Worker, but will simply be used in
     * an external Executor thread pool.
     */
    protected class SocketProcessor extends SocketProcessorBase<NioChannel> {

        public SocketProcessor(SocketWrapperBase<NioChannel> socketWrapper, SocketEvent event) {
            super(socketWrapper, event);
        }

        // socketWrapper and event are the NioSocketWrapper to process and the
        // socket event that triggered this processing
        @Override
        protected void doRun() {
            /*
             * Do not cache and re-use the value of socketWrapper.getSocket() in
             * this method. If the socket closes the value will be updated to
             * CLOSED_NIO_CHANNEL and the previous value potentially re-used for
             * a new connection. That can result in a stale cached value which
             * in turn can result in unintentionally closing currently active
             * connections.
             */
            Poller poller = NioEndpoint.this.poller;
            if (poller == null) {
                socketWrapper.close();
                return;
            }

            try {
                // handshake tracks TLS handshake progress; plain HTTP needs no
                // handshake phase and is marked complete immediately at c1
                int handshake = -1;
                try {
                    if (socketWrapper.getSocket().isHandshakeComplete()) { // c1
                        // No TLS handshaking required. Let the handler
                        // process this socket / event combination.
                        handshake = 0;
                    } else if (event == SocketEvent.STOP || event == SocketEvent.DISCONNECT ||
                            event == SocketEvent.ERROR) {
                        // Unable to complete the TLS handshake. Treat it as
                        // if the handshake failed.
                        handshake = -1;
                    } else {
                        // SecureNioChannel (HTTPS) overrides handshake() and may
                        // return the SelectionKey interest still required to
                        // complete the TLS handshake
                        handshake = socketWrapper.getSocket().handshake(event == SocketEvent.OPEN_READ, event == SocketEvent.OPEN_WRITE);
                        // The handshake process reads/writes from/to the
                        // socket. status may therefore be OPEN_WRITE once
                        // the handshake completes. However, the handshake
                        // happens when the socket is opened so the status
                        // must always be OPEN_READ after it completes. It
                        // is OK to always set this as it is only used if
                        // the handshake completes.
                        event = SocketEvent.OPEN_READ;
                    }
                } catch (IOException x) {
                    handshake = -1;
                    if (logHandshake.isDebugEnabled()) {
                        logHandshake.debug(sm.getString("endpoint.err.handshake",
                                socketWrapper.getRemoteAddr(), Integer.toString(socketWrapper.getRemotePort())), x);
                    }
                } catch (CancelledKeyException ckx) {
                    handshake = -1;
                }
                if (handshake == 0) { // Handshake complete
                    SocketState state = SocketState.OPEN;
                    // Process the request from this socket; a null event
                    // defaults to an open-for-read notification
                    if (event == null) {
                        // c2
                        state = getHandler().process(socketWrapper, SocketEvent.OPEN_READ);
                    } else {
                        state = getHandler().process(socketWrapper, event);
                    }
                    if (state == SocketState.CLOSED) {
                        // Processing decided the connection is finished
                        socketWrapper.close();
                    }
                // Handshake failed: notify the handler, then close the channel
                } else if (handshake == -1 ) {
                    getHandler().process(socketWrapper, SocketEvent.CONNECT_FAIL);
                    socketWrapper.close();
                } else if (handshake == SelectionKey.OP_READ){ // TLS handshake still in progress
                    // The handshake needs more data from the peer: re-register
                    // this wrapper for OP_READ
                    socketWrapper.registerReadInterest();
                } else if (handshake == SelectionKey.OP_WRITE){
                    // The handshake has data to send: re-register this wrapper
                    // for OP_WRITE
                    socketWrapper.registerWriteInterest();
                }
            } catch (CancelledKeyException cx) {
                socketWrapper.close();
            } catch (VirtualMachineError vme) {
                ExceptionUtils.handleThrowable(vme);
            } catch (Throwable t) {
                log.error(sm.getString("endpoint.processing.fail"), t);
                socketWrapper.close();
            } finally {
                // Clear references so they can be garbage collected
                socketWrapper = null;
                event = null;
                //return to cache
                if (running && processorCache != null) {
                    // Recycle this processor into the cache when caching is on
                    processorCache.push(this);
                }
            }
        }

    }


    // ----------------------------------------------- SendfileData Inner Class

    /**
     * Holds the state of an in-progress sendfile operation: the file region
     * to transmit (inherited from {@code SendfileDataBase}) plus the channel
     * used to stream it.
     */
    public static class SendfileData extends SendfileDataBase {

        // Channel open on the file being sent; volatile as it may be accessed
        // from more than one thread during the operation's lifetime
        protected volatile FileChannel fchannel;

        public SendfileData(String filename, long pos, long length) {
            super(filename, pos, length);
        }
    }
}
