/*
 * Copyright 2012 The Netty Project
 *
 * The Netty Project licenses this file to you under the Apache License,
 * version 2.0 (the "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at:
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 */
package io.netty.handler.stream;

import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.Unpooled;
import io.netty.channel.Channel;
import io.netty.channel.ChannelDuplexHandler;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandler;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.ChannelProgressivePromise;
import io.netty.channel.ChannelPromise;
import io.netty.util.ReferenceCountUtil;
import io.netty.util.internal.logging.InternalLogger;
import io.netty.util.internal.logging.InternalLoggerFactory;

import java.nio.channels.ClosedChannelException;
import java.util.ArrayDeque;
import java.util.Queue;

/**
 * A {@link ChannelHandler} that adds support for writing a large data stream
 * asynchronously neither spending a lot of memory nor getting
 * {@link OutOfMemoryError}.  Large data streaming such as file transfer
 * requires complicated state management in a {@link ChannelHandler}
 * implementation.  {@link ChunkedWriteHandler} manages such complicated states
 * so that you can send a large data stream without difficulties.
 * <p>
 * To use {@link ChunkedWriteHandler} in your application, you have to insert
 * a new {@link ChunkedWriteHandler} instance.  (The handler is stateful, so a
 * fresh instance is required per pipeline — it is not sharable.)
 * <pre>
 * {@link ChannelPipeline} p = ...;
 * p.addLast("streamer", <b>new {@link ChunkedWriteHandler}()</b>);
 * p.addLast("handler", new MyHandler());
 * </pre>
 *
 * Once inserted, you can write a {@link ChunkedInput} so that the
 * {@link ChunkedWriteHandler} can pick it up and fetch the content of the
 * stream chunk by chunk and write the fetched chunk downstream:
 * <pre>
 * {@link Channel} ch = ...;
 * ch.write(new {@link ChunkedFile}(new File("video.mkv"));
 * </pre>
 *
 * Known {@link ChunkedInput} implementations include {@code ChunkedFile},
 * {@code ChunkedNioFile}, {@code ChunkedNioStream} and {@code ChunkedStream}
 * (all in {@code io.netty.handler.stream}), plus {@code HttpChunkedInput},
 * {@code HttpPostRequestEncoder} and {@code WebSocketChunkedInput} in the
 * HTTP codec packages.
 *
 * <h3>Sending a stream which generates a chunk intermittently</h3>
 *
 * Some {@link ChunkedInput} generates a chunk on a certain event or timing.
 * Such {@link ChunkedInput} implementation often returns {@code null} on
 * {@link ChunkedInput#readChunk(ChannelHandlerContext)}, resulting in the indefinitely suspended
 * transfer.  To resume the transfer when a new chunk is available, you have to
 * call {@link #resumeTransfer()}.
 * <p>
 * Note that {@code resumeTransfer()} is only needed after the handler has
 * suspended on a {@code null} chunk before end-of-input; as long as
 * {@code readChunk(...)} keeps returning data, chunks are streamed
 * continuously without any extra calls.
 */
public class ChunkedWriteHandler extends ChannelDuplexHandler {

    // Netty's internal logging facade; resolves to slf4j/log4j/JUL at runtime.
    private static final InternalLogger logger =
        InternalLoggerFactory.getInstance(ChunkedWriteHandler.class);


    /**
     * FIFO of writes that have not been passed downstream yet.  Every write()
     * lands here first; doFlush() drains it one entry at a time.  ArrayDeque
     * starts with a default capacity of 16 and grows as needed.
     */
    private final Queue<PendingWrite> queue = new ArrayDeque<PendingWrite>();
    /**
     * Saved in handlerAdded().  Declared volatile because resumeTransfer() may
     * be invoked from an arbitrary application thread and must observe the
     * value published by the event-loop thread — that is why volatile is
     * needed here.
     */
    private volatile ChannelHandlerContext ctx;

    /**
     * The write whose ChunkedInput is currently being streamed chunk by chunk;
     * null when no transfer is in progress.  Only touched from the event loop,
     * so no synchronization is required.
     */
    private PendingWrite currentWrite;

    /**
     * Creates the handler.  No state needs to be initialized up front.
     */
    public ChunkedWriteHandler() {
    }

    /**
     * The argument is validated but never stored, so it has no effect; the
     * constructor is retained only for API compatibility.
     * @deprecated use {@link #ChunkedWriteHandler()}
     */
    @Deprecated
    public ChunkedWriteHandler(int maxPendingWrites) {
        if (maxPendingWrites <= 0) {
            throw new IllegalArgumentException(
                    "maxPendingWrites: " + maxPendingWrites + " (expected: > 0)");
        }
    }

    /**
     * Remembers the context so that resumeTransfer() can drive this handler
     * from outside the pipeline.
     * @param ctx the context this handler was added under
     * @throws Exception never thrown here
     */
    @Override
    public void handlerAdded(ChannelHandlerContext ctx) throws Exception {
        this.ctx = ctx;
    }

    /**
     * Continues to fetch the chunks from the input.
     * Call this after a ChunkedInput that previously returned {@code null}
     * from readChunk() has new data available.
     */
    public void resumeTransfer() {
        final ChannelHandlerContext ctx = this.ctx;
        if (ctx == null) {
            // handlerAdded() has not run yet — nothing to resume.
            return;
        }

        // Already on the channel's event loop: flush directly.
        if (ctx.executor().inEventLoop()) {
            resumeTransfer0(ctx);
        } else {
            // let the transfer resume on the next event loop round
            ctx.executor().execute(new Runnable() {

                @Override
                public void run() {
                    resumeTransfer0(ctx);
                }
            });
        }
    }

    /**
     * Flushes, logging rather than propagating any failure — there is no
     * caller promise to notify from this entry point.
     * @param ctx the handler context
     */
    private void resumeTransfer0(ChannelHandlerContext ctx) {
        try {
            // Drain whatever can be written now.
            doFlush(ctx);
        } catch (Exception e) {
            if (logger.isWarnEnabled()) {
                logger.warn("Unexpected exception while sending chunks.", e);
            }
        }
    }


    @Override
    public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
        // Do not pass the write downstream yet — queue it.  It is written out
        // (possibly chunk by chunk) by doFlush() when flush() arrives.
        queue.add(new PendingWrite(msg, promise));
    }

    @Override
    public void flush(ChannelHandlerContext ctx) throws Exception {
        // Intercept flush to drain our own queue instead of forwarding directly.
        doFlush(ctx);
    }

    @Override
    public void channelInactive(ChannelHandlerContext ctx) throws Exception {
        // Channel went down: doFlush() will detect the inactive channel and
        // discard (fail) anything still pending.
        doFlush(ctx);
        // Propagate the inactive event down the pipeline.
        ctx.fireChannelInactive();
    }

    @Override
    public void channelWritabilityChanged(ChannelHandlerContext ctx) throws Exception {
        if (ctx.channel().isWritable()) {
            // channel is writable again try to continue flushing
            doFlush(ctx);
        }
        ctx.fireChannelWritabilityChanged();
    }
    // Fails (or, for fully-consumed ChunkedInputs, completes) every pending
    // write — the in-progress one first, then the queue — and closes inputs.
    private void discard(Throwable cause) {
        for (;;) {
            PendingWrite currentWrite = this.currentWrite;

            if (this.currentWrite == null) {
                currentWrite = queue.poll();
            } else {
                this.currentWrite = null;
            }

            if (currentWrite == null) {
                // Nothing left to discard.
                break;
            }
            Object message = currentWrite.msg;
            if (message instanceof ChunkedInput) {
                ChunkedInput<?> in = (ChunkedInput<?>) message;
                try {
                    if (!in.isEndOfInput()) {
                        // Transfer was cut short: fail the promise.
                        if (cause == null) {
                            cause = new ClosedChannelException();
                        }
                        currentWrite.fail(cause);
                    } else {
                        // All chunks were already written, so this write succeeded.
                        currentWrite.success(in.length());
                    }
                    closeInput(in);
                } catch (Exception e) {
                    currentWrite.fail(e);
                    if (logger.isWarnEnabled()) {
                        logger.warn(ChunkedInput.class.getSimpleName() + ".isEndOfInput() failed", e);
                    }
                    closeInput(in);
                }
            } else {
                // Plain (non-chunked) message that never made it downstream.
                if (cause == null) {
                    cause = new ClosedChannelException();
                }
                currentWrite.fail(cause);
            }
        }
    }

    /**
     * Core of the handler: drains the pending-write queue while the channel is
     * writable, streaming ChunkedInput messages one chunk at a time and
     * forwarding other messages unchanged.
     * @param ctx the handler context
     */
    private void doFlush(final ChannelHandlerContext ctx) {
        final Channel channel = ctx.channel();
        if (!channel.isActive()) {
            // Channel is gone: fail everything still queued (discard() closes
            // the ChunkedInputs and notifies the promises).
            discard(null);
            return;
        }
        // Tracks whether a ctx.flush() is still owed at the end.  Chunked
        // writes flush eagerly per chunk and clear the flag; pass-through
        // writes set it so the trailing flush happens.
        boolean requiresFlush = true;

        // Allocator handed to readChunk() so inputs can allocate buffers.
        ByteBufAllocator allocator = ctx.alloc();
        // Stop as soon as the outbound buffer fills up;
        // channelWritabilityChanged() calls back in here once it drains.
        while (channel.isWritable()) {
            if (currentWrite == null) {
                // No transfer in progress — pick the next queued write.
                currentWrite = queue.poll();
            }

            if (currentWrite == null) {
                // Queue empty: done.
                break;
            }

            // Final copies so the listener callbacks below can capture them.
            final PendingWrite currentWrite = this.currentWrite;
            final Object pendingMessage = currentWrite.msg;

            if (pendingMessage instanceof ChunkedInput) {
                final ChunkedInput<?> chunks = (ChunkedInput<?>) pendingMessage;
                boolean endOfInput;
                boolean suspend;
                Object message = null;
                try {
                    message = chunks.readChunk(allocator);
                    endOfInput = chunks.isEndOfInput();

                    if (message == null) {
                        // No need to suspend when reached at the end.
                        // null chunk before end-of-input means "no data yet":
                        // suspend and wait for resumeTransfer().
                        suspend = !endOfInput;
                    } else {
                        // Got a chunk — keep going.
                        suspend = false;
                    }
                } catch (final Throwable t) {
                    // Reading the chunk failed.
                    this.currentWrite = null;

                    if (message != null) {
                        // A chunk was produced before the failure; release it
                        // so its reference-counted buffer is not leaked.
                        ReferenceCountUtil.release(message);
                    }
                    // Fail this write's promise.
                    currentWrite.fail(t);
                    // Release the input's underlying resource.
                    closeInput(chunks);
                    // Abort the drain loop.
                    break;
                }

                if (suspend) {
                    // ChunkedInput.nextChunk() returned null and it has
                    // not reached at the end of input. Let's wait until
                    // more chunks arrive. Nothing to write or notify.
                    break;
                }

                if (message == null) {
                    // If message is null write an empty ByteBuf.
                    // See https://github.com/netty/netty/issues/1671
                    message = Unpooled.EMPTY_BUFFER;
                }

                // Write this single chunk downstream.
                ChannelFuture f = ctx.write(message);
                if (endOfInput) {
                    this.currentWrite = null;

                    // Register a listener which will close the input once the write is complete.
                    // This is needed because the Chunk may have some resource bound that can not
                    // be closed before its not written.
                    //
                    // See https://github.com/netty/netty/issues/303
                    f.addListener(new ChannelFutureListener() {
                        @Override
                        public void operationComplete(ChannelFuture future) throws Exception {
                            currentWrite.progress(chunks.progress(), chunks.length());
                            currentWrite.success(chunks.length());
                            closeInput(chunks);
                        }
                    });
                } else if (channel.isWritable()) {
                    // More chunks to come and the channel can take them: just
                    // report progress (or failure) and keep looping.
                    f.addListener(new ChannelFutureListener() {
                        @Override
                        public void operationComplete(ChannelFuture future) throws Exception {
                            if (!future.isSuccess()) {
                                closeInput((ChunkedInput<?>) pendingMessage);
                                currentWrite.fail(future.cause());
                            } else {
                                currentWrite.progress(chunks.progress(), chunks.length());
                            }
                        }
                    });
                } else {
                    // Channel became unwritable mid-transfer: the while loop
                    // will exit, so arrange for the transfer to resume once
                    // this chunk completes and the channel is writable again.
                    f.addListener(new ChannelFutureListener() {
                        @Override
                        public void operationComplete(ChannelFuture future) throws Exception {
                            if (!future.isSuccess()) {
                                closeInput((ChunkedInput<?>) pendingMessage);
                                currentWrite.fail(future.cause());
                            } else {
                                currentWrite.progress(chunks.progress(), chunks.length());
                                if (channel.isWritable()) {
                                    resumeTransfer();
                                }
                            }
                        }
                    });
                }
                // Flush each chunk to conserve memory
                ctx.flush();
                requiresFlush = false;
            } else {
                // Not a ChunkedInput: forward as-is with its original promise.
                this.currentWrite = null;
                ctx.write(pendingMessage, currentWrite.promise);
                requiresFlush = true;
            }

            if (!channel.isActive()) {
                // Channel died while we were writing: fail everything left.
                discard(new ClosedChannelException());
                break;
            }
        }

        if (requiresFlush) {
            ctx.flush();
        }
    }

    // Closes the input, logging (never propagating) any failure.
    private static void closeInput(ChunkedInput<?> chunks) {
        try {
            chunks.close();
        } catch (Throwable t) {
            if (logger.isWarnEnabled()) {
                logger.warn("Failed to close a chunked input.", t);
            }
        }
    }

    /**
     * A queued write: the outbound message plus the promise to complete once
     * the message (or, for a ChunkedInput, its final chunk) has been written.
     */
    private static final class PendingWrite {
        final Object msg;
        final ChannelPromise promise;

        PendingWrite(Object msg, ChannelPromise promise) {
            this.msg = msg;
            this.promise = promise;
        }

        // Release the message's refcount and fail the promise (tryFailure,
        // since the promise may already be done).
        void fail(Throwable cause) {
            ReferenceCountUtil.release(msg);
            promise.tryFailure(cause);
        }

        void success(long total) {
            if (promise.isDone()) {
                // No need to notify the progress or fulfill the promise because it's done already.
                return;
            }
            progress(total, total);
            promise.trySuccess();
        }

        // Reports progress only when the caller supplied a progressive promise.
        void progress(long progress, long total) {
            if (promise instanceof ChannelProgressivePromise) {
                ((ChannelProgressivePromise) promise).tryProgress(progress, total);
            }
        }
    }
}
