package tsif.tcluster.netty;

import java.util.Locale;
import java.util.concurrent.TimeUnit;

import io.netty.bootstrap.ServerBootstrap;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.channel.ChannelHandler;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.handler.timeout.IdleState;
import io.netty.handler.timeout.IdleStateEvent;
import io.netty.handler.timeout.IdleStateHandler;
import tsif.tcluster.TClusterContext;
import tsif.tcluster.netty.TClusterNettyServer.ConnectSession.TClusterServerHandler;
import tsif.tcluster.netty.codec.TClusterNettyDecoder;
import tsif.tcluster.netty.codec.TClusterNettyEncoder;
import tsif.tcluster.netty.codec.TClusterNettyMessage;
import tsif.tcluster.netty.impl.NettyServer;
import tsif.tcluster.rpc.TClusterRpcInfoMsg;
import tsif.tcluster.rpc.TClusterRpcProtocol;
import tsif.tcluster.rpc.TClusterRpcUtils;

/**
 * Cluster server.<br>
 * Listens for connection requests from other nodes in the cluster environment.<br>
 */
public class TClusterNettyServer extends NettyServer {
	/** RPC message buffer size: 10 MB. */
	protected final static int BUFF_SIZE = 1024 * 1024 * 10;

	/** Shared cluster context; supplies the event loop group and running state. */
	protected TClusterNettyContext context;

	public TClusterNettyServer(TClusterNettyContext context) {
		this.context = context;
	}

	/**
	 * Configures socket options on the server bootstrap.
	 *
	 * @param bootstrap the Netty server bootstrap to configure
	 * @return always {@code true} (configuration never fails)
	 */
	@Override
	protected boolean initOption(ServerBootstrap bootstrap) {
		bootstrap.option(ChannelOption.SO_REUSEADDR, true);
		bootstrap.option(ChannelOption.SO_RCVBUF, BUFF_SIZE);
		bootstrap.option(ChannelOption.SO_BACKLOG, 128);
		bootstrap.option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);

		bootstrap.childOption(ChannelOption.SO_KEEPALIVE, true);
		bootstrap.childOption(ChannelOption.SO_LINGER, 0);
		bootstrap.childOption(ChannelOption.TCP_NODELAY, true);
		// Receive buffer size. In general it is NOT advisable to pin SO_RCVBUF: the Linux
		// kernel auto-tunes it per connection, and hard-coding the size disables that
		// auto-tuning — which matters especially for applications with many long-lived
		// connections.
		// NOTE: getProperty may return null on exotic JVMs; default to "" to avoid an NPE.
		String osname = System.getProperty("os.name", "").toLowerCase(Locale.US);
		if (osname.contains("windows")) {
			bootstrap.childOption(ChannelOption.SO_RCVBUF, BUFF_SIZE); // Windows does not auto-tune the receive buffer.
		}
		bootstrap.childOption(ChannelOption.SO_SNDBUF, BUFF_SIZE);
		bootstrap.childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);

		// Write-buffer watermarks: when the outbound buffer exceeds the high watermark the
		// channel's isWritable() flips to false; once it drains below the low watermark it
		// flips back to true. Callers should check isWritable() and stop writing while it is
		// false. Values are in bytes (Netty defaults: 64K high / 32K low); tune according to
		// expected connection count and available memory.
		bootstrap.childOption(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, (int) (BUFF_SIZE * 0.8));
		bootstrap.childOption(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, (int) (BUFF_SIZE * 0.5));
		return true;
	}

	/**
	 * Builds the per-connection pipeline: idle-timeout, codec, and the session handler.
	 *
	 * @return a channel initializer installed on every accepted connection
	 */
	@Override
	protected ChannelHandler createHandler() {
		return new ChannelInitializer<SocketChannel>() {
			@Override
			protected void initChannel(SocketChannel ch) throws Exception {
				ConnectSession session = new ConnectSession(context, ch);
				TClusterServerHandler handler = session.new TClusterServerHandler();
				handler.setSession(session);

				// Pipeline: close after 5 minutes without reads, then decode/encode, then dispatch.
				ChannelPipeline pipeline = ch.pipeline();
				pipeline.addLast(new IdleStateHandler(5 * 60, 0, 0, TimeUnit.SECONDS));
				pipeline.addLast("decoder", new TClusterNettyDecoder());
				pipeline.addLast("encoder", new TClusterNettyEncoder());
				pipeline.addLast("handler", handler);
			}
		};
	}

	/**
	 * Reuses the context's shared event loop group as the acceptor (boss) group when one is
	 * configured; otherwise falls back to the default created by the superclass.
	 */
	@Override
	protected EventLoopGroup createParentGroup(String name) {
		NioEventLoopGroup eventLoopGroup = context.getEventLoopGroup();
		if (eventLoopGroup != null) {
			return eventLoopGroup;
		}
		return super.createParentGroup(name);
	}

	/**
	 * Reuses the context's shared event loop group as the worker (child) group when one is
	 * configured; otherwise falls back to the default created by the superclass.
	 */
	@Override
	protected EventLoopGroup createChildGroup(String name) {
		NioEventLoopGroup eventLoopGroup = context.getEventLoopGroup();
		if (eventLoopGroup != null) {
			return eventLoopGroup;
		}
		return super.createChildGroup(name);
	}

	/** Session for an inbound cluster-client connection. **/
	protected static class ConnectSession extends TClusterNettySession {
		public ConnectSession(TClusterContext context, SocketChannel ch) {
			super((TClusterNettyContext) context, 0, ch);
		}

		/**
		 * Handles an inbound packet. RPC_INFO handshake packets record the peer's context id;
		 * everything else is delegated to the superclass.
		 *
		 * @param packet the decoded inbound message
		 * @throws Exception if the RPC_INFO payload cannot be parsed, or the superclass fails
		 */
		@Override
		protected void recv(TClusterNettyMessage packet) throws Exception {
			short code = packet.getCode();
			// Handshake: the connecting peer announces itself.
			if (code == TClusterRpcProtocol.RPC_INFO) {
				TClusterRpcInfoMsg infomsg = TClusterRpcUtils.toObject(packet.getData(), TClusterRpcInfoMsg.class);
				if (infomsg == null) {
					// infomsg is null here, so report the packet code instead of concatenating
					// the (always-null) message object.
					throw new RuntimeException("rpc info error! cannot parse RPC_INFO payload, code=" + code);
				}

				// Deliberately NOT validating infomsg.getTargetContextId() against our own
				// contextId — the check caused too many kinds of false failures in practice.

				// Remember which cluster node is on the other end of this connection.
				ConnectSession.this.targetContextId = infomsg.getSourceContextId();
				return;
			}

			// All other codes: default session handling.
			super.recv(packet);
		}

		/** Channel handler bridging Netty events into this session. **/
		class TClusterServerHandler extends TClusterNettySessionHandler {
			@Override
			public void channelActive(ChannelHandlerContext ctx) throws Exception {
				super.channelActive(ctx);
				// Reject connections accepted while the server is absent or shutting down.
				TClusterNettyContext context = getContext();
				if (context == null || !context.isRunning()) {
					ctx.close();
					return;
				}
			}

			@Override
			public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
				if (evt instanceof IdleStateEvent) {
					IdleStateEvent event = (IdleStateEvent) evt;
					if (event.state() == IdleState.READER_IDLE) {
						ctx.channel().close(); // Close idle connections (no reads within the timeout).
					}
				} else {
					super.userEventTriggered(ctx, evt);
				}
			}
		}

	}

	/**
	 * Starts the server synchronously on the given port with a 10-second bind timeout.
	 *
	 * @param port the TCP port to listen on
	 * @return {@code true} if the server started successfully
	 * @throws Exception if binding fails or times out
	 */
	@Override
	public boolean startSync(int port) throws Exception {
		return startSync(port, 10 * 1000);
	}
}