package com.freedom.monitor.myeye.metrics.server;

import java.io.PrintWriter;
import java.io.StringWriter;
import java.util.Properties;

import com.freedom.monitor.myeye.metrics.server.handler.HttpUserHandler;
import com.freedom.monitor.myeye.metrics.server.utils.PropertyUtils;
import com.freedom.rpc.thrift.common.utils.Logger;
import com.freedom.rpc.thrift.common.zookeeper.client.ClientSideZkReadyListener;

import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.handler.codec.http.HttpObjectAggregator;
import io.netty.handler.codec.http.HttpRequestDecoder;
import io.netty.handler.codec.http.HttpResponseEncoder;
import io.netty.handler.stream.ChunkedWriteHandler;

/**
 * Standalone HTTP metrics server built on Netty.
 *
 * <p>Reads its port and thread-pool sizing from the properties loaded by
 * {@link PropertyUtils}, then binds an HTTP pipeline on 0.0.0.0 and blocks
 * until the server channel closes.
 */
public class MetricsServer {
	private static final Logger logger = Logger.getLogger(MetricsServer.class);

	// Property keys looked up in the configuration from PropertyUtils.
	private static final String NETTY_PORT = "nettyPort";
	private static final String NETTY_BOSS = "nettyBoss";
	private static final String NETTY_SELECTOR = "nettySelector";
	private static final String NETTY_MAX_SIZE = "nettyMaxSize";

	static {
		// The RPC framework requires this step to run first.
		ClientSideZkReadyListener.ready();
	}

	/**
	 * Metrics server entry point.
	 *
	 * <p>Starts the Netty HTTP server and blocks the calling thread until the
	 * server channel is closed; the event-loop groups are always shut down on
	 * exit.
	 *
	 * @param args unused
	 */
	public static void main(String[] args) {
		logger.info("MetricsServer.main start...");
		int cpu = Runtime.getRuntime().availableProcessors();
		logger.info("cpu --->" + cpu);

		Properties prop = PropertyUtils.getInstance();
		int port = parseIntProperty(prop, NETTY_PORT);
		int bossNum = parseIntProperty(prop, NETTY_BOSS);
		// Worker (selector) threads scale with the CPU count.
		int selectorNum = cpu * parseIntProperty(prop, NETTY_SELECTOR);
		final int maxSize = parseIntProperty(prop, NETTY_MAX_SIZE);
		logger.info("port: " + port + "\n bossNum: " + bossNum + "\n selectorNum: " + selectorNum + "\n maxSize: "
				+ maxSize);

		// Build the HTTP server.
		EventLoopGroup bossGroup = new NioEventLoopGroup(bossNum);
		EventLoopGroup workerGroup = new NioEventLoopGroup(selectorNum);
		try {
			ServerBootstrap b = new ServerBootstrap();
			b.option(ChannelOption.SO_BACKLOG, 8192 * 2);
			b.group(bossGroup, workerGroup)//
					.channel(NioServerSocketChannel.class)//
					.childHandler(new ChannelInitializer<SocketChannel>() {
						@Override
						protected void initChannel(SocketChannel ch) throws Exception {
							// Framework handlers that perform HTTP encode/decode.
							ch.pipeline().addLast("http_decoder", new HttpRequestDecoder());
							ch.pipeline().addLast("http_aggregator", new HttpObjectAggregator(maxSize));// default 1M
							ch.pipeline().addLast("http_encoder", new HttpResponseEncoder());
							ch.pipeline().addLast("http_chunked", new ChunkedWriteHandler());
							// User-level business logic is handled here.
							ch.pipeline().addLast("http_user_defined", new HttpUserHandler());
						}
					});
			// Bind the port and start listening.
			ChannelFuture future = b.bind("0.0.0.0", port).sync();
			logger.info("listening on 0.0.0.0:" + port);
			future.channel().closeFuture().sync();
		} catch (InterruptedException e) {
			// Restore the interrupt flag so shutdown hooks / callers can observe it.
			Thread.currentThread().interrupt();
			logger.error("interrupted while running server: " + stackTraceOf(e));
		} catch (Exception e) {
			// Log the full stack trace, not just e.toString(), so failures are diagnosable.
			logger.error(stackTraceOf(e));
		} finally {
			bossGroup.shutdownGracefully();
			workerGroup.shutdownGracefully();
			logger.info("server exit...");
		}
	}

	/**
	 * Reads a required integer property.
	 *
	 * @param prop the loaded configuration
	 * @param key  the property key to read
	 * @return the parsed integer value
	 * @throws IllegalStateException if the property is absent (instead of an
	 *                               uninformative NullPointerException)
	 * @throws NumberFormatException if the value is not a valid integer
	 */
	private static int parseIntProperty(Properties prop, String key) {
		String value = prop.getProperty(key);
		if (value == null) {
			throw new IllegalStateException("missing required property: " + key);
		}
		return Integer.parseInt(value.trim());
	}

	/** Renders a throwable's full stack trace as a string for string-only loggers. */
	private static String stackTraceOf(Throwable t) {
		StringWriter sw = new StringWriter();
		t.printStackTrace(new PrintWriter(sw));
		return sw.toString();
	}
}
