package org.artifact.core.plugin.netty.server;

import cn.hutool.core.map.MapUtil;
import cn.hutool.log.Log;
import cn.hutool.log.LogFactory;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.epoll.Epoll;
import io.netty.channel.epoll.EpollEventLoopGroup;
import io.netty.channel.epoll.EpollServerSocketChannel;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.handler.timeout.IdleStateHandler;

import java.util.Map;
import java.util.concurrent.TimeUnit;

/**
 * Abstract base for a Netty TCP server: configures the {@code ServerBootstrap},
 * event loop groups (NIO or native epoll), and the base channel pipeline
 * (heartbeat/idle detection). Subclasses add protocol-specific handlers.
 *
 * @author : SandKing
 * @CreateDate : 2020/3/25 15:42
 */
public abstract class NettyTcpServer implements NettyServer {
	private static final Log log = LogFactory.get();

	/** CPU-count thresholds used to size the worker event-loop pool. */
	private static final int CPU_MIN_COUNT = 4;
	private static final int CPU_MAX_COUNT = 8;
	/** Default worker thread count, derived from available processors. */
	public static final int DEFAULT_EVENT_LOOP_THREADS;
	static {
		int count = Runtime.getRuntime().availableProcessors();
		if (count <= CPU_MIN_COUNT) {
			DEFAULT_EVENT_LOOP_THREADS = count * 2;
		} else if (count <= CPU_MAX_COUNT) {
			DEFAULT_EVENT_LOOP_THREADS = count + 4;
		} else {
			DEFAULT_EVENT_LOOP_THREADS = 12;
		}
	}
	/** Server name. */
	protected String name;
	/** Listen port. */
	protected int port;
	/** Reader-idle heartbeat timeout in seconds; a value <= 0 disables the idle handler. */
	protected int heartbeat;
	/** Whether to use the native epoll transport when available (Linux only). */
	protected boolean epollActive;
	/** Worker thread count; 0 means use {@link #DEFAULT_EVENT_LOOP_THREADS}. */
	protected int workthreads;

	protected ServerBootstrap bootstrap = null;
	/** Acceptor (boss) event loop group. */
	protected EventLoopGroup bossGroup = null;
	/** Worker event loop group. */
	protected EventLoopGroup workGroup = null;

	/**
	 * Configures the bootstrap, transport and socket options from the given settings map.
	 * Expected keys: {@code name}, {@code port}, {@code heartbeat},
	 * {@code worknThread} (historical spelling), {@code epollActive}.
	 *
	 * @param map server configuration; must contain the keys listed above
	 */
	@Override
	public void init(Map map) {
		name = MapUtil.getStr(map,"name");
		port = MapUtil.getInt(map,"port");
		heartbeat = MapUtil.getInt(map,"heartbeat");
		// NOTE: key spelling "worknThread" is kept as-is for backward compatibility with existing configs.
		workthreads = MapUtil.getInt(map,"worknThread");
		epollActive = MapUtil.getBool(map,"epollActive");


		bootstrap = new ServerBootstrap();

		final int nThreads = workthreads == 0 ? DEFAULT_EVENT_LOOP_THREADS : workthreads;
		if (epollActive && Epoll.isAvailable()) {
			this.bossGroup = new EpollEventLoopGroup(1);
			this.workGroup = new EpollEventLoopGroup(nThreads);
			bootstrap.group(bossGroup, workGroup).channel(EpollServerSocketChannel.class);
		} else {
			this.bossGroup = new NioEventLoopGroup(1);
			this.workGroup = new NioEventLoopGroup(nThreads);
			bootstrap.group(bossGroup, workGroup).channel(NioServerSocketChannel.class);
		}

		// Socket option: allow fast address reuse after restart (default false).
		bootstrap.option(ChannelOption.SO_REUSEADDR, true);
		// Socket option: accept-queue length; connections are refused once the queue is full.
		// OS defaults: 200 on Windows, 128 elsewhere.
		bootstrap.option(ChannelOption.SO_BACKLOG, 65535);

		// TCP option: send data immediately by disabling Nagle's algorithm
		// (Netty defaults to true while the OS defaults to false). Nagle coalesces
		// small fragments into larger packets to minimize packet count; disabling it
		// minimizes transmission latency for small messages.
		bootstrap.childOption(ChannelOption.TCP_NODELAY, true);

		// Build the pipeline for every accepted child channel.
		bootstrap.childHandler(new ChannelInitializer<SocketChannel>() {
			@Override
			public void initChannel(SocketChannel ch) {
				buildChannelPipeline(ch.pipeline());
			}
		});
	}

	/**
	 * Installs the base handlers; subclasses override/extend this to add
	 * protocol-specific handlers after calling {@code super.buildChannelPipeline(pipeline)}.
	 *
	 * @param pipeline the pipeline of a newly accepted channel
	 */
	@Override
	public void buildChannelPipeline(ChannelPipeline pipeline){
		// Enable the heartbeat mechanism only when a positive timeout is configured.
		if (heartbeat > 0) {
			pipeline.addLast("idleStateHandler", new IdleStateHandler(heartbeat, 0, 0, TimeUnit.SECONDS));
		}
	}

	/**
	 * Binds the configured port and blocks until the bind completes.
	 * Must be called after {@link #init(Map)}.
	 *
	 * @throws IllegalStateException if {@link #init(Map)} has not been called
	 */
	@Override
	public void startup() {
		if (bootstrap == null) {
			// Fail fast with a clear message instead of an NPE on bootstrap.bind(...).
			throw new IllegalStateException("init() must be called before startup()");
		}
		try {
			bootstrap.bind(port).sync();
			log.info("Netty Server Start on {}", port);
		} catch (InterruptedException e) {
			// Restore the interrupt status so callers can observe the interruption.
			Thread.currentThread().interrupt();
			log.error(e);
		} catch (Exception e) {
			// e.g. bind failure (port already in use); preserve original best-effort behavior.
			log.error(e);
		}
	}

	/**
	 * Gracefully shuts down both event loop groups, waiting for completion.
	 * Safe to call even if {@link #init(Map)} was never invoked.
	 */
	@Override
	public void shutdown() {
		try {
			if (bossGroup != null) {
				bossGroup.shutdownGracefully().sync();
			}
			if (workGroup != null) {
				workGroup.shutdownGracefully().sync();
			}
			log.info("Netty Server Stop Success!");
		} catch (InterruptedException e) {
			// Restore the interrupt status so callers can observe the interruption.
			Thread.currentThread().interrupt();
			log.error(e);
		}
	}
}
