
package com.naza.rpc.netty;

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import com.naza.rpc.compiler.AccessAdaptiveProvider;
import com.naza.rpc.core.AbilityDetailProvider;
import com.naza.rpc.core.RpcSystemConfig;
import com.naza.rpc.discovery.ConsulDiscovery;
import com.naza.rpc.discovery.NacosDiscovery;
import com.naza.rpc.model.MessageKeyVal;
import com.naza.rpc.model.RpcRequest;
import com.naza.rpc.model.RpcResponse;
import com.naza.rpc.model.ServiceInstance;
import com.naza.rpc.parallel.NamedThreadFactory;
import com.naza.rpc.parallel.RpcThreadPool;
import com.naza.rpc.register.ConsulRegister;
import com.naza.rpc.register.NacosRegister;
import com.naza.rpc.register.Registry;
import com.naza.rpc.register.ZookeeperRegister;
import com.naza.rpc.serialize.RpcSerializeProtocol;
import com.naza.rpc.util.PropertiesUtil;
import io.netty.bootstrap.ServerBootstrap;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.channel.AdaptiveRecvByteBufAllocator;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioServerSocketChannel;

import java.nio.channels.spi.SelectorProvider;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ExecutorCompletionService;
import java.util.logging.Level;

import org.springframework.beans.BeansException;
import org.springframework.context.ApplicationContext;
import org.springframework.context.ApplicationContextAware;

/**
 * RPC server-side executor. Loads the RPC service interfaces and their
 * implementation bindings through Spring, initializes the Netty boss/worker
 * event-loop groups, registers exported services with the configured
 * discovery backend, and starts the Netty server.
 *
 * @author yl
 */
public class RpcReceiveExecutor implements ApplicationContextAware {

    // "ip:port" string the server binds to.
    private String serverAddress;

    private String host;

    private int port;

    private int echoApiPort;

    // Wire serialization protocol; defaults to plain JDK serialization.
    private RpcSerializeProtocol serializeProtocol = RpcSerializeProtocol.JDKSERIALIZE;

    // Separator between host and port in serverAddress (":").
    private static final String DELIMITER = RpcSystemConfig.DELIMITER;

    // Worker thread count: twice the number of processors the JVM reports.
    private static final int PARALLEL = RpcSystemConfig.SYSTEM_PROPERTY_PARALLEL * 2;

    private static int threadNums = RpcSystemConfig.SYSTEM_PROPERTY_THREADPOOL_THREAD_NUMS;

    private static int queueNums = RpcSystemConfig.SYSTEM_PROPERTY_THREADPOOL_QUEUE_NUMS;

    // Lazily created business thread pool; volatile so the double-checked
    // locking in submit() publishes it safely.
    private static volatile ListeningExecutorService threadPoolExecutor;

    // Maps service name -> service implementation (wrapped by ServiceFilterBinder).
    private Map<String, Object> handlerMap = new ConcurrentHashMap<String, Object>();

    private int numberOfEchoThreadsPool = 1;

    // Netty is configured with a boss/worker (master/slave) event-loop model
    // to cope with high concurrency; single- or multi-threaded IO models are
    // also supported and could be configured per business need.
    ThreadFactory threadRpcFactory = new NamedThreadFactory("Naza ThreadFactory");

    EventLoopGroup boss = new NioEventLoopGroup();

    EventLoopGroup worker = new NioEventLoopGroup(PARALLEL, threadRpcFactory, SelectorProvider.provider());

    private Registry registry;

    private RpcReceiveExecutor() {
        register();
        // Pick the service-registry implementation from configuration.
        // BUG FIX: the original used "nacos".contains(discoveryMode), which is
        // inverted — an empty discovery.mode matched every branch (silently
        // ending up on Zookeeper) and a null value threw NullPointerException.
        // Compare the configured value against each known mode instead.
        String discoveryMode = PropertiesUtil.getINSTANCE().getValue("discovery.mode");
        if ("nacos".equalsIgnoreCase(discoveryMode)) {
            registry = new NacosRegister();
        } else if ("consul".equalsIgnoreCase(discoveryMode)) {
            registry = new ConsulRegister();
        } else if ("zookeeper".equalsIgnoreCase(discoveryMode)) {
            registry = new ZookeeperRegister();
        }
        // Unknown/absent modes leave registry null, matching the original
        // behavior for unrecognized values; start() will then fail fast.
    }

    /** Lazy-holder idiom: the singleton is created thread-safely on first access. */
    private static class MessageRecvExecutorHolder {
        static final RpcReceiveExecutor INSTANCE = new RpcReceiveExecutor();
    }

    public static RpcReceiveExecutor getInstance() {
        return MessageRecvExecutorHolder.INSTANCE;
    }

    /**
     * Submits an RPC request-handling task to the server-side business thread
     * pool and, on success, writes the response back to the client.
     *
     * @param task     business logic that populates {@code response}
     * @param ctx      channel context used to write the response
     * @param request  incoming request (its message id is logged on completion)
     * @param response response object flushed to the client on success
     */
    public static void submit(Callable<Boolean> task, final ChannelHandlerContext ctx,
                              final RpcRequest request, final RpcResponse response) {
        // Double-checked locking; safe because threadPoolExecutor is volatile.
        if (threadPoolExecutor == null) {
            synchronized (RpcReceiveExecutor.class) {
                if (threadPoolExecutor == null) {
                    // Create the pool, optionally JMX-instrumented for monitoring.
                    threadPoolExecutor = MoreExecutors
                            .listeningDecorator((ThreadPoolExecutor) (RpcSystemConfig.isMonitorServerSupport()
                                    ? RpcThreadPool.getExecutorWithJmx(threadNums, queueNums)
                                    : RpcThreadPool.getExecutor(threadNums, queueNums)));
                }
            }
        }
        // Run the business logic asynchronously.
        ListenableFuture<Boolean> listenableFuture = threadPoolExecutor.submit(task);
        Futures.addCallback(listenableFuture, new FutureCallback<Boolean>() {
            @Override
            public void onSuccess(Boolean result) {
                // Business logic finished; flush the response to the client.
                ctx.writeAndFlush(response).addListener(new ChannelFutureListener() {
                    @Override
                    public void operationComplete(ChannelFuture channelFuture) throws Exception {
                        // Response sent back to the client; log the message id.
                        System.out.println("RPC Server Send message-id response:" + request.getMessageId());
                    }
                });
            }

            @Override
            public void onFailure(Throwable t) {
                t.printStackTrace();
            }
        }, threadPoolExecutor);
    }

    /**
     * Copies the Spring-configured service-name -> implementation bindings
     * into {@link #handlerMap}. Note: this instance is created with
     * {@code new}, not as a Spring bean, so Spring does not invoke this
     * callback automatically.
     *
     * @see org.springframework.context.ApplicationContextAware#setApplicationContext(org.springframework.context.ApplicationContext)
     */
    @Override
    public void setApplicationContext(ApplicationContext ctx) throws BeansException {
        // MessageKeyVal is already on the compile-time classpath (it is
        // imported above), so look the bean up by class directly instead of
        // the original reflective Class.forName, which forced a needless
        // ClassNotFoundException handler.
        MessageKeyVal keyVal = ctx.getBean(MessageKeyVal.class);
        Map<String, Object> rpcServiceObject = keyVal.getMessageKeyVal();
        // putAll replaces the original raw-typed Set/Iterator copy loop.
        handlerMap.putAll(rpcServiceObject);
    }

    /**
     * Registers every exported service with the discovery backend, then
     * starts the Netty server, blocking until the bind completes.
     */
    public void start() {
        try {
            // Register each service instance; if this ever becomes a
            // bottleneck, registration could be made asynchronous.
            for (Map.Entry<String, Object> entry : handlerMap.entrySet()) {
                registry.register(create(entry.getKey()));
            }

            // Bootstrap the Netty server.
            ServerBootstrap bootstrap = new ServerBootstrap();
            bootstrap.group(boss, worker).channel(NioServerSocketChannel.class)
                    .childHandler(new RpcReceiveChannelInitializer(handlerMap)
                            .buildRpcSerializeProtocol(serializeProtocol))
                    // Accept-queue depth of 128.
                    .option(ChannelOption.SO_BACKLOG, 128)
                    // Disable Nagle on the server socket...
                    .option(ChannelOption.TCP_NODELAY, true)
                    // ...and on each accepted child channel.
                    .childOption(ChannelOption.TCP_NODELAY, true)
                    // Allow rebinding sockets stuck in TIME_WAIT.
                    .childOption(ChannelOption.SO_REUSEADDR, true)
                    .childOption(ChannelOption.SO_KEEPALIVE, false)
                    // Linger for up to 2 seconds on close.
                    .childOption(ChannelOption.SO_LINGER, 2)
                    .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
                    .childOption(ChannelOption.RCVBUF_ALLOCATOR, AdaptiveRecvByteBufAllocator.DEFAULT);

            String[] ipAddr = serverAddress.split(RpcReceiveExecutor.DELIMITER);
            // The address must be of the form "ip:port"; anything else is rejected.
            if (ipAddr.length == RpcSystemConfig.IPADDR_OPRT_ARRAY_LENGTH) {
                host = ipAddr[0];
                port = Integer.parseInt(ipAddr[1]);
                // Bind and block until the server socket is up.
                ChannelFuture future = bootstrap.bind(host, port).sync();

                future.addListener(new ChannelFutureListener() {
                    @Override
                    public void operationComplete(final ChannelFuture channelFuture) throws Exception {
                        if (channelFuture.isSuccess()) {
                            final ExecutorService executor = Executors
                                    .newFixedThreadPool(numberOfEchoThreadsPool);
                            ExecutorCompletionService<Boolean> completionService = new ExecutorCompletionService<Boolean>(
                                    executor);
                            // Ability/echo page (currently disabled):
                            // completionService.submit(new
                            // ApiEchoResolver(host, echoApiPort));
                            System.out.printf(
                                    "Naza Server start success!\nip:%s\nport:%d\nprotocol:%s\nstart-time:%s\n\n",
                                    host, port, serializeProtocol,
                                    (RpcSystemConfig.SYSTEM_PROPERTY_JMX_METRICS_SUPPORT ? "open" : "close"));
                            // BUG FIX: the original called closeFuture().sync()
                            // here, which blocks inside a Netty event-loop
                            // callback — Netty rejects that with
                            // BlockingOperationException. Attaching the
                            // listener alone is enough to shut the executor
                            // down when the channel closes.
                            channelFuture.channel().closeFuture()
                                    .addListener(new ChannelFutureListener() {
                                        @Override
                                        public void operationComplete(ChannelFuture future) throws Exception {
                                            executor.shutdownNow();
                                        }
                                    });
                        }
                    }
                });
            } else {
                System.out.printf("Naza RPC Server start fail!\n");
            }
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers can observe the interruption.
            Thread.currentThread().interrupt();
            e.printStackTrace();
        }
    }

    /** Shuts down both Netty event-loop groups gracefully. */
    public void stop() {
        worker.shutdownGracefully();
        boss.shutdownGracefully();
    }

    // Pre-registers two built-in providers (compiler SPI and ability detail SPI).
    private void register() {
        handlerMap.put(RpcSystemConfig.RPC_COMPILER_SPI_ATTR, new AccessAdaptiveProvider());
        handlerMap.put(RpcSystemConfig.RPC_ABILITY_DETAIL_SPI_ATTR, new AbilityDetailProvider());
    }

    public Map<String, Object> getHandlerMap() {
        return handlerMap;
    }

    public void setHandlerMap(Map<String, Object> handlerMap) {
        this.handlerMap = handlerMap;
    }

    public String getServerAddress() {
        return serverAddress;
    }

    /**
     * Sets the "ip:port" bind address and eagerly parses host/port so that
     * {@link #create(String)} can use them before {@link #start()} runs.
     */
    public void setServerAddress(String serverAddress) {
        this.serverAddress = serverAddress;
        // Use the shared DELIMITER constant instead of a hard-coded ":" so
        // this stays consistent with the parsing in start().
        int sep = serverAddress.indexOf(DELIMITER);
        this.host = serverAddress.substring(0, sep);
        this.port = Integer.parseInt(serverAddress.substring(sep + DELIMITER.length()));
    }

    public RpcSerializeProtocol getSerializeProtocol() {
        return serializeProtocol;
    }

    public void setSerializeProtocol(RpcSerializeProtocol serializeProtocol) {
        this.serializeProtocol = serializeProtocol;
    }

    public int getEchoApiPort() {
        return echoApiPort;
    }

    public void setEchoApiPort(int echoApiPort) {
        this.echoApiPort = echoApiPort;
    }

    /**
     * Builds a {@link ServiceInstance} describing this server for the given
     * service name, registered under the "DEFAULT" group with a random id.
     */
    private ServiceInstance create(String serviceName) {
        ServiceInstance serviceInstance = new ServiceInstance();
        serviceInstance.setId(UUID.randomUUID().toString());
        serviceInstance.setIp(host);
        serviceInstance.setPort(port);
        serviceInstance.setServiceName(serviceName);
        serviceInstance.setGroupName("DEFAULT");
        return serviceInstance;
    }
}
