#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File     : pipecoco_server.py
@Project  : pipecoco
@Date     : 2021/8/13
@Author   : Zhang Jinyang
@Contact  : zhang-jy@sjtu.edu.cn
'''

from comm.utils import  *
from pipecoco.pipecoco import *
import grpc
from concurrent import futures
import time
from pipecoco.utils import *
from comm import pipecoco_pb2_grpc
import logging

logger = logging.getLogger("server")

"""
传算并行服务端，负责接收客户端数据和完成剩余部分计算。
用户使用时需要继承该类，并重写load_net方法

Args:
    host (str): 服务端的地址. 默认: localhost:50051.

Returns:
    并行传算服务端.

Examples:
    >>> class server_test(pipecoco_server):
            def __init__(self, host):
                pipecoco_server.__init__(self, host)

            def load_net(self, model_name):
                net = XXX() # XXX为自定义的获取模型结构的方法
                return net
"""

class pipecoco_server(pipecoco_pb2_grpc.pipecoco_inferenceServicer):
    """gRPC servicer for pipelined transmission/computation (PipeCoCo).

    Receives intermediate data from clients and completes the remaining
    part of the inference. Users must subclass this and override
    ``load_net`` to supply the model architecture.

    Args:
        host (str): Address the server binds to, e.g. ``localhost:50051``.
    """

    def __init__(self, host):
        # Registry of PipeCoCo models, keyed by model name.
        self.models = {}
        self.host = host
        # Decoding mode for incoming payloads; changed via alter_encoded_mode.
        self.encoded_mode = 'encoded'

    def create_model(self, request, context):
        """Create a new PipeCoCo model described by the client's request.

        Returns:
            pipecoco_pb2.result: acknowledgement with the creation timestamp.
        """
        # Obtain the (partitioned) network structure and build the model.
        layers_list = self.load_reconstruct_model(request.model_name)

        self.models[request.model_name] = PipeCoCo(
            layers_list, request.input_shape, request.block_num,
            request.fused_layers_scope, role='server',
            batch_size=request.batch_size,
            divided_point=request.divided_point)

        # The rendered message is unchanged from before; a redundant,
        # ignored second .format() argument was removed.
        return pipecoco_pb2.result(
            msg="{} model, server created.".format(request.model_name),
            time=time.time(), status=1)

    def pipecoco_forward(self, request_iterator, context):
        """Server-side forward pass in pipelined mode.

        Runs in two phases: per-block forward passes over the fused layers
        as fragments stream in, then a single pass over the remaining
        (non-fused) layers.

        Returns:
            pipecoco_pb2.infer_result: predictions plus timing information.

        Raises:
            ValueError: if the request stream is empty.
        """
        transmit_end_time = []
        server_cal_start_time = []
        server_cal_end_time = []

        model_name = None
        for request in request_iterator:
            transmit_end_time.append(time.time())
            server_cal_start_time.append(time.time())
            model_name = request.model_name
            # Decode the transmitted fragment.
            data = unpack_data(request)

            # Forward pass for the block this fragment belongs to.
            self.models[model_name].fused_layers_forward(data, request.block_id)
            server_cal_end_time.append(time.time())

        if model_name is None:
            # An empty stream would otherwise hit an undefined `request`
            # below (NameError); fail with a clear message instead.
            raise ValueError("pipecoco_forward received an empty request stream")

        rest_layers_forward_start_time = time.time()
        # Forward pass over the remaining (non-fused) layers.
        x = self.models[model_name].rest_layers_forward()
        pred = np.argmax(x, axis=1)
        rest_layers_forward_end_time = time.time()

        # Return predictions together with the collected timings.
        fragment_info = pipecoco_pb2.infer_result(
            fragment=pred.flatten(), fragment_shape=list(pred.shape),
            transmit_end_time=transmit_end_time,
            server_cal_start_time=server_cal_start_time,
            server_cal_end_time=server_cal_end_time,
            rest_layers_start_time=rest_layers_forward_start_time,
            rest_layers_end_time=rest_layers_forward_end_time
        )

        logger.info("=" * 70)
        logger.info("model_name = {} F = {} G = {}".format(
            model_name, self.models[model_name].F,
            self.models[model_name].block_num))
        logger.info('\t'.join(
            ['fused layers cal time']
            + [str(round(end - start, 4))
               for start, end in zip(server_cal_start_time, server_cal_end_time)]))
        logger.info("rest layers cal time {}".format(
            round(rest_layers_forward_end_time - rest_layers_forward_start_time, 4)))
        return fragment_info

    def normalcoco_forward(self, request, context):
        """Server-side computation in normal (non-pipelined) mode.

        Returns:
            pipecoco_pb2.infer_result: predictions plus timing information.
        """
        transmit_end_time = [time.time()]
        server_cal_start_time = [time.time()]

        # Decode the payload and run the full forward pass.
        data = unpack_data(request)
        x = self.models[request.model_name].normal_forward(data)
        pred = np.argmax(x, axis=1)
        server_cal_end_time = [time.time()]

        # Return predictions together with the collected timings.
        fragment_info = pipecoco_pb2.infer_result(
            fragment=pred.flatten(), fragment_shape=list(pred.shape),
            transmit_end_time=transmit_end_time,
            server_cal_start_time=server_cal_start_time,
            server_cal_end_time=server_cal_end_time
        )
        logger.info("=" * 70)
        logger.info("{} F = {} G = {}".format(
            request.model_name, self.models[request.model_name].F,
            self.models[request.model_name].block_num))
        logger.info("server cal time" + str(
            round(server_cal_end_time[-1] - server_cal_start_time[-1], 4)))

        return fragment_info

    def alter_batch_size(self, request, context):
        """Change the batch size of the named model per the client's request."""
        self.models[request.model_name].alter_batch_size(request.batch_size)
        return pipecoco_pb2.result(msg="accepted", time=time.time(), status=1)

    def accept_data(self, request, context):
        """Accept a unary payload; used for transmission measurement only."""
        # result = unpack_data(request)
        return pipecoco_pb2.result(msg="accepted", time=time.time(), status=1)

    def accept_stream_data(self, request_iterator, context):
        """Accept a stream of payloads and record per-fragment arrival times."""
        transmit_end_time = [time.time() for _ in request_iterator]
        return pipecoco_pb2.infer_result(transmit_end_time=transmit_end_time)

    def alter_config(self, request, context):
        """Update the (F, G) configuration of the named model."""
        self.models[request.model_name].alter_config(request.F, request.G)
        return pipecoco_pb2.result(msg="alter config success", time=time.time(), status=1)

    def alter_encoded_mode(self, request, context):
        """Update the payload decoding mode per the client's request."""
        self.encoded_mode = request.encoded_mode
        return pipecoco_pb2.result(msg="alter encoded mode success", time=time.time(), status=1)

    def load_reconstruct_model(self, model_name):
        """Fetch the network via ``load_net`` and partition it into layers.

        Called by ``create_model``.
        """
        net = self.load_net(model_name)
        return layers_partitioner(net)

    def load_net(self, model_name):
        """Return the model architecture for *model_name*.

        IMPORTANT: subclasses MUST override this method.

        Raises:
            NotImplementedError: always, in the base class. (Previously this
            silently returned None, which produced an obscure failure inside
            ``layers_partitioner``.)
        """
        raise NotImplementedError(
            "Subclasses of pipecoco_server must override load_net")

    def serve(self):
        """Start the gRPC server and block until terminated."""
        MAX_MESSAGE_LENGTH = 256 * 1024 * 1024  # 256 MB; tune to your workload
        server_options = [
            ('grpc.max_send_message_length', MAX_MESSAGE_LENGTH),
            ('grpc.max_receive_message_length', MAX_MESSAGE_LENGTH),
        ]

        # Concurrent inference tasks are not supported yet (would require
        # resource scheduling), hence the small worker pool.
        server = grpc.server(
            futures.ThreadPoolExecutor(max_workers=2), options=server_options
        )
        pipecoco_pb2_grpc.add_pipecoco_inferenceServicer_to_server(self, server)
        server.add_insecure_port(self.host)
        print('start server')
        server.start()
        try:
            # Block here instead of the former sleep loop; the original also
            # called wait_for_termination() after stop(0), where it returned
            # immediately and served no purpose.
            server.wait_for_termination()
        except KeyboardInterrupt:
            server.stop(0)