#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File     : pipecoco_server.py
@Project  : pipecoco
@Date     : 2021/8/13
@Author   : Zhang Jinyang
@Contact  : zhang-jy@sjtu.edu.cn
'''
import grpc
import os
import time
import matplotlib.pyplot as plt

from comm.utils import *
from comm.config import *
from comm import pipecoco_pb2_grpc
from pipecoco.pipecoco import *
import pickle
import logging

logger = logging.getLogger("client")
logger_optim = logging.getLogger("best_config")
"""
传算并行客户端，负责客户端的计算和发送数据到服务端。
用户使用时需要继承该类，并重写load_net方法

Args:
    host (str): 服务端的地址. 默认: localhost:50051.
    encoded_mode (str): 数据压缩编码方式.

Returns:
    并行传算客户端.

Examples:
    >>> class server_test(pipecoco_server):
            def __init__(self, host):
                pipecoco_server.__init__(self, host)

            def load_reconstruct_model(self, model_name):
                net = XXX() # XXX为自定义的获取模型结构的方法
                return net
"""


class pipecoco_client:
    """
    Client side of computation-communication pipelined (PipeCoCo) inference.

    The client runs the leading (fused) part of the network locally,
    compresses the intermediate feature blocks and streams them to the
    server over gRPC, which finishes the computation.  Subclasses must
    override :meth:`load_net` to supply the model structure.
    """

    def __init__(self, host='localhost:50051', record_path='./record', encoded_mode='lz4', batch_size=10):
        """
        Connect to the server, create the rpc channel and initialise state.

        :param host: server address
        :param record_path: directory where profiling records are persisted
        :param encoded_mode: compression codec for transmitted data
        :param batch_size: default inference batch size
        """

        # Create the rpc channel and the inference stub.
        channel = grpc.insecure_channel(host)
        self.stub = pipecoco_pb2_grpc.pipecoco_inferenceStub(channel)

        self.batch_size = batch_size
        self.record_path = record_path
        if not os.path.exists(record_path):
            os.makedirs(record_path)
        # PipeCoCo models, keyed by model name.
        self.models = {}

        # Per-model upper bound on the fused-layer scope F.
        self.F_max = {}
        # Per-model profiling records used for the configuration search.
        self.record = {}
        self.encoded_mode = encoded_mode
        # Tell the server which codec will be used on the wire.
        self.stub.alter_encoded_mode(pipecoco_pb2.encoded_config(encoded_mode=encoded_mode))


    def create_model(self, model_name, input_shape, divided_point=2, input=None, optim=True, batch_size=10):
        """
        Create a pipelined model on both client and server.

        :param model_name: name of the network model
        :param input_shape: input dimensions (depth, height and width)
        :param divided_point: layer index at which the network is divided
        :param input: preferably a real sample so the optimal configuration
                      is searched on realistic data; a zero ndarray with a
                      leading batch dimension is synthesised when omitted
        :param optim: whether to run the optimal-configuration search
        :param batch_size: inference batch size for this model
        """

        # Build and restructure the network once per model name.
        if model_name not in self.models:
            layers_list = self.load_reconstruct_model(model_name)

            # Maximum scope of the fusible part of the network.
            self.F_max[model_name] = len(layers_list[0])

            # Default configuration (G=1, F=0) is the plain serial mode.
            client = PipeCoCo(layers_list, input_shape=input_shape, role='client', G=1,
                              F=0, divided_point=divided_point, batch_size=batch_size)
            self.models[model_name] = client

        param = pipecoco_pb2.model_info(model_name=model_name, input_shape=input_shape, block_num=1,
                                        fused_layers_scope=0, divided_point=divided_point, batch_size=batch_size)

        # Mirror the model on the server side.
        self.stub.create_model(param)
        # `is None`, not `== None`: comparing an ndarray with `==` yields an
        # element-wise array and would break the truth test.
        if input is None:
            arr = np.zeros(input_shape)
            input = np.expand_dims(arr, 0)

        # Optionally search for the best configuration.
        if not optim:
            return
        self.optimize(model_name, input)

    def optimize(self, model_name, input):
        """Profile candidate (F, G) configurations and apply the best one
        on both client and server."""

        self.create_model_record(model_name, input)
        # Pick the best configuration from the recorded runs.
        fused_layers_scope, block_num = self.find_optimal_config(model_name)
        self.models[model_name].alter_config(fused_layers_scope, block_num)
        config_info = pipecoco_pb2.pipecoco_config(model_name=model_name, F=fused_layers_scope, G=block_num)
        self.stub.alter_config(config_info)


    def infer(self, input, model_name):
        """
        Inference entry point for users.

        Dispatches to serial mode (G == 1) or pipelined mode and logs the
        total wall-clock time of the run.
        """

        self.init_record()
        client = self.models[model_name]
        self.start_time = time.time()

        # Execute the computation mode selected by the current configuration.
        if client.block_num == 1:
            result = self.normalcoco_infer(input, model_name)
        else:
            result = self.pipecoco_infer(input, model_name, client)
        self.end_time = time.time()
        logger.info("="*70)
        logger.info(
            "{} F = {}, G = {}, total time = {}".format(model_name, client.F, client.block_num, round(self.end_time - self.start_time, 4)))
        if client.block_num != 1:
            logger.info("former layers cal time = {} rest layers cal time = {}".format(round(self.former_layers_end_time-self.former_layers_start_time,4), round(self.rest_layers_end_time - self.rest_layers_start_time, 4)))
        return result


    def pipecoco_infer(self, input, model_name, client):
        """
        Run the pipelined computation-communication inference.
        """

        self.former_layers_start_time = time.time()
        # Accept both framework tensors (with .asnumpy) and plain ndarrays:
        # create_model() falls back to a numpy zero sample when no real input
        # is supplied, and that sample has no asnumpy().
        data = input.asnumpy() if hasattr(input, 'asnumpy') else input
        x = client.former_layers_forward(data)
        self.former_layers_end_time = time.time()
        # Block layout of the input sample.
        init_area_map = client.area_map[:, 0]
        # Client and server compute in a pipeline: the generator below feeds
        # the streaming rpc block by block.
        block_data = self.pipecoco_client_forward(model_name, client, x, init_area_map)
        response = self.stub.pipecoco_forward(block_data)

        # Record server-side timing for later analysis.
        self.server_cal_start_time = list(response.server_cal_start_time)
        self.server_cal_end_time = list(response.server_cal_end_time)
        self.transmit_end_time = list(response.transmit_end_time)
        self.rest_layers_start_time = response.rest_layers_start_time
        self.rest_layers_end_time = response.rest_layers_end_time

        return response.fragment


    def pipecoco_client_forward(self, model_name, pipecoco_client, input, init_area_map):
        """
        Generator that computes each block on the client and yields the
        packed fragment for the streaming rpc.
        """

        for i, area in enumerate(init_area_map):

            self.client_cal_start_time.append(time.time())
            # Slice the block out of the input feature map.
            area = init_area_map[i]
            x = input[:, :, area[0][0]:area[0][1], area[1][0]:area[1][1]]
            try:
                # Forward pass through the client part of the fused network.
                x = pipecoco_client.fused_layers_forward(x, i)
            except Exception as e:
                # Deliberately best-effort: log the failure and keep the
                # pipeline going with the unmodified block.
                import traceback
                traceback.print_exc()
            self.client_cal_end_time.append(time.time())
            self.transmit_start_time.append(time.time())

            # Compress the block before transmission.
            fragment_info = pack_data(model_name, x, i)
            yield fragment_info


    def normalcoco_infer(self, input, model_name):
        """
        Run the serial (non-pipelined) computation.
        """
        self.former_layers_start_time = time.time()
        self.former_layers_end_time = time.time()
        client = self.models[model_name]
        self.client_cal_start_time.append(time.time())

        # Client-side computation; accept framework tensors or plain ndarrays
        # (see pipecoco_infer).
        data = input.asnumpy() if hasattr(input, 'asnumpy') else input
        x = client.normal_forward(data)
        fragment_info = pack_data(model_name, x, -1)

        self.client_cal_end_time.append(time.time())
        self.transmit_start_time.append(time.time())

        # Server-side computation via a unary rpc.
        response = self.stub.normalcoco_forward(fragment_info)

        # Record server-side timing for later analysis.
        self.transmit_end_time.append(response.transmit_end_time[0])
        self.server_cal_start_time.append(response.server_cal_start_time[0])
        self.server_cal_end_time.append(response.server_cal_end_time[0])
        self.rest_layers_start_time = 0
        self.rest_layers_end_time = 0

        return response.fragment


    def init_record(self):
        """
        Reset the per-run timing record.
        """
        self.former_layers_start_time = 0
        self.former_layers_end_time = 0
        self.client_cal_start_time = []
        self.client_cal_end_time = []
        self.transmit_start_time = []
        self.transmit_end_time = []
        self.server_cal_start_time = []
        self.server_cal_end_time = []
        self.rest_layers_start_time = 0
        self.rest_layers_end_time = 0


    def create_model_record(self, model_name, input):
        """Profile every candidate (F, G) configuration for the model, or
        load a previously persisted record from disk."""

        batch_size = self.models[model_name].batch_size
        record_path = os.path.join(self.record_path, '{}_pipecoco_record_{}.pkl'.format(model_name, batch_size))
        if not os.path.exists(record_path):
            self.record[model_name] = {}
            self.record[model_name]["process"] = {}
            self.record[model_name]["last_block_area"] = {}
            self.record[model_name]["last_block_transmit_time"] = {}

            # NOTE(review): profiles G in range(2, 6), while
            # find_optimal_config only searches range(2, 5) — confirm whether
            # G = 5 is intentionally profiled but excluded from the search.
            for F in range(max(F_scope[0], self.models[model_name].divided_point),
                           min(self.F_max[model_name], F_scope[1])):
                for b in range(2, 6):
                    fused_layers_scope = F
                    self.test_config_performance(fused_layers_scope, b, input, model_name)
            # Baseline: plain serial mode (F=0, G=1).
            self.test_config_performance(0, 1, input, model_name)

            # `with` guarantees the record file is closed even on error.
            with open(record_path, 'wb') as f:
                pickle.dump(self.record[model_name], f)
        else:
            with open(record_path, 'rb') as f:
                self.record[model_name] = pickle.load(f)


    def test_config_performance(self, F, G, input, model_name):
        """
        Measure the running time of one (F, G) configuration and store the
        per-stage timings under record[model_name].
        """

        self.models[model_name].alter_config(F, G)

        config_info = pipecoco_pb2.pipecoco_config(model_name=model_name, F=F, G=G)
        self.stub.alter_config(config_info)
        self.infer(input, model_name)
        n = len(self.server_cal_start_time)

        logger.info('\t'.join(['  fused stage  ']+[str(i) for i in range(n)]))
        logger.info('\t'.join(['client cal time']+[str(round(self.client_cal_end_time[i] - self.client_cal_start_time[i],4)) for i in range(n)]))
        logger.info('\t'.join(
            [' transmit time '] + [str(round(self.transmit_end_time[i]-self.transmit_start_time[i],4)) for i in range(n)]))
        logger.info('\t'.join(
            ['server cal time'] + [str(round(self.server_cal_end_time[i] - self.server_cal_start_time[i], 4)) for i in
                                   range(n)]))

        # Record layout (consumed by cal_pipecoco_time / draw_pipeline):
        # [0] client cal starts  [1] client cal ends  [2] transmit starts
        # [3] transmit ends      [4] server cal starts [5] server cal ends
        # [6] former-layers start [7] former-layers end
        # [8] rest-layers start   [9] rest-layers end   [10] total time
        self.record[model_name]["process"]['{}_{}'.format(F, G)] = [self.client_cal_start_time, self.client_cal_end_time,
                                                            self.transmit_start_time,
                                                            self.transmit_end_time, self.server_cal_start_time,
                                                            self.server_cal_end_time,
                                                            self.former_layers_start_time,self.former_layers_end_time,
                                                            self.rest_layers_start_time, self.rest_layers_end_time,
                                                            self.end_time - self.start_time]

        self.record[model_name]["last_block_transmit_time"]['{}_{}'.format(F, G)] = self.transmit_end_time[-1] - \
                                                                             self.transmit_start_time[-1]


    def load_reconstruct_model(self, model_name):
        """
        Called by create_model: load the network and partition it into layers.

        :param model_name: name of the network model
        :return: partitioned layer lists from layers_partitioner
        """

        net = self.load_net(model_name)
        return layers_partitioner(net)


    def load_net(self, model_name):
        """
        IMPORTANT!  Override this method in a subclass to build the model.

        :param model_name: name of the network model
        :return: [[fusible layer list], [non-fused layers]]
        """

        pass


    def alter_server(self, target, host):
        """Point the client at a different server and rebuild the stub."""
        self.target = target
        # Re-create the rpc channel against the new host.
        channel = grpc.insecure_channel(host)
        self.stub = pipecoco_pb2_grpc.pipecoco_inferenceStub(channel)


    def alter_batch_size(self, model_name, batch_size):
        """Change the batch size of a model on both client and server."""
        self.models[model_name].alter_batch_size(batch_size)
        model_info = pipecoco_pb2.model_info(model_name=model_name, batch_size=batch_size)
        self.stub.alter_batch_size(model_info)


    def cal_pipecoco_time(self, record, last_block_transmit_time):
        """
        Estimate total pipelined time from a profiling record as:
        client finish time + last block transmit time + remaining server time.

        :param record: per-stage timing record (layout documented in
                       test_config_performance)
        :param last_block_transmit_time: transmit duration of the final block
        :return: estimated total time
        """
        client_cal_time = record[2][-1] - record[0][0]+record[7]-record[6]
        server_cal_time = record[5][-1] - record[4][-1]+record[9]-record[8]
        total_time = client_cal_time + last_block_transmit_time + server_cal_time
        return total_time


    def find_optimal_config(self, model_name):
        """Scan the recorded runs and return the (F, G) pair with the best
        measured total time; (0, 1) — serial mode — is the baseline."""

        batch_size = self.models[model_name].batch_size
        record = self.record[model_name]["process"]['{}_{}'.format(0, 1)]
        last_block_transmit_time = self.record[model_name]["last_block_transmit_time"]['{}_{}'.format(0, 1)]
        origin = self.cal_pipecoco_time(record, last_block_transmit_time)
        best_performance = origin
        fused_layers_scope = 0
        block_num = 1

        # NOTE(review): searches G in range(2, 5) although create_model_record
        # profiles range(2, 6) — confirm whether G = 5 should be considered.
        for F in range(max(F_scope[0],self.models[model_name].divided_point), min(self.F_max[model_name], F_scope[1])):
            for b in range(2, 5):
                record = self.record[model_name]["process"]['{}_{}'.format(F, b)]
                last_block_transmit_time = self.record[model_name]["last_block_transmit_time"]['{}_{}'.format(F, b)]
                # Measured wall-clock total (record[-1]) is used directly here,
                # not the cal_pipecoco_time estimate.
                total_time = record[-1]

                if total_time < best_performance:
                    best_performance = total_time
                    fused_layers_scope = F
                    block_num = b
        logger.info("{} batch_size = {} F = {}, G = {}, best performance = {}, rate = {}".format(model_name, batch_size, fused_layers_scope, block_num, best_performance, best_performance/origin))
        logger_optim.info("{} batch_size = {} F = {}, G = {}, best performance = {}, rate = {}".format(model_name, batch_size,fused_layers_scope, block_num, best_performance, best_performance/origin))
        return fused_layers_scope, block_num


def draw_pipeline(model_name, F, block_num, record, show=False):
    """
    Visualise one recorded inference run as a horizontal pipeline chart and
    return the total elapsed time relative to the first client timestamp.

    :param model_name: name of the model the record belongs to
    :param F: fused-layer scope used for this run
    :param block_num: number of blocks per spatial dimension (G)
    :param record: profiling record as stored by test_config_performance
    :param show: when True, render the chart with matplotlib
    :return: total pipeline time of the run
    """
    origin = record[0][0]
    # Normalise the six per-block timestamp series to the first client start.
    stages = np.array(record[:6]) - origin
    cal_starts, cal_ends = stages[0], stages[1]
    tx_starts, tx_ends = stages[2], stages[3]
    srv_starts, srv_ends = stages[4], stages[5]
    # NOTE(review): indices 6/7 hold the former-layers timestamps in
    # test_config_performance (rest-layers times live at 8/9) — confirm the
    # non-fused segment is meant to be read from here.
    rest_cal_start_time = record[6] - origin
    rest_cal_end_time = record[7] - origin

    client_cal_time = [e - s for s, e in zip(cal_starts, cal_ends)]
    server_cal_time = [e - s for s, e in zip(srv_starts, srv_ends)]
    transmit_time = [e - s for s, e in zip(tx_starts, tx_ends)]
    x_index = ['client', 'transmission', 'server']
    last_time = np.array([client_cal_time, transmit_time, server_cal_time]).transpose()
    start_time = np.array([cal_starts, tx_starts, srv_starts]).transpose()

    if show:
        plt.figure(dpi=280)
        for row in range(block_num * block_num):
            plt.barh(x_index, last_time[row], left=start_time[row])

    if block_num != 1:
        # Pipelined run: append the non-fused tail segment on the server row.
        non_fused_cal_time = [0, 0, rest_cal_end_time - rest_cal_start_time]
        if show:
            plt.barh(x_index, non_fused_cal_time, left=[0, 0, rest_cal_start_time])
        total_time = srv_ends[-1] + non_fused_cal_time[-1]
    else:
        total_time = srv_ends[-1]

    if show:
        plt.title('{}  F={}  G={}'.format(model_name, F, block_num))
        plt.show()

    print(F, block_num, 'total time ', total_time, record[-1])
    print('transmit time', sum(transmit_time))
    return total_time