import numpy as np
import mindspore
import uuid
from mindspore import Tensor
from pipecoco.common import *

'''
PipeCoCo class
__init__: wraps the network model
    partitioner: computes the optimal number of partition blocks G and the fused-layer interval F
        the values of G and F are obtained by searching over the candidates
        default:
            client: first two layers
            server: remaining layers up to the fully connected layer
            number of partition blocks: 2x2

forward: runs compute/communication-parallel forward computation on the input samples


layer_partitioner:
    partitions the network, guaranteeing that the region needed by the next layer's
    computation is available and that no region is computed twice
'''

class PipeCoCo:
    """Wraps a network for tiled, compute/communication-parallel inference.

    The network is split into a "fused" front section, evaluated tile by tile
    over a G x G spatial grid, and a "non-fused" tail evaluated as a whole.
    ``layer_partitioner`` derives, for every tile, the region each fused layer
    must produce and the region of the previous layer it must read, so that
    downstream regions are always available and no region is computed twice.
    """

    def __init__(self, net, input_shape, G, F, role='local'):
        """Partition ``net`` at construction time.

        Args:
            net: indexable sequence of layers forming the model.
            input_shape: (channels, height, width) of one input sample.
            G: number of tile blocks along each spatial dimension.
            F: two-element [start, stop] layer range to fuse.
            role: 'client', 'server' or 'local'; selects the slice of the
                fused interval this instance is responsible for.
        """
        # Parameters of the selected partition scheme.
        self.role = role
        self.block_num = G
        # NOTE(review): `nn` is expected to come from `pipecoco.common` — confirm.
        self.relu = nn.ReLU()

        fused_layers = net[F[0]:F[1]]

        # Input dimensions of every fused layer (padding included).
        output_shapes = self.cal_shape(input_shape, fused_layers)
        fused_layers_params = self.get_layers_param(fused_layers)

        # Per-tile region of every fused layer and the region of the previous
        # layer it maps to; derived top-down, hence the reversed layer order.
        area_map, pre_area_map = self.layer_partitioner(output_shapes[-1][-2:], self.block_num,
                                                        fused_layers[::-1], fused_layers_params[::-1])

        if self.role == 'client':
            # The client owns the first two fused layers; their operators are
            # re-created with pad_mode forced to 'valid'.
            self.fused_layers_scope = [0, 2]
        elif self.role == 'server':
            self.fused_layers_scope = [2, F[1] - F[0]]
        elif self.role == 'local':
            self.fused_layers_scope = F

        self.fused_layers = reconstruct_layers(fused_layers[self.fused_layers_scope[0]:self.fused_layers_scope[1]])
        self.non_fused_layers = net[self.fused_layers_scope[1]:]
        self.fused_layers_params = fused_layers_params[self.fused_layers_scope[0]:self.fused_layers_scope[1]]

        self.output_shapes = output_shapes[self.fused_layers_scope[0]:self.fused_layers_scope[1] + 1]

        self.area_map = area_map[:, self.fused_layers_scope[0]:self.fused_layers_scope[1] + 1]
        self.pre_area_map = pre_area_map[:, self.fused_layers_scope[0]:self.fused_layers_scope[1]]

    def create_session(self, num=1, session_id=None):
        """Open a session and pre-allocate output buffers for ``num`` samples.

        Args:
            num: batch size the buffers are sized for.
            session_id: optional externally supplied id; generated when None.

        Returns:
            (session_id, init_area): the session id and the per-tile regions
            of the first fused layer that callers must feed.
        """
        if session_id is None:  # was `== None`; identity test is the Python idiom
            session_id = uuid.uuid1()
        self.session_id = session_id
        # Pre-allocate one numpy buffer per fused-layer output.
        self.output = []
        for output_shape in self.output_shapes:
            c, h, w = output_shape
            self.output.append(np.zeros([num, c, h, w]))
        init_area = self.area_map[:, 0]
        return self.session_id, init_area

    def non_fused_layers_forward(self):
        """Forward pass through the non-fused tail layers.

        Consumes the last fused-layer buffer and returns the tail output as a
        numpy array. The final entry of ``non_fused_layers`` is skipped here;
        it is applied inside ``fused_layers_forward`` instead.
        """
        x = Tensor(self.output[-1], mindspore.float32)
        for i, layer in enumerate(self.non_fused_layers[:-1]):
            # A ReLU is inserted before every layer from the third one on.
            if i > 1:
                x = self.relu(x)
            x = layer(x)

        return x.asnumpy()

    def fused_layers_forward(self, input, block_id):
        """Forward pass of one tile (``block_id``) through the fused layers.

        Args:
            input: array holding this tile's slice of the input sample.
            block_id: index of the tile in the G x G grid.

        Returns:
            This tile's output of the last fused layer as a numpy array.
        """
        # Place the input tile into its pre-allocated slot, offset by padding.
        cur_layer_area = self.area_map[block_id][0]
        p = self.fused_layers_params[0][-1]
        self.output[0][:, :, p + cur_layer_area[0][0]:p + cur_layer_area[0][1],
                           p + cur_layer_area[1][0]:p + cur_layer_area[1][1]] = input

        # Run the tile through the fused layers in order.
        for j, layer in enumerate(self.fused_layers):

            # Region of layer j's buffer this tile has to read.
            cur_layer_area = self.pre_area_map[block_id][j]

            x = Tensor(self.output[j][:, :, cur_layer_area[0][0]:cur_layer_area[0][1],
                       cur_layer_area[1][0]:cur_layer_area[1][1]], mindspore.float32)

            # NOTE(review): every odd-indexed layer is preceded by the last
            # non-fused layer (presumably an activation) — confirm.
            if j % 2 == 1:
                x = self.non_fused_layers[-1](x)

            x = layer(x)

            next_layer_area = self.area_map[block_id][j + 1]

            # Offset by the next layer's padding, except after the last layer.
            p = 0
            if j + 1 < len(self.fused_layers):
                p = self.fused_layers_params[j + 1][-1]
            # Write the result into its region of the next layer's buffer.
            self.output[j + 1][:, :, p + next_layer_area[0][0]:p + next_layer_area[0][1],
            p + next_layer_area[1][0]:p + next_layer_area[1][1]] = x.asnumpy()
        return x.asnumpy()

    def layer_partitioner(self, size, block_num, layers, layers_params):
        """Partition the fused-layer regions per tile.

        Args:
            size: (height, width) of the topmost (last) fused layer's output.
            block_num: number of tiles along each spatial dimension.
            layers: fused layers in reversed (top-down) order.
            layers_params: matching [kernel_size, stride, padding] triples.

        Returns:
            (area_map, pre_area_map): int64 arrays of per-tile, per-layer
            regions to compute and previous-layer regions to read.
        """
        # Split the topmost layer so the load is balanced, assigning the
        # spare units to the later-ordered tiles.
        area_map = self.top_layer_area_partition(size, block_num)

        # For every region in area_map, the previous-layer region it reads.
        pre_area_map = []

        for i in range(block_num):
            for j in range(block_num):
                border_x, border_y = area_map[i * block_num + j][-1]
                pre_area_map.append([])
                for x, layer in enumerate(layers):
                    kernel_size, stride, p = layers_params[x]
                    # Region of the previous layer feeding this output region.
                    border_x, border_y = pre_layer_area(border_x[0], border_y[0], border_x[1], border_y[1],
                                                        kernel_size, stride)
                    pre_area_map[i * block_num + j].append([border_x, border_y])

                    # Remove the padding offset; clamp the outer border at 0.
                    border_x = [max(0, border_x[0] - p), border_x[1] - p]
                    border_y = [max(0, border_y[0] - p), border_y[1] - p]
                    if i == block_num - 1:
                        border_x[1] -= p
                    if j == block_num - 1:
                        border_y[1] -= p

                    # Drop the parts already computed by neighbouring tiles.
                    if i > 0:
                        border_x = [max(area_map[(i - 1) * block_num + j][x + 1][0][1], border_x[0]), border_x[1]]
                    if j > 0:
                        border_y = [max(area_map[i * block_num + j - 1][x + 1][1][1], border_y[0]), border_y[1]]
                    area_map[i * block_num + j].append([border_x, border_y])

        # Flip back to bottom-up layer order.
        for i in range(block_num):
            for j in range(block_num):
                area_map[i * block_num + j] = area_map[i * block_num + j][::-1]
                pre_area_map[i * block_num + j] = pre_area_map[i * block_num + j][::-1]
        return np.array(area_map, dtype=np.int64), np.array(pre_area_map, dtype=np.int64)

    def top_layer_area_partition(self, size, block_num):
        """Split the topmost fused layer into a balanced block_num x block_num grid.

        Because the regions mapped down to lower layers overlap, earlier tiles
        are made slightly smaller to balance the load.

        Returns:
            List of [[border_x, border_y]] entries, one per tile, row-major.
        """
        area_map = []
        height = size[0]
        width = size[1]

        rest_h = height % block_num
        rest_w = width % block_num

        avg_h = height // block_num
        avg_w = width // block_num

        # Earlier tiles get the smaller height/width units.
        h_units = [avg_h for _ in range(block_num - rest_h)] + [avg_h + 1 for _ in range(rest_h)]
        w_units = [avg_w for _ in range(block_num - rest_w)] + [avg_w + 1 for _ in range(rest_w)]

        # On an exact split, still shrink the first unit and grow the last.
        if rest_h == 0:
            h_units[0] = h_units[0] - 1
            h_units[-1] = h_units[-1] + 1

        if rest_w == 0:
            w_units[0] = w_units[0] - 1
            w_units[-1] = w_units[-1] + 1

        # Accumulate the units into per-tile borders of the topmost layer.
        h = 0
        for h_unit in h_units:
            border_x = (h, h + h_unit)
            w = 0
            for w_unit in w_units:
                border_y = (w, w + w_unit)
                w += w_unit
                area_map.append([[border_x, border_y]])
            h += h_unit
        return area_map

    def cal_shape(self, input_shape, fused_layers):
        """Compute each fused layer's input dimensions, padding included.

        Args:
            input_shape: (channels, height, width) of the network input.
            fused_layers: the fused-layer sequence.

        Returns:
            List of [c, h, w] per layer, plus one trailing entry for the
            output of the last fused layer.
        """
        c, h, w = input_shape
        output_shape = []
        for layer in fused_layers:
            kernel_size, stride, p = get_layer_param(layer)
            h += 2 * p
            w += 2 * p
            output_shape.append([c, h, w])
            # Size of the next layer's input.
            h, w = next_layer_size(h, w, kernel_size, stride)
            if hasattr(layer, 'out_channels'):
                c = layer.out_channels

        output_shape.append([c, h, w])
        return output_shape

    def G_F_selector(self):
        """Select the optimal block count G and fused-layer range F.

        Currently a fixed placeholder; a search over candidates is intended.
        """
        return 2, [0, 4]

    def get_layers_param(self, layers):
        """Collect [kernel_size, stride, padding] for every layer in ``layers``."""
        layers_params = []
        for layer in layers:
            k, stride, p = get_layer_param(layer)
            layers_params.append([k, stride, p])
        return layers_params


'''
PipeCoCo client

sync: synchronizes the model between client and server
    the synchronized state includes area_map and the model stage
'''
class PipeCoCo_Client:
    """Client-side endpoint of PipeCoCo (stub, not yet implemented)."""

    def __init__(self):
        # No client state yet; placeholder constructor.
        pass

    def sync(self):
        """Synchronize the compute/communication-parallel plan with the server.

        Placeholder — no behavior implemented yet.
        """
        pass


'''
PipeCoCo server

sync: synchronizes the model between client and server
    the synchronized state includes area_map and the model stage
'''
class PipeCoCo_Server:
    """Server-side endpoint of PipeCoCo (stub, not yet implemented)."""

    def __init__(self):
        # No server state yet; placeholder constructor.
        pass

    def sync(self):
        """Synchronize the compute/communication-parallel plan with the client.

        Placeholder — no behavior implemented yet.
        """
        pass