import numpy as np
import mindspore
from mindspore import Tensor
from pipecoco.common import *

'''
PipeCoCo类
__init__: 用于封装网络模型
    partitioner: 计算最优划分块数G和融合网络区间F
        G和F的值要通过遍历后计算得出
        default:
            client: 前两层
        server: 剩余网络至全连接层
            划分块数: 2×2

forward: 对输入样本进行传算并行的前向计算


layer_partitioner:
    对网络进行划分，保证用于下层计算的区域是可用的；保证要计算的区域不重复
'''

class PipeCoCo_Client:
    """Client-side wrapper that partitions a network for transfer/compute-parallel inference.

    On construction it selects the partition block count ``G`` and the fused-layer
    index range ``F``, splits ``net`` into fused layers (executed block-wise by
    :meth:`fused_layers_forward`) and the remaining "merge" layers (executed whole),
    and precomputes the per-block area mappings used during the forward pass.

    NOTE(review): this module defines a second class with the same name further
    down, which shadows this one at import time — confirm which definition external
    callers expect and rename one of the two.
    """

    def __init__(self, net, input_shape):
        """Partition ``net`` and precompute the block/area mappings.

        net: sliceable sequence of layers.
        input_shape: (C, H, W) of a single input sample.
        """
        # Pick the partitioning parameters: G = number of blocks per axis,
        # F = [start, stop) index range of the fused layers inside `net`.
        G, F = self.G_F_selector(net)

        self.block_num = G

        # Split point between the client- and server-side halves of the fused range.
        # NOTE(review): these scopes are not read anywhere inside this class;
        # presumably consumed by external code — confirm before removing.
        self.fused_layers_scope_client = [F[0], F[0]+2]
        self.fused_layers_scope_server = [F[0]+2, F[1]]

        # Fused layers run block-wise; everything after them runs unpartitioned.
        # (A redundant earlier assignment of `fused_layers` from the client scope
        # was removed — it was immediately overwritten by this one.)
        self.fused_layers = net[F[0]:F[1]]
        self.merge_layers = net[F[1]:]

        # Per-layer output shapes and (kernel_size, stride, padding) parameters.
        self.output_shapes = self.cal_shape(input_shape, self.fused_layers)
        self.fused_layers_params = self.get_layers_param(self.fused_layers)

        # Rebuild the fused operators with pad_mode forced to 'valid' so padding
        # is handled explicitly by the block partitioning below.
        self.fused_layers_opt = reconstruct_layers(self.fused_layers)

        # For each block: the region it owns in every fused layer, and the region
        # of the previous layer needed as input (layers are walked top-down, hence
        # the reversed sequences).
        self.area_map, self.pre_area_map = self.layer_partitioner(
            self.output_shapes[-1][-2:], self.block_num,
            self.fused_layers[::-1], self.fused_layers_params[::-1])

    def forward(self, input):
        """Forward pass: fused layers block-wise, then the merge layers whole."""
        x = self.fused_layers_forward(input)
        for layer in self.merge_layers:
            x = layer(x)
        return x

    def layer_partitioner(self, size, block_num, layers, layers_params):
        """Partition the fused layers into per-block regions.

        Walks the fused layers top-down (``layers``/``layers_params`` arrive
        reversed) and, for every block, maps its top-layer region back through
        each layer to the region it depends on, trimming overlap with regions
        that earlier blocks already computed.

        Returns:
            (area_map, pre_area_map): per block index, the region it owns in each
            layer and the previous-layer region needed to compute it; both are
            reversed at the end so index 0 is the first fused layer.
        """
        # Split the topmost layer so work is balanced; surplus rows/columns go
        # to the later-computed blocks.
        area_map = self.top_layer_area_partition(size, block_num)

        # Previous-layer regions required to compute each entry of area_map.
        pre_area_map = {}

        for i in range(block_num):
            for j in range(block_num):
                border_x, border_y = area_map[i * block_num + j][-1]
                pre_area_map[i * block_num + j] = []
                for x, layer in enumerate(layers):
                    kernel_size, stride, p = layers_params[x]
                    # Region of the previous layer that feeds this block's region.
                    border_x, border_y = pre_layer_area(border_x[0], border_y[0], border_x[1], border_y[1],
                                                        kernel_size, stride)
                    pre_area_map[i * block_num + j].append([border_x, border_y])

                    # Shift from padded to unpadded coordinates; the last row/column
                    # of blocks also drops the trailing pad.
                    border_x = [max(0, border_x[0] - p), border_x[1] - p]
                    border_y = [max(0, border_y[0] - p), border_y[1] - p]
                    if i == block_num - 1:
                        border_x[1] -= p
                    if j == block_num - 1:
                        border_y[1] -= p

                    # Drop the part already produced by the block above / to the left.
                    if x + 1 < len(layers):
                        if i > 0:
                            border_x = [max(area_map[(i - 1) * block_num + j][x + 1][0][1], border_x[0]), border_x[1]]
                        if j > 0:
                            border_y = [max(area_map[i * block_num + j - 1][x + 1][1][1], border_y[0]), border_y[1]]
                        area_map[i * block_num + j].append([border_x, border_y])

        # The maps were built top-down; reverse so index 0 is the first fused layer.
        for i in range(block_num):
            for j in range(block_num):
                area_map[i * block_num + j] = area_map[i * block_num + j][::-1]
                pre_area_map[i * block_num + j] = pre_area_map[i * block_num + j][::-1]
        return area_map, pre_area_map

    def fused_layers_forward(self, input):
        """Run the fused layers block by block, stitching results into shared buffers."""
        # One NumPy buffer per fused-layer output (index 0 is the padded input).
        output = []
        # Batch size.
        m = input.shape[0]

        # Pre-allocate every intermediate buffer.
        for j, output_shape in enumerate(self.output_shapes):
            c, h, w = output_shape
            output.append(np.zeros([m, c, h, w]))

        # Place the input inside its zero-padded buffer.
        p = self.fused_layers_params[0][-1]
        if p > 0:
            output[0][:, :, p:-p, p:-p] = input
        else:
            output[0] = input

        # For each block, run all fused layers in order.
        for i, block in enumerate(self.area_map.values()):
            for j, layer in enumerate(self.fused_layers):

                # Region of layer j's input buffer this block must read.
                cur_layer_area = self.pre_area_map[i][j]
                x = Tensor(output[j][:, :, cur_layer_area[0][0]:cur_layer_area[0][1],
                           cur_layer_area[1][0]:cur_layer_area[1][1]], mindspore.float32)

                # NOTE(review): applying merge_layers[-1] before every odd fused
                # layer looks suspicious (presumably a shared activation/pooling
                # operator) — confirm this is intentional.
                if j % 2 == 1:
                    x = self.merge_layers[-1](x)

                x = self.fused_layers_opt[j](x)

                next_layer_area = block[j]

                # Offset by the next layer's padding when writing into its buffer.
                p = 0
                if j + 1 < len(self.fused_layers):
                    p = self.fused_layers_params[j+1][-1]
                output[j + 1][:, :, p + next_layer_area[0][0]:p + next_layer_area[0][1],
                p + next_layer_area[1][0]:p + next_layer_area[1][1]] = x.asnumpy()

        x = Tensor(output[-1], mindspore.float32)
        return x

    def top_layer_area_partition(self, size, block_num):
        """Split a (height, width) top-layer plane into block_num x block_num regions.

        Because regions mapped onto lower layers overlap, earlier-computed blocks
        are made slightly smaller to balance the per-block load.
        """
        area_map = {}
        height = size[0]
        width = size[1]

        rest_h = height % block_num
        rest_w = width % block_num

        avg_h = height // block_num
        avg_w = width // block_num

        # Earlier-computed regions get the smaller units.
        h_units = [avg_h for _ in range(block_num - rest_h)] + [avg_h + 1 for _ in range(rest_h)]
        w_units = [avg_w for _ in range(block_num - rest_w)] + [avg_w + 1 for _ in range(rest_w)]

        # Even splits are still skewed: shrink the first unit, grow the last.
        if rest_h == 0:
            h_units[0] = h_units[0] - 1
            h_units[-1] = h_units[-1] + 1

        if rest_w == 0:
            w_units[0] = w_units[0] - 1
            w_units[-1] = w_units[-1] + 1

        # Materialize the region borders, row-major.
        h = 0
        for i, h_unit in enumerate(h_units):
            border_x = (h, h + h_unit)
            w = 0
            for j, w_unit in enumerate(w_units):
                border_y = (w, w + w_unit)
                w += w_unit
                area_map[i * block_num + j] = [[border_x, border_y]]
            h += h_unit
        return area_map

    def cal_shape(self, input_shape, fused_layers):
        """Compute each fused layer's padded input shape plus the final output shape."""
        c, h, w = input_shape
        output_shape = []
        for j, layer in enumerate(fused_layers):
            kernel_size, stride, p = get_layer_param(layer)
            # Record this layer's input size including explicit padding.
            h += 2 * p
            w += 2 * p
            output_shape.append([c, h, w])
            # Size after applying this layer.
            h, w = next_layer_size(h, w, kernel_size, stride)
            if hasattr(layer, 'out_channels'):
                c = layer.out_channels

        output_shape.append([c, h, w])
        return output_shape

    def G_F_selector(self, net):
        """Select the optimal block count G and fused-layer range F.

        Placeholder: should search candidate (G, F) pairs over ``net``; currently
        returns fixed defaults (2 blocks per axis, layers [0, 4)).
        """
        return 2, [0, 4]

    def get_layers_param(self, layers):
        """Collect (kernel_size, stride, padding) for every layer in ``layers``."""
        layers_params = []
        for layer in layers:
            k, stride, p = get_layer_param(layer)
            layers_params.append([k, stride, p])
        return layers_params


'''
PipeCoCo的客户端

sync: 用于同步客户端和服务端的模型
    要同步的包括area_map和模型阶段
'''
class PipeCoCo_Client:
    """Client-side endpoint stub of PipeCoCo.

    Intended to synchronize the partition plan (area_map and model stage)
    with the server.

    NOTE(review): this class name collides with the PipeCoCo_Client defined
    earlier in this module and shadows it at import time — one of the two
    should be renamed; confirm which name external callers expect.
    """

    def __init__(self, net):
        # Decide the partition count and fused-layer range up front;
        # nothing else is initialized yet.
        G, F = self.G_F_selector(net)

    def sync(self):
        """Synchronize the transfer/compute-parallel plan between client and server (not implemented)."""
        pass

    def G_F_selector(self, net):
        """Return the optimal block count G and fused-layer range F.

        Placeholder: currently returns fixed defaults.
        """
        return 2, [0, 4]




'''
PipeCoCo的服务端

sync: 用于同步客户端和服务端的模型
    要同步的包括area_map和模型阶段
'''
class PipeCoCo_Server:
    """Server-side endpoint stub of PipeCoCo.

    Intended to synchronize the partition plan (area_map and model stage)
    with the client.
    """

    def __init__(self):
        pass

    def sync(self):
        """Synchronize the transfer/compute-parallel plan between server and client (not implemented)."""
        pass