#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File     : pipecoco.py
@Project  : pipecoco
@Date     : 2021/8/13
@Author   : Zhang Jinyang
@Contact  : zhang-jy@sjtu.edu.cn
'''

from pipecoco.partitioner import *
from pipecoco.pipecoco_layer import *
from mindspore import Tensor
import numpy as np
import mindspore

"""
PipeCoCo传算并行主类：负责传算并行的计算部分

Args:
    layer_partition:包括可分块的融合网络部分和不可分块的非融合网络
    G: 分块数目, F: 融合网络区间
    input_shape:初始输入的维度
    role: 表明客户端/服务端身份
Returns:
    并行传算客户端.

主要方法：
    former_layers_forward: 对初始输入样本执行former_layers结构部分的计算，只在客户端执行

    fused_layers_forward: 执行各分块任务融合网络部分计算，在client端和server端都会执行

    rest_layers_forward: 执行融合网络之后剩余的网络计算，得到预测结果，只在server端执行
"""


class PipeCoCo:
    """Pipelined transfer/compute parallel inference engine.

    The model is split into up to three parts:

    * ``former_layers`` -- layers before the fused network (client side only)
    * ``fused_layers``  -- layers computed tile-by-tile (client and server)
    * ``rest_layers``   -- layers after the fused network (server side only)

    Main entry points are :meth:`former_layers_forward`,
    :meth:`fused_layers_forward` and :meth:`rest_layers_forward`;
    :meth:`normal_forward` runs the assigned layer list without tiling.
    """

    def __init__(self, layer_partition, input_shape, G=-1, F=-1, divided_point=2, role='local', batch_size=10):
        """
        Wrap the model for pipelined parallelism and initialize the structure.

        :param layer_partition: network partition: (fusable part, non-fusable part)
        :param input_shape: input data dimensions (channels, height, width)
        :param G: number of tiles the fused network is split into
        :param F: end index of the fused-network interval
        :param role: 'client' / 'server' / 'local' identity
        :param batch_size: batch size allowed for a single inference task
        :param divided_point: split point between the client and server parts
        """

        # Save the configuration.
        self.role = role
        self.input_shape = input_shape
        self.batch_size = batch_size
        self.layer_partition = layer_partition
        self.divided_point = divided_point
        # Partition the network structure and apply the tiling configuration.
        self.origin_layers = self.reconstruct_layers(layer_partition)
        self.alter_config(F, G)

    def former_layers_forward(self, x):
        """
        Run the layers that precede the fused network (client side only).

        :param x: input batch (numpy array convertible to a Tensor)
        :return: numpy output of the former layers
        """

        x = Tensor(x, mindspore.float32)
        for i, layer in enumerate(self.former_layers):
            x = layer(x)
        return x.asnumpy()

    def fused_layers_forward(self, input, block_id):
        """
        Forward one tile through the fused network (runs on client and server).

        :param input: input data of the current tile
        :param block_id: id of the current tile
        :return: output of the last fused layer computed for this tile
        """

        # Copy the incoming tile into its region of the pre-allocated buffer,
        # offset by the first fused layer's padding.
        cur_layer_area = self.area_map[block_id][0]
        if len(self.fused_layers) != 0:
            p = self.fused_layers[0].padding
            if 0 not in input.shape:
                self.output[0][:, :, p[0]+cur_layer_area[0][0]: p[0]+cur_layer_area[0][1],
                p[1]+cur_layer_area[1][0]: p[1]+cur_layer_area[1][1]] = input
        else:
            # The top-down partition may be uneven, so this end may hold no
            # fused layers at all; just store the tile and pass it through.
            if 0 not in input.shape:
                self.output[0][:, :, cur_layer_area[0][0]: cur_layer_area[0][1],
                cur_layer_area[1][0]: cur_layer_area[1][1]] = input
            return input

        j = 0
        # Push the tile through the fused layers in order.
        for i, layer in enumerate(self.fused_layers):

            if isinstance(layer, Bypass):
                # NOTE(review): this branch uses `x`, `p` and `next_layer_area`
                # set by the previous non-Bypass iteration — it assumes a
                # Bypass never appears first in `fused_layers`; confirm.
                # Map the current output region back to the bypass source region.
                identity_area = pre_layer_area(next_layer_area[0], next_layer_area[1], layer.kernel_size, layer.stride)

                # Fetch the shortcut data carried by the bypass.
                pre_p = self.fused_layers[i - layer.h + 1].padding
                identity = Tensor(
                    self.output[j - layer.h + 1][:, :, pre_p[0] + identity_area[0][0]:pre_p[0] + identity_area[0][1],
                    pre_p[1] + identity_area[1][0]:pre_p[1] + identity_area[1][1]], mindspore.float32)

                # Merge the shortcut with the main branch and write the result
                # back into the current layer's buffer region.
                x = layer(identity, Tensor(x, mindspore.float32)).asnumpy()
                self.output[j][:, :, p[0] + next_layer_area[0][0]:p[0] + next_layer_area[0][1],
                p[1] + next_layer_area[1][0]:p[1] + next_layer_area[1][1]] = x
            else:

                # Region this tile must compute on the current layer.
                cur_layer_area = self.pre_area_map[block_id][j]
                x = self.output[j][:, :, cur_layer_area[0][0]:cur_layer_area[0][1],
                    cur_layer_area[1][0]:cur_layer_area[1][1]]
                # Skip the layer entirely when the region is empty.
                if 0 not in x.shape:
                    x = layer(Tensor(x, mindspore.float32)).asnumpy()
                    next_layer_area = self.area_map[block_id][j + 1]

                    # Offset the write into the next buffer by the padding of
                    # the next non-Bypass layer (Bypass layers carry none).
                    p = (0, 0)
                    add = 1
                    while i + add < len(self.fused_layers):
                        if not isinstance(self.fused_layers[i + add], Bypass):
                            p = self.fused_layers[i + add].padding
                            break
                        add += 1
                    # Assign the computed tile into its region of the next
                    # layer's output buffer.
                    self.output[j + 1][:, :, p[0] + next_layer_area[0][0]:p[0] + next_layer_area[0][1],
                    p[1] + next_layer_area[1][0]:p[1] + next_layer_area[1][1]] = x
                j += 1

        return x

    def rest_layers_forward(self):
        """
        Run the remaining (post-fusion) layers; server side only.

        :return: prediction result as a numpy array
        """
        # The last pre-allocated buffer holds the fused-network output.
        x = Tensor(self.output[-1], mindspore.float32)
        if len(self.fused_layers) > 0 and isinstance(self.fused_layers[-1], Bypass):
            bypass_input = self.output[-1]
        else:
            # Empty sentinel: `0 in shape` marks "no shortcut cached yet".
            bypass_input = np.array([])

        # Iterate over rest_layers, replaying bypass shortcuts as needed.
        for i, layer in enumerate(self.rest_layers):
            if isinstance(layer, Bypass):
                # Recover the shortcut input from the fused-output buffers
                # when none was cached from a previous bypass.
                if 0 in bypass_input.shape:
                    last_idx = i - layer.h
                    if len(self.fused_layers) != 0:
                        p = self.fused_layers[last_idx + 1].padding
                        # NOTE(review): both spatial slices use p[0]:-p[1];
                        # looks like it assumes symmetric, non-zero padding
                        # (p[0] == p[1] > 0) — confirm against the layer configs.
                        bypass_input = self.output[last_idx][:, :, p[0]: -p[1], p[0]: -p[1]]
                    else:
                        bypass_input = self.output[last_idx]

                identity = Tensor(bypass_input, mindspore.float32)
                x = layer(identity, x)
                bypass_input = x
            else:
                x = layer(x)

        return x.asnumpy()

    def normal_forward(self, x):
        """
        Run the assigned layer list in plain (non-tiled) mode.

        :param x: input batch
        :return: prediction result as a numpy array
        """
        x = Tensor(x, mindspore.float32)

        # Iterate over the layers, caching the bypass source when a wrapped
        # layer is flagged as one.
        for i, layer in enumerate(self.layers):
            if isinstance(layer, Bypass):
                # NOTE(review): relies on a bypass-source layer having set
                # `identity` earlier in the list; a Bypass with no preceding
                # flagged layer would raise NameError — confirm.
                x = layer(identity, x)
            else:
                if isinstance(layer, pipecoco_layer) and layer.is_bypass:
                    identity = x
                x = layer(x)

        return x.asnumpy()


    def reconstruct_layers(self, layer_partition):
        """
        Wrap each layer so the network can switch computation modes.

        :param layer_partition: (fusable layers, non-fusable layers)
        :return: list of wrapped layers
        """
        origin_layers = []
        # Rebuild the fusable part, wrapping layer groups in pipecoco_layer.
        for i, layer in enumerate(layer_partition[0]):
            if isinstance(layer, list):
                origin_layers.append(pipecoco_layer(layer))
            else:
                if isinstance(layer, Bypass):
                    # Flag the layer two positions back as the bypass source.
                    origin_layers[-2].is_bypass = True
                origin_layers.append(layer)
        # Append the non-fused remainder (including any fusable layers that
        # do not take part in the pipelined execution).
        origin_layers.append(layer_partition[1])
        return origin_layers

    def alter_batch_size(self, batch_size):
        """Update the batch size allowed for one inference task."""
        self.batch_size = batch_size

    def alter_config(self, F, block_num):
        """
        Re-partition the structure and the per-tile tasks from F and block_num.

        :param F: end index of the fused-network interval
        :param block_num: number of tiles
        :raises ValueError: if ``self.role`` is not 'client'/'server'/'local'
        """

        self.F = F
        self.block_num = block_num

        if block_num == 1:
            # With a single tile, run in plain client/server mode, splitting
            # the model at divided_point.
            if self.role == 'server':
                self.layers = self.origin_layers[self.divided_point:]
            else:
                self.layers = self.origin_layers[:self.divided_point]

            self.alter_layers_mode('normal', self.layers)
            return

        # Split the model into former / fused / rest layers.
        self.former_layers = self.origin_layers[:max(0,self.divided_point - 2)]
        self.alter_layers_mode('normal', self.former_layers)

        self.fused_layers = self.origin_layers[max(0,self.divided_point - 2):F]
        self.alter_layers_mode('fused', self.fused_layers)

        self.rest_layers = self.origin_layers[F:]
        self.alter_layers_mode('normal', self.rest_layers)

        # Compute the feature-map size after the former layers.
        c, h, w = self.input_shape
        for j, layer in enumerate(self.former_layers):
            kernel_size, stride, p = layer.kernel_size, layer.stride, layer.padding
            h, w = next_layer_size(h, w, kernel_size, stride, p)
            if hasattr(layer, 'out_channels'):
                c = layer.out_channels
        input_shape = [c, h, w]

        # Per-layer output shapes of the fused network, for buffer pre-allocation.
        output_shapes = cal_shape(input_shape, self.fused_layers)
        # Per-tile regions on each fused layer and the source regions they map
        # back to on the previous layer.
        area_map, pre_area_map = layers_area_partition(output_shapes[-1][-2:], self.block_num,
                                                       self.fused_layers[::-1], output_shapes[::-1])

        # To balance client compute against transfer time, the client takes the
        # first two fused layers and the server takes the remainder.
        # BUGFIX: these used the undefined name `fused_layer` (NameError);
        # `self.fused_layers` still holds the full fused slice at this point.
        if self.role == 'client':
            self.fused_layers_scope = [0, min(2, len(self.fused_layers))]

        elif self.role == 'server':
            self.fused_layers_scope = [min(2, len(self.fused_layers)),
                                       F - self.divided_point + min(2, len(self.fused_layers))]

        elif self.role == 'local':
            self.fused_layers_scope = [0, len(self.fused_layers)]

        else:
            # Previously an unknown role surfaced later as a confusing
            # AttributeError; fail fast instead.
            raise ValueError("role must be 'client', 'server' or 'local', got %r" % (self.role,))

        # Keep only the fused layers assigned to this end.
        self.fused_layers = self.fused_layers[self.fused_layers_scope[0]:self.fused_layers_scope[1]]

        # Fused-network attributes restricted to this end's scope.
        self.output_shapes = output_shapes[self.fused_layers_scope[0]:self.fused_layers_scope[1] + 1]
        self.area_map = area_map[:, self.fused_layers_scope[0]:self.fused_layers_scope[1] + 1]
        self.pre_area_map = pre_area_map[:, self.fused_layers_scope[0]:self.fused_layers_scope[1]]

        self.output = []
        # Pre-allocate one buffer per fused layer for the fixed batch size.
        for j, output_shape in enumerate(self.output_shapes):
            c, h, w = output_shape
            self.output.append(np.zeros([self.batch_size, c, h, w],dtype=np.float32))

    def alter_layers_mode(self, mode, layers_list):
        """
        Switch wrapped layers between tiled ('fused') and whole-map
        ('normal') computation modes.
        """

        for layer in layers_list:
            if isinstance(layer, pipecoco_layer):
                layer.switch_to_mode(mode)