from mindspore import nn

"""
pipecoco_layer：适用于pipecoco框架的layer封装，可在融合层和非融合层状态切换

Args:
    layers:包括对区域数据执行的卷积、池化操作和只对单独特征执行的激活、正则化操作

主要方法：
    construct: 可像mindspore中网络结构一样自动执行前向传播
    
    switch_to_mode: 可在融合网络分块计算和非融合网络整块计算间切换
"""
class pipecoco_layer(nn.Cell):
    """Layer wrapper for the pipecoco framework.

    Wraps a list of MindSpore layers (conv/pool ops acting on spatial regions
    plus element-wise ops such as activation/normalization) and allows
    switching between a fused tiled-execution form and the normal form.

    Args:
        layers (list[nn.Cell]): layers to wrap. Any nn.Pad in the list is
            folded into this wrapper's padding amounts instead of being
            executed as an op in fused mode.
    """

    def __init__(self, layers):
        super(pipecoco_layer, self).__init__()
        self.mode = 'normal'
        self.is_bypass = False
        # origin_layer / fused_layer hold the structures executed in normal
        # mode and in tiled-fusion mode respectively.
        self.origin_layer = []
        self.fused_layer = []

        self.kernel_size, self.stride, self.padding = get_layer_param(layers[0])
        # Tiled execution cannot run an explicit Pad op, so fold each nn.Pad
        # into the wrapper's padding amounts. Iterate over a snapshot: the
        # original code removed from `layers` while iterating it, which
        # skipped the element immediately following every removed Pad.
        for single_layer in list(layers):
            if isinstance(single_layer, nn.Pad):
                self.padding[0] += single_layer.paddings[2][0]
                self.padding[1] += single_layer.paddings[3][1]
                layers.remove(single_layer)
                self.origin_layer.append(single_layer)

        self.origin_layer.extend(layers)

        if isinstance(layers[0], nn.Conv2d):
            self.in_channels = layers[0].in_channels
            self.out_channels = layers[0].out_channels

        self.fused_layer = list(layers)
        # Fused (tiled) execution supplies its own halo regions, so the
        # leading conv/maxpool must run with pad_mode 'valid'; rebuild the
        # first op accordingly, preserving its weights/bias.
        if hasattr(layers[0], 'pad_mode') and layers[0].pad_mode.lower() != 'valid':
            if isinstance(layers[0], nn.Conv2d):
                bias = layers[0].bias if layers[0].has_bias else 'zeros'
                self.fused_layer[0] = nn.Conv2d(
                    self.in_channels, self.out_channels, self.kernel_size,
                    stride=self.stride, pad_mode='valid',
                    weight_init=layers[0].weight,
                    has_bias=layers[0].has_bias, bias_init=bias)
            elif isinstance(layers[0], nn.MaxPool2d):
                self.fused_layer[0] = nn.MaxPool2d(self.kernel_size, self.stride, pad_mode='VALID')

        # Build self.layer up front so construct() works even if the caller
        # never calls switch_to_mode() (the original raised AttributeError
        # in that case).
        self.switch_to_mode(self.mode)

    def construct(self, x):
        """Forward pass through the currently selected layer sequence."""
        x = self.layer(x)
        return x

    def switch_to_mode(self, mode):
        """Select the execution mode.

        In 'fused' (tiled) mode, conv/pool run with pad_mode 'valid';
        any other mode executes the original layer sequence.
        """
        self.mode = mode
        if mode == 'fused':
            self.layer = nn.SequentialCell(self.fused_layer)
        else:
            self.layer = nn.SequentialCell(self.origin_layer)


class Bypass(nn.Cell):
    """Residual bypass cell.

    Optionally downsamples the identity branch, adds it to the main-branch
    output, then applies the activation (if any).

    Args:
        down_sample: sequence/cell applied to the identity branch, or a
            falsy value for no downsampling.
        activate: activation cell applied after the addition, or falsy.
        h (int): tile height parameter kept for the pipecoco framework.
    """

    def __init__(self, down_sample, activate, h=3):
        super(Bypass, self).__init__()
        self.down_sample = down_sample
        self.activate = activate
        self.h = h
        if down_sample:
            self.kernel_size, self.stride, self.p = get_layer_param(down_sample[0])
        else:
            # Also default self.p here: the original left it unset on this
            # branch, so any later access raised AttributeError. [0, 0]
            # matches get_layer_param's no-padding default.
            self.kernel_size, self.stride, self.p = 1, 1, [0, 0]

    def construct(self, identity, x):
        """Return activate(down_sample(identity) + x), with each of the two
        optional stages skipped when its cell is falsy."""
        if self.down_sample:
            identity = self.down_sample(identity)

        result = identity + x

        if self.activate:
            # Reuse the already-computed sum (the original recomputed
            # identity + x here).
            result = self.activate(result)
        return result


class Reducemean(nn.Cell):
    """Wrap a parameter-carrying reduce op (e.g. ReduceMean) as an nn.Cell.

    Binding the axis at construction time lets the op be dropped into a
    layer sequence like any other cell.
    """

    def __init__(self, reducemean, axis):
        super(Reducemean, self).__init__()
        self.reducemean = reducemean
        self.axis = axis

    def construct(self, x):
        """Apply the wrapped reduce op to x along the bound axis."""
        reduced = self.reducemean(x, self.axis)
        return reduced



def get_layer_param(layer):
    """Extract the key geometry parameters of a layer.

    Returns:
        tuple: (kernel_size, stride, padding) where kernel_size and stride
        are scalars (the first element when the attribute is a tuple) and
        padding is a two-element list. For an nn.Pad layer the padding is
        taken from its paddings attribute and overrides anything derived
        from pad_mode.
    """
    kernel_size = 1
    stride = 1
    padding = [0, 0]

    if hasattr(layer, 'kernel_size'):
        ks = layer.kernel_size
        kernel_size = ks[0] if isinstance(ks, tuple) else ks

    if hasattr(layer, 'stride'):
        st = layer.stride
        stride = st[0] if isinstance(st, tuple) else st

    if hasattr(layer, 'pad_mode'):
        mode = layer.pad_mode.lower()
        if mode == 'same':
            half = (kernel_size - 1) // 2
            padding = [half, half]
        elif mode == 'pad':
            # NOTE(review): assumes layer.padding is a scalar; if MindSpore
            # stores a 4-tuple here this yields [tuple, tuple] — confirm.
            padding = [layer.padding, layer.padding]

    if isinstance(layer, nn.Pad):
        # paddings is NCHW-shaped: row 2 = height pads, row 3 = width pads.
        padding = [layer.paddings[2][0], layer.paddings[3][1]]

    return kernel_size, stride, padding

