'''
Function:
    Define InvertedResidual Module
Author:
    Zhenchao Jin
'''
# import torch
# import torch.nn as nn
import numpy as np
import luojianet
from luojianet import nn, ops, Parameter, Tensor
import luojianet.ops as ops
# from activation import BuildActivation
# from normalization import BuildNormalization, constructnormcfg
from ..activation import BuildActivation
from ..normalization import BuildNormalization, constructnormcfg

# from ..activation import BuildActivation
from .apconv import AdptivePaddingConv2d
from .seconv import SqueezeExcitationConv2d
# from ..normalization import BuildNormalization, constructnormcfg


from collections import OrderedDict

'''InvertedResidual'''
'''InvertedResidual'''
class InvertedResidualConv2d(nn.Module):
    '''MobileNetV2-style inverted residual block.

    Structure: optional 1x1 pointwise expansion -> 3x3 depthwise conv ->
    1x1 linear (un-activated) projection. A residual shortcut is added
    when stride == 1 and in_channels == out_channels.
    '''
    def __init__(self, in_channels, out_channels, stride, expand_ratio=1, dilation=1, norm_cfg=None, act_cfg=None):
        '''Initialize the block.

        Args:
            in_channels (int): number of input channels.
            out_channels (int): number of output channels.
            stride (int): depthwise conv stride, must be 1 or 2.
            expand_ratio (int/float): channel expansion factor; 1 skips the
                expansion conv entirely.
            dilation (int): dilation (and matching padding) of the depthwise conv.
            norm_cfg (dict or None): normalization config; None disables norm layers.
            act_cfg (dict or None): activation config; None disables activations.
        '''
        super(InvertedResidualConv2d, self).__init__()
        assert stride in [1, 2], 'stride must in [1, 2], but received %s' % stride
        # shortcut is only valid when spatial size and channel count are preserved
        self.use_res_connect = stride == 1 and in_channels == out_channels
        hidden_dim = int(round(in_channels * expand_ratio))
        layers = OrderedDict()
        if expand_ratio != 1:
            # 1x1 pointwise expansion to hidden_dim channels
            layers['conv1'] = nn.Conv2d(in_channels, hidden_dim, kernel_size=1, stride=1, padding=0, has_bias=False)
            if norm_cfg is not None:
                layers['bn1'] = BuildNormalization(constructnormcfg(placeholder=hidden_dim, norm_cfg=norm_cfg))
            if act_cfg is not None:
                layers['activation1'] = BuildActivation(act_cfg)
        # 3x3 depthwise conv (group == channels); padding == dilation keeps
        # spatial size for stride 1
        layers['conv2'] = nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, stride=stride, padding=dilation, dilation=dilation, group=hidden_dim, has_bias=False, pad_mode='pad')
        if norm_cfg is not None:
            layers['bn2'] = BuildNormalization(constructnormcfg(placeholder=hidden_dim, norm_cfg=norm_cfg))
        if act_cfg is not None:
            layers['activation2'] = BuildActivation(act_cfg)
        # 1x1 linear projection: normalized but deliberately not activated
        layers['conv3'] = nn.Conv2d(hidden_dim, out_channels, kernel_size=1, stride=1, padding=0, has_bias=False)
        if norm_cfg is not None:
            layers['bn3'] = BuildNormalization(constructnormcfg(placeholder=out_channels, norm_cfg=norm_cfg))
        self.conv = nn.SequentialCell(layers)

    '''forward'''
    def forward(self, x):
        '''Apply the block; add the identity shortcut when shapes permit.'''
        if self.use_res_connect:
            return x + self.conv(x)
        else:
            return self.conv(x)


'''InvertedResidualV3'''
'''InvertedResidualV3'''
class InvertedResidualConv2dV3(nn.Module):
    '''MobileNetV3-style inverted residual block.

    Structure: optional 1x1 expansion -> kxk depthwise conv (adaptive-padding
    variant when stride == 2) -> optional squeeze-excitation -> 1x1 linear
    projection. A residual shortcut is added when stride == 1 and
    in_channels == out_channels.
    '''
    def __init__(self, in_channels, out_channels, mid_channels=64, kernel_size=3, stride=1, se_cfg=None, with_expand_conv=True, norm_cfg=None, act_cfg=None):
        '''Initialize the block.

        Args:
            in_channels (int): number of input channels.
            out_channels (int): number of output channels.
            mid_channels (int): hidden channel count; must equal in_channels
                when with_expand_conv is False.
            kernel_size (int): depthwise conv kernel size.
            stride (int): depthwise conv stride, must be 1 or 2.
            se_cfg (dict or None): kwargs for SqueezeExcitationConv2d; None disables SE.
            with_expand_conv (bool): whether to build the 1x1 expansion conv.
            norm_cfg (dict or None): normalization config; None disables norm layers.
            act_cfg (dict or None): activation config; None disables activations.
        '''
        super(InvertedResidualConv2dV3, self).__init__()
        assert stride in [1, 2], 'stride must in [1, 2], but received %s' % stride
        self.with_res_shortcut = (stride == 1 and in_channels == out_channels)
        self.with_expand_conv = with_expand_conv
        # without an expansion conv the depthwise conv consumes the input directly
        if not self.with_expand_conv: assert mid_channels == in_channels
        if self.with_expand_conv:
            layers = OrderedDict()
            layers['conv1'] = nn.Conv2d(in_channels, mid_channels, kernel_size=1, stride=1, padding=0, has_bias=False)
            if norm_cfg is not None:
                layers['bn1'] = BuildNormalization(constructnormcfg(placeholder=mid_channels, norm_cfg=norm_cfg))
            if act_cfg is not None:
                layers['activation1'] = BuildActivation(act_cfg)
            self.expand_conv = nn.SequentialCell(layers)
        # depthwise conv: only the conv layer differs between the two strides,
        # the norm/activation layers are shared below
        layers = OrderedDict()
        if stride == 2:
            # NOTE(review): AdptivePaddingConv2d is called with torch-style
            # kwargs (groups=, bias=) unlike the group=/has_bias= used for
            # nn.Conv2d everywhere else — verify against its signature.
            layers['conv2'] = AdptivePaddingConv2d(mid_channels, mid_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size//2, groups=mid_channels, bias=False)
        else:
            layers['conv2'] = nn.Conv2d(mid_channels, mid_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size//2, group=mid_channels, has_bias=False, pad_mode='pad')
        if norm_cfg is not None:
            layers['bn2'] = BuildNormalization(constructnormcfg(placeholder=mid_channels, norm_cfg=norm_cfg))
        if act_cfg is not None:
            layers['activation2'] = BuildActivation(act_cfg)
        self.depthwise_conv = nn.SequentialCell(layers)
        if se_cfg is not None:
            self.se = SqueezeExcitationConv2d(**se_cfg)
        # 1x1 linear projection: normalized but deliberately not activated
        layers = OrderedDict()
        layers['conv3'] = nn.Conv2d(mid_channels, out_channels, kernel_size=1, stride=1, padding=0, has_bias=False)
        if norm_cfg is not None:
            layers['bn3'] = BuildNormalization(constructnormcfg(placeholder=out_channels, norm_cfg=norm_cfg))
        self.linear_conv = nn.SequentialCell(layers)

    '''forward'''
    def forward(self, x):
        '''Apply expand -> depthwise -> (SE) -> linear projection, with an
        identity shortcut when shapes permit.'''
        out = x
        if self.with_expand_conv: out = self.expand_conv(out)
        out = self.depthwise_conv(out)
        # self.se only exists when se_cfg was provided to __init__
        if hasattr(self, 'se'): out = self.se(out)
        out = self.linear_conv(out)
        if self.with_res_shortcut:
            return x + out
        return out