import math
import torch
import torch.nn as nn
from . GroupConvBN import GroupConvBN

class MDW_Block(nn.Module):
    """Multi-DepthWise block: a 1x1 pointwise expansion followed by `dw_num`
    stacked depthwise convolutions, with an optional 1x1 projection back to
    the output channel count and an optional residual connection.

    Notes (translated from the original Chinese comments):
      * When ``proj=False``, ``hidden_sz`` must equal ``oup`` (asserted below).
      * The residual path is active only when ``res=True`` AND ``stride == 1``
        AND ``inp == oup``. The original author reports that enabling the
        residual consistently hurt performance in their experiments.
    """

    def __init__(self,
                 inp,
                 oup,
                 hidden_sz,
                 ks=3,
                 dw_num=4,
                 stride=1,
                 dilation=1,
                 e_bn=False,
                 e_act=None,
                 proj=False,
                 proj_bn=True,
                 res=False,
                 dw_act='gelu'):
        """
        Args:
            inp: number of input channels.
            oup: number of output channels.
            hidden_sz: expanded (hidden) channel count used by the depthwise stack.
            ks: depthwise kernel size.
            dw_num: number of stacked depthwise conv layers (must be a positive int).
            stride: stride applied by the LAST depthwise layer only (1 or 2).
            dilation: stored and used in the padding formula below, but
                NOTE(review): it is never forwarded to GroupConvBN, so the
                convolutions run with their own default dilation — confirm
                whether this parameter is supposed to have an effect.
            e_bn: BatchNorm flag for the pointwise expansion.
            e_act: activation for the pointwise expansion (None = identity).
            proj: if True, add a 1x1 projection from hidden_sz to oup;
                if False, hidden_sz must equal oup.
            proj_bn: BatchNorm flag for the projection layer.
            res: enable the residual connection (see class docstring).
            dw_act: activation used by every depthwise layer.
        """
        super(MDW_Block, self).__init__()
        self.stride = stride
        self.kernel_size = ks
        # "Same" padding for a dilated kernel: (kernel_size - 1) * dilation // 2.
        # NOTE(review): self.padding is computed but never used below —
        # GroupConvBN presumably derives its own padding; confirm and clean up.
        self.padding = (ks - 1) * dilation // 2
        self.dilation = dilation

        assert stride in [1, 2]
        assert isinstance(dw_num, int) and dw_num > 0

        self.out_channels = oup
        self.hidden_channels = hidden_sz
        self.depthwise_num = dw_num
        self.projection = proj

        # Residual is only valid when spatial size and channel count are
        # preserved. It was originally intended to compensate for the interest
        # manifold information destroyed by the depthwise layers' activations.
        # (Kept as `res == True` to preserve the original truthiness semantics
        # for non-bool arguments.)
        self.identity = stride == 1 and inp == oup and res == True
        if self.identity:
            # NOTE(review): alpha is registered as a learnable per-channel
            # blend weight, but forward() uses a plain sum (the alpha-weighted
            # variant is disabled there) — this parameter is currently trained
            # but unused. Kept so existing checkpoints still load.
            self.alpha = nn.Parameter(torch.full((oup, 1, 1), 0.5))

        # NOTE(review): attribute name 'poitwise' is a typo for 'pointwise';
        # kept as-is because renaming would change state_dict keys and break
        # previously saved checkpoints.
        self.poitwise = GroupConvBN(inp, hidden_sz, ks=1, stride=1, groups=1, bn=e_bn, act=e_act)

        # Stack of depthwise convolutions; only the final layer applies `stride`.
        self.depthwise = nn.Sequential()
        for i in range(dw_num):
            layer_stride = stride if i == dw_num - 1 else 1
            self.depthwise.add_module(
                f'dw_{i}',
                GroupConvBN(hidden_sz, hidden_sz, ks=ks, stride=layer_stride,
                            groups=hidden_sz, act=dw_act))

        if proj:
            # 1x1 linear (no activation) projection back to the output width.
            self.proj_layer = nn.Sequential(
                GroupConvBN(hidden_sz, oup, 1, 1, bn=proj_bn, act=None)
            )
        else:
            # Without a projection the depthwise output IS the block output,
            # so the channel counts must already match.
            assert self.hidden_channels == self.out_channels

        self._initialize_weights()

    def forward(self, x):
        """Pointwise expand -> depthwise stack -> optional projection,
        with an optional residual add (plain sum; see the __init__ note
        about the unused `alpha` blend parameter)."""
        shortcut = x
        x = self.depthwise(self.poitwise(x))
        if self.projection:
            x = self.proj_layer(x)
        if self.identity:
            # Alpha-blended variant was tried and disabled by the author:
            # x = self.alpha * shortcut + (1 - self.alpha) * x
            x = shortcut + x
        return x

    def _initialize_weights(self):
        """He (Kaiming, fan-out) initialization for convs, unit weight /
        zero bias for BatchNorm, small normal for linear layers."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # Fan-out-based He init: Var(w) = 2 / (k_h * k_w * out_channels).
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()

