from collections import OrderedDict
import torch.nn as nn



# Convolution + batch normalization + activation
def conv_bnr(inp, oup, stride=1):
    """3x3 convolution followed by batch norm and ReLU6 activation."""
    layers = [
        nn.Conv2d(inp, oup, (3, 3), (stride, stride), 1, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU6(),
    ]
    return nn.Sequential(*layers)

# Depthwise separable convolution block
def conv_dsc(inp, oup, stride=1):
    """Depthwise-separable convolution: depthwise 3x3 then pointwise 1x1.

    Both stages are followed by batch norm and ReLU6.
    """
    depthwise = [
        # grouped 3x3 conv with groups == channels (per-channel filtering)
        nn.Conv2d(inp, inp, (3, 3), (stride, stride), 1, groups=inp, bias=False),
        nn.BatchNorm2d(inp),
        nn.ReLU6(),
    ]
    pointwise = [
        # 1x1 conv mixes information across channels
        nn.Conv2d(inp, oup, (1, 1), (1, 1), 0, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU6(),
    ]
    return nn.Sequential(*depthwise, *pointwise)


class MobileNetV1(nn.Module):
    """MobileNetV1 backbone built from depthwise-separable convolutions.

    Spatial sizes in the comments assume a 160x160x3 input.  The
    classification head (avg + fc) is removed when the backbone is
    reused for embeddings (see Facenet.py).
    """

    def __init__(self):
        super(MobileNetV1, self).__init__()
        # 160,160,3 -> 20,20,256
        self.stage1 = nn.Sequential(
            conv_bnr(3, 32, 2),       # 160 -> 80
            conv_dsc(32, 64, 1),
            conv_dsc(64, 128, 2),     # 80 -> 40
            conv_dsc(128, 128, 1),
            conv_dsc(128, 256, 2),    # 40 -> 20
            conv_dsc(256, 256, 1),
        )
        # 20,20,256 -> 10,10,512
        self.stage2 = nn.Sequential(
            conv_dsc(256, 512, 2),
            *[conv_dsc(512, 512, 1) for _ in range(5)],
        )
        # 10,10,512 -> 5,5,1024
        self.stage3 = nn.Sequential(
            conv_dsc(512, 1024, 2),
            conv_dsc(1024, 1024, 1),
        )

        # Classification head (dropped when used as a feature extractor).
        self.avg = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(1024, 1000)

        # Simple init: small-normal conv weights, unit batch-norm scale.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.normal_(m.weight, 0, 0.1)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        for stage in (self.stage1, self.stage2, self.stage3):
            x = stage(x)
        x = self.avg(x)
        x = x.view(-1, 1024)
        return self.fc(x)

# MobileNetV3 feature-extraction network

# Basic Conv + BN + activation building block
class baseConv(nn.Module):
    """Conv2d + BatchNorm + optional activation.

    ``active`` selects the activation: 'HS' -> Hardswish, 'RE' -> ReLU6,
    anything else (the default ``False``) -> Identity (no activation).
    Padding is ``kernel_size // 2`` so odd kernels keep the spatial size
    (modulo stride).
    """

    def __init__(self, inchannel, outchannel, kernel_size=1, stride=1, groups=1, active=False, bias=False):
        super(baseConv, self).__init__()

        # Map the activation tag to its module class; unknown tags are a no-op.
        activations = {'HS': nn.Hardswish, 'RE': nn.ReLU6}
        self.ac = activations.get(active, nn.Identity)

        self.base = nn.Sequential(
            nn.Conv2d(
                in_channels=inchannel,
                out_channels=outchannel,
                kernel_size=kernel_size,
                stride=stride,
                padding=kernel_size // 2,
                groups=groups,
                bias=bias,
            ),
            nn.BatchNorm2d(outchannel),
            self.ac(),
        )

    def forward(self, x):
        return self.base(x)

# Squeeze-and-Excitation (SE) module
class SEModule(nn.Module):
    """Squeeze-and-Excitation channel attention.

    Global-average-pools the input to (B, C, 1, 1), passes it through a
    two-layer 1x1-conv bottleneck (reduction factor 4), and rescales the
    input channels with the resulting per-channel gate.

    Fix: the excitation output now ends in Hardsigmoid (as in the
    MobileNetV3 paper) instead of Hardswish.  A gate must be bounded to
    [0, 1]; Hardswish is unbounded above and negative for negative
    inputs, which breaks the attention semantics.  Weight shapes are
    unchanged, so existing checkpoints still load.
    """

    def __init__(self, inchannels):
        super(SEModule, self).__init__()
        hidden_channel = int(inchannels / 4)  # squeeze ratio of 4
        # squeeze: (B, C, H, W) -> (B, C, 1, 1)
        self.pool = nn.AdaptiveAvgPool2d((1, 1))
        self.linear1 = nn.Sequential(
            nn.Conv2d(inchannels, hidden_channel, kernel_size=1, stride=1, padding=0, bias=True),
            nn.ReLU6()
        )
        self.linear2 = nn.Sequential(
            nn.Conv2d(hidden_channel, inchannels, kernel_size=1, stride=1, padding=0, bias=True),
            nn.Hardsigmoid()  # gate bounded to [0, 1]
        )

    def forward(self, x):
        out = self.pool(x)
        out = self.linear1(out)
        out = self.linear2(out)
        # Broadcast the (B, C, 1, 1) gate over the spatial dimensions.
        return out * x

# bneck (inverted residual bottleneck) module
class bneckModule(nn.Module):
    """MobileNetV3 inverted-residual bottleneck.

    Structure: optional 1x1 expansion -> depthwise kxk convolution
    (carries the stride) -> optional SE block -> 1x1 linear projection.
    A skip connection is added when the block preserves both the channel
    count and the spatial size (stride == 1).

    Fix: the depthwise convolution previously ignored the ``stride``
    argument, so strided bottlenecks never downsampled; ``stride`` is
    now forwarded to the depthwise conv as the architecture requires.
    """

    def __init__(self, inchannels, expand_channels, outchannels, kernel_size, stride, SE, activate):
        super(bneckModule, self).__init__()

        layers = []
        # 1x1 expansion, only when the widths differ
        if inchannels != expand_channels:
            layers.append(baseConv(
                inchannel=inchannels, outchannel=expand_channels, active=activate))
        # depthwise conv (groups == channels); this is where downsampling happens
        layers.append(baseConv(
            inchannel=expand_channels, outchannel=expand_channels,
            kernel_size=kernel_size, stride=stride,
            active=activate, groups=expand_channels))
        # optional Squeeze-and-Excitation on the expanded features
        if SE:
            layers.append(SEModule(inchannels=expand_channels))
        # linear 1x1 projection (no activation: active defaults to False)
        layers.append(baseConv(inchannel=expand_channels, outchannel=outchannels))

        self.module = nn.Sequential(*layers)

        # residual only when input and output tensors have identical shape
        self.residual = (inchannels == outchannels and stride == 1)

    def forward(self, x):
        out = self.module(x)
        return out + x if self.residual else out


# MobileNetV3 architecture
class MobileNetV3(nn.Module):
    """MobileNetV3-style feature extractor producing a 128-d embedding.

    The backbone is a stem conv, a stack of bneck blocks, a final 1x1
    conv and a global average pool; the head maps 960 -> 1280 -> 128.
    """

    def __init__(self, init_weight=True):
        super(MobileNetV3, self).__init__()

        # Per-bneck config:
        # [inchannel, expand_channels, outchannels, kernel_size, stride, SE, activate]
        net_structure = [[16, 16, 16, 3, 1, False, 'HS'],
                         [16, 64, 24, 3, 2, False, 'RE'],
                         [24, 72, 24, 3, 1, False, 'RE'],
                         [24, 72, 40, 5, 2, True, 'RE'],
                         [40, 120, 40, 5, 1, True, 'RE'],
                         [40, 120, 40, 5, 1, True, 'RE'],
                         [40, 240, 80, 3, 2, False, 'HS'],
                         [80, 200, 80, 3, 1, False, 'HS'],
                         [80, 184, 80, 3, 1, False, 'HS'],
                         [80, 184, 80, 3, 1, False, 'HS'],
                         [80, 480, 112, 3, 1, True, 'HS'],
                         [112, 672, 112, 3, 1, True, 'HS'],
                         [112, 672, 160, 5, 2, True, 'HS'],
                         [160, 960, 160, 5, 1, True, 'HS'],
                         [160, 960, 160, 5, 1, True, 'HS']]

        # Ordered dict keeps layer names stable for the state_dict.
        layers = OrderedDict()
        # stem: 3 -> 16, stride 2
        layers['layer1'] = baseConv(inchannel=3, kernel_size=3, outchannel=16, stride=2, active='HS')
        # bneck stack, one entry per config row
        for idx, cfg in enumerate(net_structure):
            layers['bneck_{}'.format(idx)] = bneckModule(*cfg)
        # final 1x1 conv followed by global average pooling
        layers['conv_1*1'] = baseConv(net_structure[-1][2], 960, 1, stride=1, active='HS')
        layers['pool'] = nn.AdaptiveAvgPool2d((1, 1))

        self.module = nn.Sequential(layers)

        # embedding head: 960 -> 1280 -> 128
        self.features = nn.Sequential(
            nn.Linear(960, 1280),
            nn.Hardswish(),
            nn.Dropout(p=0.2),
            nn.Linear(1280, 128),
        )

        if init_weight:
            self.init_weight()

    def init_weight(self):
        """Kaiming init for convs, unit scale for BN, small-normal linears."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)

    def forward(self, x):
        out = self.module(x)
        out = out.view(out.size(0), -1)
        return self.features(out)





