import math

import torch
from torch import nn




class SELayer(nn.Module):
    """Squeeze-and-Excitation block.

    Globally pools each channel to one value, runs a bottleneck MLP,
    and rescales the input channels with the resulting [0, 1] gate.
    """

    def __init__(self, channel, reduction=4):
        super(SELayer, self).__init__()
        # Squeeze: collapse every channel's spatial map to a single scalar.
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # Excitation: FC -> ReLU -> FC -> Hardsigmoid gate.
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel),
            nn.Hardsigmoid(inplace=True)
        )

    def forward(self, x):
        batch, channels = x.size(0), x.size(1)
        squeezed = self.avg_pool(x).view(batch, channels)
        gate = self.fc(squeezed).view(batch, channels, 1, 1)
        # Per-channel rescaling; gate broadcasts over the spatial dims.
        return x * gate


class conv_bn_hswish(nn.Module):
    """3x3 convolution -> batch norm -> hard-swish activation.

    This equals to
    def conv_3x3_bn(inp, oup, stride):
        return nn.Sequential(
            nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
            nn.BatchNorm2d(oup),
            h_swish()
        )
    """

    def __init__(self, c1, c2, stride):
        super(conv_bn_hswish, self).__init__()
        self.conv = nn.Conv2d(c1, c2, 3, stride, 1, bias=False)
        self.bn = nn.BatchNorm2d(c2)
        self.act = nn.Hardswish(inplace=True)

    def forward(self, x):
        out = self.conv(x)
        out = self.bn(out)
        return self.act(out)

    def fuseforward(self, x):
        # Post-fusion path: batch norm has been folded into the conv weights.
        return self.act(self.conv(x))


class MobileNet_Block(nn.Module):
    def __init__(self, inp, oup, hidden_dim, kernel_size, stride, use_se, use_hs):
        super(MobileNet_Block, self).__init__()
        assert stride in [1, 2]

        self.identity = stride == 1 and inp == oup

        if inp == hidden_dim:
            self.conv = nn.Sequential(
                # dw
                nn.Conv2d(hidden_dim, hidden_dim, kernel_size, stride, (kernel_size - 1) // 2,
                          groups=hidden_dim,
                          bias=False),
                nn.BatchNorm2d(hidden_dim),
                nn.Hardswish(inplace=True) if use_hs else nn.ReLU(inplace=True),
                # Squeeze-and-Excite
                SELayer(hidden_dim) if use_se else nn.Sequential(),
                # pw-linear
                nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
                nn.BatchNorm2d(oup),
            )
        else:

            self.conv = nn.Sequential(
                # pw
                nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
                nn.BatchNorm2d(hidden_dim),
                nn.Hardswish(inplace=True) if use_hs else nn.ReLU(inplace=True),
                # dw
                nn.Conv2d(hidden_dim, hidden_dim, kernel_size, stride, (kernel_size - 1) // 2,
                          groups=hidden_dim,
                          bias=False),
                nn.BatchNorm2d(hidden_dim),
                # Squeeze-and-Excite
                SELayer(hidden_dim) if use_se else nn.Sequential(),
                nn.Hardswish(inplace=True)if use_hs else nn.ReLU(inplace=True),
                # pw-linear
                nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
                nn.BatchNorm2d(oup),
            )

    def forward(self, x):
        y = self.conv(x)
        if self.identity:
            return x + y
        else:
            return y


# CA
class CoordAtt(nn.Module):
    def __init__(self, inp, oup, reduction=32):
        super(CoordAtt, self).__init__()
        self.pool_h = nn.AdaptiveAvgPool2d((None, 1))
        self.pool_w = nn.AdaptiveAvgPool2d((1, None))
        mip = max(8, inp // reduction)
        self.conv1 = nn.Conv2d(inp, mip, kernel_size=1, stride=1, padding=0)
        self.bn1 = nn.BatchNorm2d(mip)
        self.act = nn.Hardswish(inplace=True)
        self.conv_h = nn.Conv2d(mip, oup, kernel_size=1, stride=1, padding=0)
        self.conv_w = nn.Conv2d(mip, oup, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        identity = x
        n, c, h, w = x.size()
        # c*1*W
        x_h = self.pool_h(x)
        # c*H*1
        # C*1*h
        x_w = self.pool_w(x).permute(0, 1, 3, 2)
        y = torch.cat([x_h, x_w], dim=2)
        # C*1*(h+w)
        y = self.conv1(y)
        y = self.bn1(y)
        y = self.act(y)
        x_h, x_w = torch.split(y, [h, w], dim=2)
        x_w = x_w.permute(0, 1, 3, 2)
        a_h = self.conv_h(x_h).sigmoid()
        a_w = self.conv_w(x_w).sigmoid()
        out = identity * a_w * a_h
        return out

class MobileNet_V3_small(nn.Module):
    """MobileNetV3-small feature extractor (no classification head).

    Forward output is a (N, 1024, 1, 1) feature map: 12 backbone layers,
    a 1x1 expand head with SE, global average pooling, and a final 1x1
    conv to 1024 channels.  Layer comments give the stride-reduced
    resolution for a 640x640 input.
    """

    def __init__(self):
        super().__init__()
        self.layer0 = conv_bn_hswish(3, 16, 2)                   # 0-p1/2   16 320 320
        self.layer1 = MobileNet_Block(16, 16, 16, 3, 2, 1, 0)    # 1-p2/4   16 160 160
        self.layer2 = MobileNet_Block(16, 24, 72, 3, 2, 0, 0)    # 2-p3/8   24 80 80
        self.layer3 = MobileNet_Block(24, 24, 88, 3, 1, 0, 0)    # 3-p3/8   24 80 80
        self.layer4 = MobileNet_Block(24, 40, 96, 5, 2, 1, 1)    # 4-p4/16  40 40 40
        self.layer5 = MobileNet_Block(40, 40, 240, 5, 1, 1, 1)   # 5-p4/16  40 40 40
        self.layer6 = MobileNet_Block(40, 40, 240, 5, 1, 1, 1)   # 6-p4/16  40 40 40
        self.layer7 = MobileNet_Block(40, 48, 120, 5, 1, 1, 1)   # 7-p4/16  48 40 40
        self.layer8 = MobileNet_Block(48, 48, 144, 5, 1, 1, 1)   # 8-p4/16  48 40 40
        self.layer9 = MobileNet_Block(48, 96, 288, 5, 2, 1, 1)   # 9-p5/32  96 20 20
        self.layer10 = MobileNet_Block(96, 96, 576, 5, 1, 1, 1)  # 10-p5/32 96 20 20
        self.layer11 = MobileNet_Block(96, 96, 576, 5, 1, 1, 1)  # 11-p5/32 96 20 20
        self.hidden = 576
        self.out = 96

        # Head: 1x1 expand + SE + hard-swish.
        self.conv2 = nn.Sequential(
            nn.Conv2d(self.out, self.hidden, 1, 1),
            nn.BatchNorm2d(self.hidden),
            SELayer(self.hidden),
            nn.Hardswish(inplace=True)
        )

        self.pool = nn.AdaptiveAvgPool2d(1)
        self.conv3 = nn.Sequential(
            nn.Conv2d(self.hidden, 1024, 1, 1),
            nn.Hardswish(inplace=True),
        )

    def forward(self, x):
        # Backbone layers 0..11 in order (attribute names kept for
        # state_dict compatibility with existing checkpoints).
        for i in range(12):
            x = getattr(self, 'layer%d' % i)(x)
        x = self.conv2(x)
        x = self.pool(x)
        return self.conv3(x)

    def load_param(self, model_path):
        """Copy weights from a checkpoint file, skipping classifier keys.

        Keys containing 'fc' (the classification head) are ignored.
        map_location='cpu' lets checkpoints saved on GPU load on
        CPU-only hosts; tensors are moved back by copy_ as needed.
        """
        param_dict = torch.load(model_path, map_location='cpu')
        for name in param_dict:
            if 'fc' in name:
                continue
            self.state_dict()[name].copy_(param_dict[name])

    def random_init(self):
        """Kaiming init for conv weights; ones/zeros for batch-norm affine."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)

if __name__ == "__main__":
    import time

    # Alternative benchmark targets, kept for reference:
    # model = MobileNet_V3_small().cuda()
    # x = torch.randn(1, 3, 640, 640).cuda()
    # model = MobileNet_V3_small()
    import torchvision

    model = torchvision.models.shufflenet_v2_x1_0(pretrained=False)
    x = torch.randn(1, 3, 248, 124)

    # Time one forward pass and report milliseconds.
    start = time.time()
    _ = model(x)
    print((time.time() - start) * 1000)

# import torch
# import torch.nn as nn
# import torch.nn.functional as F
#
# class h_swish(nn.Module):
#     def forward(self,x):
#         return x*F.relu6(x+3)/6
#
# class swish(nn.Module):
#     def forward(self,x):
#         return x*F.sigmoid(x)
#
# class h_sigmoid(nn.Module):
#     def forward(self,x):
#         return F.relu6(x+3)/6
#
# def _make_divisor(ch, divisor, min_ch = None):
#     if not min_ch:
#         min_ch = divisor
#     new_ch = max(min_ch,int(ch+divisor/2)//divisor*divisor)
#     if new_ch < 0.9*ch:
#         new_ch += divisor
#     return new_ch
#
# class SE_module(nn.Module):
#     def __init__(self,inchannel):
#         super(SE_module, self).__init__()
#         self.se = nn.Sequential(
#             nn.AdaptiveAvgPool2d((1,1)),
#             nn.Conv2d(inchannel,inchannel//4,1,1),
#             nn.ReLU(inplace=True),
#             nn.Conv2d(inchannel//4,inchannel,1),
#             h_sigmoid()
#         )
#     def forward(self,x):
#         mul = self.se(x)
#         return x * mul
#
# class bneck(nn.Module):
#     def __init__(self,inchannel,outchannel,hidden_channel,nonlinear,stride,SE=False):
#         super(bneck, self).__init__()
#         self.shortcut = True if stride == 1 and inchannel == outchannel else False
#
#         layers = []
#         if inchannel != hidden_channel:
#             layers.extend([
#                 nn.Conv2d(inchannel,hidden_channel,1),
#                 nn.BatchNorm2d(hidden_channel),
#                 nonlinear()
#             ])
#         layers.extend([
#             nn.Conv2d(hidden_channel,hidden_channel,3,1,1,groups = hidden_channel),
#             nn.BatchNorm2d(hidden_channel),
#             nonlinear()
#         ])
#         self.conv1 = nn.Sequential(*layers)
#         self.se = SE_module(hidden_channel)
#         self.conv2 = nn.Sequential(
#             nn.Conv2d(hidden_channel,outchannel,1,1),
#             nn.BatchNorm2d(outchannel)
#         )
#     def forward(self,x):
#         x = self.conv1(x)
#         x = self.conv2(self.se(x))
#         return x
#
#
# class MobileNet_V3(nn.Module):
#     def __init__(self, setting, inchannel=3, alpha=0.2, round_nearest = 8):
#         super(MobileNet_V3, self).__init__()
#         input_channel = _make_divisor(16*alpha,round_nearest)
#         last_channel = _make_divisor(setting[-1][3]*alpha,round_nearest)
#
#         self.HS = h_swish
#         self.RE = nn.ReLU
#
#         self.conv1 = nn.Sequential(nn.Conv2d(inchannel,input_channel,3,2,1),
#                                    nn.BatchNorm2d(input_channel),
#                                    self.HS()
#                                    )
#
#         self.block = bneck
#         self.blocks = nn.ModuleList([])
#         self.nonlin = self.HS
#         for _, kernel_size, hidden, out_channels, SE, nonlinear, stride in setting:
#             self.nonlin = self.RE if nonlinear == 'RE' else self.HS
#             self.hidden = _make_divisor(hidden*alpha,round_nearest)
#             out_channels = _make_divisor(out_channels*alpha,round_nearest)
#             self.blocks.append(self.block(input_channel, out_channels, self.hidden, self.nonlin, stride, SE))
#             input_channel = out_channels
#
#         self.conv2 = nn.Sequential(
#             nn.Conv2d(input_channel, self.hidden, 1,1),
#             nn.BatchNorm2d(self.hidden),
#             SE_module(self.hidden),
#             self.HS()
#         )
#
#         self.pool = nn.AdaptiveAvgPool2d((1))
#         self.conv3 = nn.Sequential(
#             nn.Conv2d(self.hidden, 1024, 1, 1),
#             self.HS(),
#             # nn.Dropout(0.2),
#             # nn.Conv2d(1024,classes,1,1)
#         )
#
#     def load_param(self, model_path):
#         param_dict = torch.load(model_path)
#         for i in param_dict:
#             if 'fc' in i:
#                 continue
#             self.state_dict()[i].copy_(param_dict[i])
#
#     def random_init(self):
#         # for m in self.modules():
#         #     if isinstance(m, nn.Conv2d):
#         #         n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
#         #         m.weight.data.normal_(0, math.sqrt(2. / n))
#         #     elif isinstance(m, nn.BatchNorm2d):
#         #         m.weight.data.fill_(1)
#         #         m.bias.data.zero_()
#
#         for m in self.modules():
#             if isinstance(m, nn.Conv2d):
#                 nn.init.kaiming_normal_(m.weight, mode='fan_out')
#                 if m.bias is not None:
#                     nn.init.zeros_(m.bias)
#             elif isinstance(m, nn.BatchNorm2d):
#                 nn.init.ones_(m.weight)
#                 nn.init.zeros_(m.bias)
#
#     def forward(self,x):
#         x = self.conv1(x)
#         for block in self.blocks:
#             x = block(x)
#         x = self.conv2(x)
#         x = self.pool(x)
#         x = self.conv3(x)
#         return x
#
#
#
# def MobileNet_V3_large():
#     setting = [
#         [16, 3, 16, 16, False, 'RE', 1],
#         [16, 3, 64, 24, False, 'RE', 2],
#         [24, 3, 72, 24, False, 'RE', 1],
#         [24, 5, 72, 40, True, 'RE', 2],
#         [40, 5, 120, 40, True, 'RE', 1],
#         [40, 5, 120, 40, True, 'RE', 1],
#         [40, 3, 240, 80, False, 'HS', 2],
#         [80, 3, 200, 80, False, 'HS', 1],
#         [80, 3, 184, 80, False, 'HS', 1],
#         [80, 3, 184, 80, False, 'HS', 1],
#         [80, 3, 480, 112, True, 'HS', 1],
#         [112, 3, 672, 112, True, 'HS', 1],
#         [112, 5, 672, 160, True, 'HS', 2],
#         [160, 5, 960, 160, True, 'HS', 1],
#         [160, 5, 960, 160, True, 'HS', 1]
#     ]
#     return MobileNet_V3(setting)
#
# def MobileNet_V3_small():
#     setting = [
#         [16, 3, 16, 16, True, 'RE', 2],
#         [16, 3, 72, 24, False, 'RE', 2],
#         [24, 3, 88, 24, False, 'RE', 1],
#         [24, 5, 96, 40, True, 'HS', 2],
#         [40, 5, 240, 40, True, 'HS',1],
#         [40, 5, 240, 40, True, 'HS', 1],
#         [40, 5, 120, 48, True, 'HS',1],
#         [48, 5, 144, 48, True, 'HS', 1],
#         [48, 5, 288, 96, True, 'HS',2],
#         [96, 5, 576, 96, True, 'HS',1],
#         [96, 5, 576, 96, True, 'HS',1]
#
#     ]
#     return MobileNet_V3(setting)
#
# if __name__ == '__main__':
#     input = torch.empty(1,3,224,224)
#     m = MobileNet_V3_small(3,10)
#     out = m(input)
#     print(out)
