import torch.nn as nn
import torch

__all__ = ['MixResNet', 'MixBlock', 'get_net_light']


class BasicBlock(nn.Module):
    """Inverted-residual 3D block with optional attention and a 2D shortcut.

    Main path: pointwise (1x1x1) conv to ``outplanes`` -> depthwise 3x3x3
    conv with ``spatial_stride`` applied to H/W only -> pointwise projection,
    optionally followed by SE channel attention and SAM spatial attention.
    The residual branch is ``conv_down`` (depthwise-separable, fixed spatial
    stride 2) when ``downsample`` is not None, otherwise ``conv3``
    (a depthwise-separable 2D 1x3x3 conv over the input).

    :param inplanes: number of input channels.
    :param outplanes: number of output channels.
    :param spatial_stride: spatial stride of the depthwise conv; the
        temporal stride is always 1.
    :param downsample: used only as a flag in ``forward`` — when not None the
        shortcut is this module's own ``conv_down``; the passed-in module is
        stored but never called here.
    :param se_ratio: SE reduction ratio; SE + SAM attention are built and
        applied iff this is not None.
    :param use_swish: stored but currently unused — ReLU is always used.
        NOTE(review): confirm whether Swish activation was intended.
    :param use_2d: stored but currently unused in this block.
    """
    def __init__(self,
                 inplanes,
                 outplanes,
                 spatial_stride=1,
                 downsample=None,
                 se_ratio=None,
                 use_swish=True,
                 use_2d=False,):
        super().__init__()

        self.inplanes = inplanes
        self.outplanes = outplanes
        self.spatial_stride = spatial_stride
        self.downsample = downsample
        self.se_ratio = se_ratio
        self.use_swish = use_swish
        self.use_2d = use_2d
        # Lookup tables pairing the network's stage widths with the expected
        # spatial size of the feature map at that width; used below to size
        # SAM's pooled attention map. Presumably assumes a fixed input crop
        # (e.g. 112x112) — TODO confirm against the data pipeline.
        self.channels = [24, 48, 96, 192]
        self.spatial = [28, 14, 7, 4]

        self.conv1 = nn.Sequential(
            # pw: pointwise expansion to outplanes channels
            nn.Conv3d(inplanes, outplanes, 1, 1, 0, bias=False),
            nn.BatchNorm3d(outplanes),
            nn.ReLU(inplace=True),
            # dw: depthwise 3x3x3; stride applied to H/W only, never to time
            nn.Conv3d(outplanes, outplanes, 3, stride=(1, self.spatial_stride, self.spatial_stride), padding=1, groups=outplanes, bias=False),
            nn.BatchNorm3d(outplanes),
            nn.ReLU(inplace=True)
        )

        # Pointwise projection (no activation — the residual sum is ReLU'd).
        self.conv2 = nn.Sequential(
            nn.Conv3d(outplanes, outplanes, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm3d(outplanes)
        )

        # The added 2D convolution branch: depthwise 1x3x3 + pointwise 1x1x1.
        # Used as the shortcut when this block does NOT downsample.
        self.conv3 = nn.Sequential(
            nn.Conv3d(outplanes, outplanes, kernel_size=(1, 3, 3), stride=1, padding=(0, 1, 1), bias=False, groups=outplanes),
            nn.BatchNorm3d(outplanes),
            nn.ReLU(inplace=True),

            nn.Conv3d(outplanes, outplanes, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm3d(outplanes)
        )

        # 2D convolution shortcut used when downsampling: depthwise 1x1x1 with
        # spatial stride 2, then a pointwise channel projection.
        self.conv_down = nn.Sequential(
            nn.Conv3d(inplanes, inplanes, 1, stride=(1, 2, 2), padding=0, bias=False, groups=inplanes),
            nn.BatchNorm3d(inplanes),
            nn.ReLU(inplace=True),

            nn.Conv3d(inplanes, outplanes, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm3d(outplanes)
        )

        # Lightweight channel attention (SEModule) followed by spatial
        # attention (SAM); other attention modules can be swapped in here.
        # SAM is sized via the lookup tables above — raises ValueError if
        # outplanes is not one of the known stage widths.
        if self.se_ratio is not None:
            self.se_module = SEModule(outplanes, self.se_ratio)
            self.sam = SAM(self.spatial[self.channels.index(self.outplanes)])

        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        """Run the block on a 5D ``(N, C, T, H, W)`` tensor and return a
        tensor with ``outplanes`` channels (spatially strided when the
        downsampling shortcut is active)."""

        out = self.conv1(x)
        out = self.conv2(out)

        if self.se_ratio is not None:
            out = self.se_module(out)
            out = self.sam(out)

        # When the block downsamples, take the strided shortcut; otherwise
        # take the joint 2D convolution of the input.
        if self.downsample is not None:
            identity = self.conv_down(x)
        else:
            identity = self.conv3(x)

        out = out + identity
        out = self.relu(out)

        return out


class MixResNet(nn.Module):
    """Lightweight 3D residual network mixing 2D (1x3x3) and 3D convolutions.

    Stem: a spatial (1, 3, 3) conv with spatial stride 2, then a depthwise
    temporal (5, 1, 1) conv, followed by a residual pointwise + depthwise-2D
    refinement. Four MixBlock stages widen the channels 24 -> 48 -> 96 -> 192,
    each halving H and W; the two deepest stages add SE + SAM attention. The
    head is a 1x1x1 conv, global average pooling and a two-layer classifier.

    Inspired by the work of Y. Zhou, X. Sun, Z-J. Zha and W. Zeng:
    "MiCT: Mixed 3D/2D Convolutional Tube for Human Action Recognition".
    """

    def __init__(self, block, layers, dropout, n_classes, **kwargs):
        """
        :param block: the block class, e.g. ``BasicBlock``.
        :param layers: the number of blocks for each of the four stages.
        :param dropout: dropout rate applied in the classifier head.
        :param n_classes: the number of classes in the dataset.
        """
        super(MixResNet, self).__init__(**kwargs)

        self.baseplanes = 24
        self.dropout = dropout
        self.num_classes = n_classes

        # Stem: spatial 2D conv (stride 2) + depthwise temporal conv.
        self.conv1 = nn.Sequential(
            nn.Conv3d(3, self.baseplanes, kernel_size=(1, 3, 3), stride=(1, 2, 2),
                      padding=(0, 1, 1), bias=False),
            nn.BatchNorm3d(self.baseplanes),
            nn.ReLU(inplace=True),

            nn.Conv3d(self.baseplanes, self.baseplanes, kernel_size=(5, 1, 1), stride=1,
                      padding=(2, 0, 0), groups=self.baseplanes, bias=False),
            nn.BatchNorm3d(self.baseplanes),
            nn.ReLU(inplace=True),
        )

        # Two 2D conv layers (pointwise + depthwise 1x3x3) used as a residual
        # refinement of the stem output in forward().
        self.conv2 = nn.Sequential(
            nn.Conv3d(self.baseplanes, self.baseplanes, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm3d(self.baseplanes),
            nn.ReLU(inplace=True),

            nn.Conv3d(self.baseplanes, self.baseplanes, kernel_size=(1, 3, 3), stride=1, padding=(0, 1, 1), bias=False, groups=self.baseplanes),
            nn.BatchNorm3d(self.baseplanes),
            nn.ReLU(inplace=True)
        )

        self.layer1 = MixBlock(block, 24, 24, layers[0], se_style='null', use_2d=True)
        self.layer2 = MixBlock(block, 24, 48, layers[1], se_style='null', use_2d=True)
        self.layer3 = MixBlock(block, 48, 96, layers[2], se_style='all', use_2d=True)
        self.layer4 = MixBlock(block, 96, 192, layers[3], se_style='all')

        # Head part 1: pointwise conv + global average pooling to (N, 192, 1, 1, 1).
        self.conv5 = nn.Sequential(
            nn.Conv3d(192, 192, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm3d(192),
            nn.ReLU(inplace=True),
            nn.AdaptiveAvgPool3d(1)
        )

        # Head part 2: two-layer classifier on the pooled feature vector.
        self.conv6 = nn.Sequential(
            nn.Linear(192, 192),
            nn.ReLU(inplace=True),
            # nn.Dropout, not nn.Dropout2d: the input here is a flat
            # (batch, features) tensor, so element-wise dropout is correct.
            # Dropout2d is meant for channel-wise dropout on conv feature maps.
            nn.Dropout(p=self.dropout),
            nn.Linear(192, self.num_classes)
        )

        self.relu = nn.ReLU(inplace=True)

        # Initialize network parameters.
        for m in self.modules():
            if isinstance(m, nn.Conv3d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
            elif isinstance(m, nn.BatchNorm3d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)

    def forward(self, x):
        """Map a clip tensor ``(N, 3, T, H, W)`` to class logits ``(N, n_classes)``."""
        out = self.conv1(x)

        # First residual connection: the stem output is refined by the 2D
        # conv branch and added back onto itself.
        identity = out
        out = self.conv2(out) + identity
        out = self.relu(out)

        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)

        out = self.conv5(out)
        # Drop the singleton T/H/W dimensions left by the global pooling.
        out = out.squeeze(4).squeeze(3).squeeze(2)
        out = self.conv6(out)
        return out


class MixBlock(nn.Module):
    """A stage of ``blocks`` residual units.

    The first unit spatially downsamples by 2 and changes the channel count,
    using a strided 1x1x1 projection shortcut; the remaining units keep the
    shape. SE attention is enabled per unit according to ``se_style``.

    :param block: the residual unit class, e.g. ``BasicBlock``.
    :param inplanes: number of input channels.
    :param outplanes: number of output channels.
    :param blocks: number of units in the stage.
    :param se_style: ``'all'`` enables SE in every unit, ``'half'`` in every
        other unit (starting with the first); anything else disables SE.
    :param use_2d: forwarded unchanged to every unit.
    """

    def __init__(self, block, inplanes, outplanes, blocks, se_style='all', use_2d=False):
        super(MixBlock, self).__init__()

        self.se_ratio = 1 / 16
        self.blocks = blocks
        self.use2d = use_2d

        # Decide per-unit SE usage up front.
        if se_style == 'all':
            se_flags = [True for _ in range(blocks)]
        elif se_style == 'half':
            se_flags = [idx % 2 == 0 for idx in range(blocks)]
        else:
            se_flags = [False for _ in range(blocks)]

        # Projection shortcut for the strided first unit.
        downsample = nn.Sequential(
            nn.Conv3d(inplanes, outplanes, 1, stride=(1, 2, 2), padding=0, bias=False),
            nn.BatchNorm3d(outplanes),
        )

        units = [block(inplanes, outplanes, 2, downsample=downsample,
                       se_ratio=self.se_ratio if se_flags[0] else None,
                       use_2d=self.use2d)]
        for idx in range(1, blocks):
            units.append(block(outplanes, outplanes,
                               se_ratio=self.se_ratio if se_flags[idx] else None,
                               use_2d=self.use2d))
        self.bottlenecks = nn.ModuleList(units)

    def forward(self, x):
        """Run the units sequentially over ``x``."""
        out = x
        for unit in self.bottlenecks:
            out = unit(out)
        return out


def get_net_light(dropout=0.5, n_classes=2, **kwargs):
    """Build the lightest MixResNet configuration: one BasicBlock per stage.

    :param dropout: dropout rate for the classifier head.
    :param n_classes: number of output classes.
    :param kwargs: forwarded to :class:`MixResNet`.
    :return: a freshly constructed ``MixResNet`` instance.
    """
    return MixResNet(BasicBlock, [1, 1, 1, 1], dropout, n_classes, **kwargs)


# class SEModule(nn.Module):

#     def __init__(self, channels, reduction):
#         super().__init__()
#         self.avg_pool = nn.AdaptiveAvgPool3d(1)
#         self.bottleneck = self._round_width(channels, reduction)
#         self.fc1 = nn.Conv3d(
#             channels, self.bottleneck, kernel_size=1, padding=0)
#         self.relu = nn.ReLU()
#         self.fc2 = nn.Conv3d(
#             self.bottleneck, channels, kernel_size=1, padding=0)
#         self.sigmoid = nn.Sigmoid()

#     @staticmethod
#     def _round_width(width, multiplier, min_width=8, divisor=8):
#         width *= multiplier
#         min_width = min_width or divisor
#         width_out = max(min_width,
#                         int(width + divisor / 2) // divisor * divisor)
#         if width_out < 0.9 * width:
#             width_out += divisor
#         return int(width_out)

#     def forward(self, x):
#         module_input = x
#         x = self.avg_pool(x)
#         x = self.fc1(x)
#         x = self.relu(x)
#         x = self.fc2(x)
#         x = self.sigmoid(x)
#         return module_input * x


# class SEModule(nn.Module):
#     # SENet implemented with convolutions; cheaper to compute. Reduction defaults to 1/16; uses average pooling.
#     def __init__(self, channels, reduction):
#         super().__init__()
#         self.avg_pool = nn.AdaptiveAvgPool3d(1)
#         self.bottleneck = channels // 16
#         self.fc1 = nn.Conv3d(
#             channels, self.bottleneck, kernel_size=1, padding=0)
#         self.relu = nn.ReLU()
#         self.fc2 = nn.Conv3d(
#             self.bottleneck, channels, kernel_size=1, padding=0)
#         self.sigmoid = nn.Sigmoid()

#     def forward(self, x):
#         module_input = x
#         x = self.avg_pool(x)
#         x = self.fc1(x)
#         x = self.relu(x)
#         x = self.fc2(x)
#         x = self.sigmoid(x)
#         return module_input * x


# class SEModule(nn.Module):
#     # SENet implemented with convolutions; cheaper to compute. Reduction defaults to 1/16; uses average pooling + max pooling.
#     def __init__(self, channels, reduction):
#         super().__init__()
#         self.avg_pool = nn.AdaptiveAvgPool3d(1)
#         self.max_pool = nn.AdaptiveMaxPool3d(1)
#         self.bottleneck = channels // 16
#         self.fc1 = nn.Conv3d(
#             channels, self.bottleneck, kernel_size=1, padding=0)
#         self.relu = nn.ReLU()
#         self.fc2 = nn.Conv3d(
#             self.bottleneck, channels, kernel_size=1, padding=0)
#         self.sigmoid = nn.Sigmoid()

#     def forward(self, x):
#         module_input = x
#         final = x
#         x = self.avg_pool(x)
#         x = self.fc1(x)
#         x = self.relu(x)
#         x = self.fc2(x)
#         x = x + self.fc2(self.relu(self.fc1(self.max_pool(module_input))))
#         x = self.sigmoid(x)
#         return final * x


class SEModule(nn.Module):
    """ECA-style channel attention in 3D.

    Globally average-pools the input to one value per channel, runs a 1-D
    convolution across the channel dimension to model local cross-channel
    interaction, and gates the input with the resulting sigmoid weights.
    Cheaper than SENet's bottleneck MLP.

    :param channels: number of input channels (unused by the layers; kept
        for interface parity with SE variants).
    :param reduction: accepted for interface compatibility but unused.
    :param k_size: 1-D kernel size; the original author considered deriving
        it from ``channels`` but fixed it at 3.
    """
    def __init__(self, channels, reduction, k_size=3):
        super().__init__()
        self.avg_pool = nn.AdaptiveAvgPool3d(1)
        self.fc2 = nn.Conv1d(1, 1, kernel_size=k_size, padding=(k_size - 1) // 2, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Global context: (N, C, T, H, W) -> (N, C, 1, 1, 1) -> (N, 1, C).
        pooled = self.avg_pool(x).flatten(2).transpose(1, 2)
        # Cross-channel conv, then reshape back to (N, C, 1, 1, 1).
        gates = self.fc2(pooled).transpose(1, 2).unsqueeze(-1).unsqueeze(-1)
        # Broadcast the per-channel gates over the original tensor.
        return x * self.sigmoid(gates)


class SAM(nn.Module):
    """Spatial attention module.

    Average-pools the clip down to a single ``size`` x ``size`` frame,
    squeezes the channel dimension with both mean and max, convolves the
    resulting 2-channel map into a one-channel sigmoid mask, and broadcasts
    the mask over the original features. This assumes the clips share a
    similar spatial region of interest across time.

    :param size: spatial resolution of the pooled map; must match the
        input's H and W so the mask broadcasts onto it.
    """
    def __init__(self, size):
        super().__init__()
        # Collapse time to 1 while keeping (size, size) spatially.
        self.avg_pool = nn.AdaptiveAvgPool3d((1, size, size))
        self.conv_after_concat = nn.Conv3d(2, 1, kernel_size=(1, 3, 3), stride=1, padding=(0, 1, 1))
        self.sigmoid_spatial = nn.Sigmoid()

    def forward(self, x):
        pooled = self.avg_pool(x)
        # Channel descriptors: mean and max, each (N, 1, 1, size, size).
        descriptors = torch.cat(
            (pooled.mean(dim=1, keepdim=True), pooled.amax(dim=1, keepdim=True)),
            dim=1)
        # One-channel spatial mask in (0, 1), broadcast over C and T.
        mask = self.sigmoid_spatial(self.conv_after_concat(descriptors))
        return x * mask
