# MobileNet-V3 architecture (not shipped with pytorch 1.7.1; no pretrained weights there)
import torch
from torch import nn, Tensor
from torch.nn import functional as F
from typing import Callable, List, Optional
from functools import partial
from torchsummary import summary
from torch.autograd import Variable


def _make_divisible(ch, divisor=8, min_ch=None):
    """
    作用:将传入的channel调整到离8的整数倍最接近的大小，对机器更加友好
    """
    if min_ch is None:
        min_ch = divisor
    # 将ch调整到离得最近的8的倍数
    new_ch = max(min_ch, int(ch + divisor / 2) // divisor * divisor)
    # 确保向下取整时不会超过10%
    if new_ch < 0.9 * ch:
        new_ch += divisor
    return new_ch


class ConvBNActivation(nn.Sequential):
    """Conv2d -> normalization -> activation packaged as one Sequential.

    Defaults to BatchNorm2d and ReLU6 when no norm/activation factory is
    supplied. Padding is derived from the kernel size so spatial dims only
    shrink through the stride.
    """

    def __init__(self,
                 in_planes: int,
                 out_planes: int,
                 kernel_size: int = 3,
                 stride: int = 1,
                 groups: int = 1,
                 norm_layer: Optional[Callable[..., nn.Module]] = None,
                 activation_layer: Optional[Callable[..., nn.Module]] = None):
        # Fall back to the standard BN + ReLU6 pairing.
        norm_layer = norm_layer or nn.BatchNorm2d
        activation_layer = activation_layer or nn.ReLU6
        # "Same"-style padding: output H/W unchanged when stride == 1.
        pad = (kernel_size - 1) // 2
        conv = nn.Conv2d(in_channels=in_planes,
                         out_channels=out_planes,
                         kernel_size=kernel_size,
                         stride=stride,
                         padding=pad,
                         groups=groups,
                         bias=False)  # bias is redundant right before BN
        super(ConvBNActivation, self).__init__(conv,
                                               norm_layer(out_planes),
                                               activation_layer(inplace=True))


class SqueezeExcitation(nn.Module):
    """Squeeze-and-Excitation attention: GAP -> FC down -> ReLU -> FC up -> h-sigmoid.

    The per-channel gates produced by the small bottleneck are multiplied
    back onto the input feature map.
    """

    def __init__(self, input_c: int, squeeze_factor: int = 4):
        super(SqueezeExcitation, self).__init__()
        # Bottleneck width: input / squeeze_factor, rounded to a multiple of 8.
        hidden_c = _make_divisible(input_c // squeeze_factor, 8)
        # 1x1 convolutions act as fully connected layers on the pooled map.
        self.fc1 = nn.Conv2d(input_c, hidden_c, 1)  # reduce
        self.fc2 = nn.Conv2d(hidden_c, input_c, 1)  # restore to input width

    def forward(self, x: Tensor) -> Tensor:
        # Squeeze: global average pool down to one value per channel.
        gate = F.adaptive_avg_pool2d(x, output_size=(1, 1))
        # Excite: bottleneck MLP, ReLU then hard-sigmoid gating.
        gate = F.relu(self.fc1(gate), inplace=True)
        gate = F.hardsigmoid(self.fc2(gate), inplace=True)
        # Reweight every channel of the input feature map.
        return gate * x


class InvertedResidualConfig:
    """Hyper-parameters for one MobileNetV3 bneck block.

    All channel counts are pre-scaled by *width_multi* and rounded to
    multiples of 8 at construction time.
    """

    def __init__(self,
                 input_c: int,
                 kernel: int,
                 expanded_c: int,
                 out_c: int,
                 use_se: bool,
                 activation: str,
                 stride: int,
                 width_multi: float):
        # Pre-bind the multiplier so every channel count is scaled the same way.
        scaled = partial(self.adjust_channels, width_multi=width_multi)
        self.input_c = scaled(input_c)
        self.expanded_c = scaled(expanded_c)
        self.out_c = scaled(out_c)
        self.kernel = kernel
        self.use_se = use_se  # whether this block carries an SE module
        self.use_hs = activation == "HS"  # "HS" -> hard-swish, otherwise ReLU
        self.stride = stride

    @staticmethod
    def adjust_channels(channels: int, width_multi: float):
        """Scale by the width multiplier and round to a multiple of 8."""
        return _make_divisible(channels * width_multi, 8)


class InvertedResidual(nn.Module):
    """MobileNetV3 inverted-residual (bneck) block.

    Layout: optional 1x1 expand -> depthwise conv -> optional SE -> 1x1
    linear project, with an identity shortcut when the stride is 1 and the
    input/output channel counts match.
    """

    def __init__(self, cnf: InvertedResidualConfig, norm_layer: Callable[..., nn.Module]):
        super(InvertedResidual, self).__init__()

        if cnf.stride not in (1, 2):
            raise ValueError("illegal stride value.")
        # Shortcut only when the block leaves the tensor shape unchanged.
        self.use_res_connect = cnf.stride == 1 and cnf.input_c == cnf.out_c

        act = nn.Hardswish if cnf.use_hs else nn.ReLU
        modules: List[nn.Module] = []

        # 1x1 expansion — skipped when no expansion is requested
        # (e.g. the very first bneck, where expanded_c == input_c).
        if cnf.expanded_c != cnf.input_c:
            modules.append(ConvBNActivation(cnf.input_c,
                                            cnf.expanded_c,
                                            kernel_size=1,
                                            norm_layer=norm_layer,
                                            activation_layer=act))

        # Depthwise conv: groups == channels, each channel filtered alone.
        modules.append(ConvBNActivation(cnf.expanded_c,
                                        cnf.expanded_c,
                                        kernel_size=cnf.kernel,
                                        stride=cnf.stride,
                                        groups=cnf.expanded_c,
                                        norm_layer=norm_layer,
                                        activation_layer=act))

        # Optional squeeze-and-excitation attention on the expanded features.
        if cnf.use_se:
            modules.append(SqueezeExcitation(cnf.expanded_c))

        # 1x1 linear projection back down; Identity == no activation (y = x).
        modules.append(ConvBNActivation(cnf.expanded_c,
                                        cnf.out_c,
                                        kernel_size=1,
                                        norm_layer=norm_layer,
                                        activation_layer=nn.Identity))

        self.block = nn.Sequential(*modules)
        self.out_channels = cnf.out_c
        self.is_strided = cnf.stride > 1

    def forward(self, x: Tensor) -> Tensor:
        out = self.block(x)
        if self.use_res_connect:
            out += x  # identity shortcut
        return out


class MobileNetV3(nn.Module):
    """MobileNetV3 backbone + classification head.

    Args:
        inverted_residual_setting: per-bneck configuration list.
        last_channel: width of the penultimate fully connected layer.
        num_classes: number of output classes.
        block: bneck implementation, defaults to InvertedResidual.
        norm_layer: normalization factory, defaults to BatchNorm2d with
            eps=0.001 and momentum=0.01.

    Raises:
        ValueError: if the setting list is empty.
        TypeError: if it is not a list of InvertedResidualConfig.
    """

    def __init__(self,
                 inverted_residual_setting: List[InvertedResidualConfig],
                 last_channel: int,
                 num_classes: int = 1000,
                 block: Optional[Callable[..., nn.Module]] = None,
                 norm_layer: Optional[Callable[..., nn.Module]] = None):
        super(MobileNetV3, self).__init__()

        if not inverted_residual_setting:
            raise ValueError(
                "The inverted_residual_setting should not be empty.")
        # Check against the builtin list: isinstance() with the typing.List
        # alias is deprecated and unreliable across Python versions.
        elif not (isinstance(inverted_residual_setting, list) and
                  all(isinstance(s, InvertedResidualConfig) for s in inverted_residual_setting)):
            raise TypeError(
                "The inverted_residual_setting should be List[InvertedResidualConfig]")

        if block is None:
            block = InvertedResidual  # default bneck implementation

        if norm_layer is None:
            # partial pre-binds the BN hyper-parameters used by MobileNetV3.
            norm_layer = partial(nn.BatchNorm2d, eps=0.001, momentum=0.01)

        layers: List[nn.Module] = []

        # Stem: 3x3 stride-2 conv into the first bneck's input width (16).
        firstconv_output_c = inverted_residual_setting[0].input_c
        layers.append(ConvBNActivation(3,
                                       firstconv_output_c,
                                       kernel_size=3,
                                       stride=2,
                                       norm_layer=norm_layer,
                                       activation_layer=nn.Hardswish))
        # Stack of inverted-residual blocks, one per config entry.
        for cnf in inverted_residual_setting:
            layers.append(block(cnf, norm_layer))

        # Final 1x1 conv expands 6x before global pooling.
        lastconv_input_c = inverted_residual_setting[-1].out_c
        lastconv_output_c = 6 * lastconv_input_c
        layers.append(ConvBNActivation(lastconv_input_c,
                                       lastconv_output_c,
                                       kernel_size=1,
                                       norm_layer=norm_layer,
                                       activation_layer=nn.Hardswish))
        self.features = nn.Sequential(*layers)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        # Classification head: FC -> h-swish -> dropout -> FC.
        self.classifier = nn.Sequential(nn.Linear(lastconv_output_c, last_channel),
                                        nn.Hardswish(inplace=True),
                                        nn.Dropout(p=0.2, inplace=True),
                                        nn.Linear(last_channel, num_classes))

        # Weight initialization (matches the torchvision reference impl).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out")
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)

    def _forward_impl(self, x: Tensor) -> Tensor:
        x = self.features(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)  # (N, C, 1, 1) -> (N, C)
        x = self.classifier(x)

        return x

    def forward(self, x: Tensor) -> Tensor:
        return self._forward_impl(x)
        

def mobilenet_v3_large(num_classes: int = 1000,
                       reduced_tail: bool = False) -> MobileNetV3:
    """Build the MobileNetV3-Large model from
    "Searching for MobileNetV3" <https://arxiv.org/abs/1905.02244>.

    weights_link:
    https://download.pytorch.org/models/mobilenet_v3_large-8738ca79.pth

    Args:
        num_classes (int): number of classes
        reduced_tail (bool): If True, halves the channel counts of all feature
            layers between C4 and C5. Used to cut channel redundancy when the
            backbone serves detection/segmentation.
    """
    width_multi = 1.0  # width multiplier (alpha)
    # Pre-bind the multiplier so each config row below stays compact.
    cfg = partial(InvertedResidualConfig, width_multi=width_multi)
    scale = partial(InvertedResidualConfig.adjust_channels,
                    width_multi=width_multi)

    div = 2 if reduced_tail else 1  # tail divider, usually 1

    settings = [
        # input_c, kernel, expanded_c, out_c, use_se, activation, stride
        cfg(16, 3, 16, 16, False, "RE", 1),
        cfg(16, 3, 64, 24, False, "RE", 2),  # C1
        cfg(24, 3, 72, 24, False, "RE", 1),
        cfg(24, 5, 72, 40, True, "RE", 2),  # C2
        cfg(40, 5, 120, 40, True, "RE", 1),
        cfg(40, 5, 120, 40, True, "RE", 1),
        cfg(40, 3, 240, 80, False, "HS", 2),  # C3
        cfg(80, 3, 200, 80, False, "HS", 1),
        cfg(80, 3, 184, 80, False, "HS", 1),
        cfg(80, 3, 184, 80, False, "HS", 1),
        cfg(80, 3, 480, 112, True, "HS", 1),
        cfg(112, 3, 672, 112, True, "HS", 1),
        cfg(112, 5, 672, 160 // div, True, "HS", 2),  # C4
        cfg(160 // div, 5, 960 // div, 160 // div, True, "HS", 1),
        cfg(160 // div, 5, 960 // div, 160 // div, True, "HS", 1),
    ]

    return MobileNetV3(inverted_residual_setting=settings,
                       last_channel=scale(1280 // div),  # C5
                       num_classes=num_classes)


def mobilenet_v3_small(num_classes: int = 1000,
                       reduced_tail: bool = False) -> MobileNetV3:
    """
    Constructs a small MobileNetV3 architecture from
    "Searching for MobileNetV3" <https://arxiv.org/abs/1905.02244>.
    Args:
        num_classes (int): number of classes
        reduced_tail (bool): If True, reduces the channel counts of all feature layers
            between C4 and C5 by 2. It is used to reduce the channel redundancy in the
            backbone for Detection and Segmentation.
    """
    width_multi = 1.0  # width multiplier (alpha)
    # Pre-bind the width multiplier for configs and channel adjustment.
    bneck_conf = partial(InvertedResidualConfig, width_multi=width_multi)
    adjust_channels = partial(
        InvertedResidualConfig.adjust_channels, width_multi=width_multi)

    reduce_divider = 2 if reduced_tail else 1  # tail divider, usually 1

    inverted_residual_setting = [
        # input_c, kernel, expanded_c, out_c, use_se, activation, stride
        bneck_conf(16, 3, 16, 16, True, "RE", 2),  # C1
        bneck_conf(16, 3, 72, 24, False, "RE", 2),  # C2
        bneck_conf(24, 3, 88, 24, False, "RE", 1),
        bneck_conf(24, 5, 96, 40, True, "HS", 2),  # C3
        bneck_conf(40, 5, 240, 40, True, "HS", 1),
        bneck_conf(40, 5, 240, 40, True, "HS", 1),
        bneck_conf(40, 5, 120, 48, True, "HS", 1),
        bneck_conf(48, 5, 144, 48, True, "HS", 1),
        bneck_conf(48, 5, 288, 96 // reduce_divider, True, "HS", 2),  # C4
        bneck_conf(96 // reduce_divider, 5, 576 // reduce_divider,
                   96 // reduce_divider, True, "HS", 1),
        bneck_conf(96 // reduce_divider, 5, 576 // reduce_divider,
                   96 // reduce_divider, True, "HS", 1)
    ]
    last_channel = adjust_channels(1024 // reduce_divider)  # C5

    return MobileNetV3(inverted_residual_setting=inverted_residual_setting,
                       last_channel=last_channel,
                       num_classes=num_classes)


if __name__ == "__main__":
    net = mobilenet_v3_large(num_classes=751).cuda()
    print(net)
    summary(net, input_size=(3, 224, 224))

    in_x = torch.randn(4, 3, 224, 224).cuda()
    output = net(in_x)
    print('output size:', output.size())
