import torch
import torch.nn as nn
import torch.nn.functional as F


# Hard-sigmoid activation function
class h_sigmoid(nn.Module):
    """Hard sigmoid: relu6(x + 3) / 6 — a cheap piecewise-linear sigmoid."""

    def __init__(self, inplace=True):
        super(h_sigmoid, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        # `x + 3.` allocates a fresh tensor, so the in-place clamp below
        # never mutates the caller's input.
        shifted = x + 3.
        if self.inplace:
            return shifted.clamp_(0., 6.) / 6.
        return shifted.clamp(0., 6.) / 6.


# Hard-swish activation function
class h_swish(nn.Module):
    """Hard swish: x * relu6(x + 3) / 6, as used by MobileNetV3."""

    def __init__(self, inplace=True):
        super(h_swish, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        # hard_sigmoid gate applied to the input itself.
        return x * (F.relu6(x + 3., inplace=self.inplace) / 6.)


# Round a channel count to a multiple of the given divisor
def _make_divisible(v, divisor=8, min_value=None):
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    # 确保四舍五入后的值不会小于原值的90%
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v


# Squeeze-and-Excitation module
class SqueezeBlock(nn.Module):
    """Squeeze-and-Excitation: re-weight channels by globally pooled statistics.

    `divide` is the bottleneck reduction ratio of the two-layer gating MLP.
    """

    def __init__(self, exp_size, divide=4):
        super(SqueezeBlock, self).__init__()
        squeezed = exp_size // divide
        self.dense = nn.Sequential(
            nn.Linear(exp_size, squeezed),
            nn.ReLU(inplace=True),
            nn.Linear(squeezed, exp_size),
            h_sigmoid()
        )

    def forward(self, x):
        batch, channels, height, width = x.size()
        # Squeeze: global average pool to (batch, channels).
        pooled = F.avg_pool2d(x, kernel_size=[height, width]).view(batch, -1)
        # Excite: per-channel gates in [0, 1], broadcast over the spatial map.
        gates = self.dense(pooled).view(batch, channels, 1, 1)
        return x * gates


# Basic building block of MobileNetV3
class MobileBlock(nn.Module):
    """MobileNetV3 inverted-residual block:
    1x1 expansion conv -> depthwise conv -> optional SE -> 1x1 projection.
    """

    def __init__(self, in_channels, out_channels, kernal_size, stride, nonLinear, SE, exp_size, dropout_rate=1.0):
        """
        Args:
            in_channels:  number of input feature channels.
            out_channels: number of output feature channels.
            kernal_size:  depthwise kernel size (typo'd name kept so keyword
                          callers stay compatible).
            stride:       depthwise stride.
            nonLinear:    "RE" selects ReLU; anything else selects h_swish.
            SE:           whether to insert a Squeeze-and-Excitation module.
            exp_size:     expanded (hidden) channel count.
            dropout_rate: accepted for interface compatibility but unused.
        """
        super(MobileBlock, self).__init__()
        self.out_channels = out_channels
        self.nonLinear = nonLinear
        self.SE = SE
        self.dropout_rate = dropout_rate  # NOTE(review): stored but never used
        padding = (kernal_size - 1) // 2

        # Residual shortcut only when spatial size and channel count match.
        self.use_connect = (stride == 1 and in_channels == out_channels)

        # Activation choice for this block.
        if self.nonLinear == "RE":
            activation = nn.ReLU
        else:
            activation = h_swish

        # 1x1 pointwise conv: expand in_channels -> exp_size.
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels, exp_size, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(exp_size),
            activation(inplace=True)
        )

        # Depthwise conv. bias=False: a conv bias is redundant immediately
        # before BatchNorm (the BN shift absorbs it), and the expansion conv
        # above already omits it — this was previously inconsistent.
        self.depth_conv = nn.Sequential(
            nn.Conv2d(exp_size, exp_size, kernel_size=kernal_size, stride=stride, padding=padding,
                      groups=exp_size, bias=False),
            nn.BatchNorm2d(exp_size),
        )

        # Optional Squeeze-and-Excitation channel re-weighting.
        if self.SE:
            self.squeeze_block = SqueezeBlock(exp_size)

        # 1x1 pointwise conv: project exp_size -> out_channels.
        # NOTE(review): the MobileNetV3 paper uses a *linear* bottleneck here
        # (no activation after the projection); this implementation applies
        # one — confirm whether that deviation is intentional before changing.
        self.point_conv = nn.Sequential(
            nn.Conv2d(exp_size, out_channels, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(out_channels),
            activation(inplace=True)
        )

    def forward(self, x):
        out = self.conv(x)           # expand: in_channels -> exp_size
        out = self.depth_conv(out)   # depthwise spatial filtering

        # Squeeze-and-Excitation re-weighting.
        if self.SE:
            out = self.squeeze_block(out)

        out = self.point_conv(out)   # project: exp_size -> out_channels

        # Residual connection when shapes allow it.
        if self.use_connect:
            return x + out
        else:
            return out


# MobileNetV3 main network
class MobileNetV3(nn.Module):
    """MobileNetV3 backbone plus 1x1-conv classifier head."""

    def __init__(self, model_mode="LARGE", num_classes=1000, multiplier=1.0):
        """
        Args:
            model_mode:  only "LARGE" is implemented.
            num_classes: size of the classifier output.
            multiplier:  width multiplier applied to every channel count
                         (results snapped via _make_divisible).

        Raises:
            ValueError: for any unsupported model_mode. Previously an unknown
                mode silently left the model half-built and forward() crashed
                with an AttributeError.
        """
        super(MobileNetV3, self).__init__()
        self.num_classes = num_classes

        if model_mode != "LARGE":
            raise ValueError(f"Unsupported model_mode: {model_mode!r}; only 'LARGE' is implemented")

        # (in_channels, out_channels, kernel, stride, nonlinearity, SE, expansion)
        layers = [
            [16, 16, 3, 1, "RE", False, 16],
            [16, 24, 3, 2, "RE", False, 64],
            [24, 24, 3, 1, "RE", False, 72],
            [24, 40, 5, 2, "RE", True, 72],
            [40, 40, 5, 1, "RE", True, 120],
            [40, 40, 5, 1, "RE", True, 120],
            [40, 80, 3, 2, "HS", False, 240],
            [80, 80, 3, 1, "HS", False, 200],
            [80, 80, 3, 1, "HS", False, 184],
            [80, 80, 3, 1, "HS", False, 184],
            [80, 112, 3, 1, "HS", True, 480],
            [112, 112, 3, 1, "HS", True, 672],
            [112, 160, 5, 1, "HS", True, 672],
            [160, 160, 5, 2, "HS", True, 672],
            [160, 160, 5, 1, "HS", True, 960],
        ]

        # Stem: stride-2 3x3 conv. bias=False because BatchNorm follows and
        # its shift makes a conv bias redundant.
        init_conv_out = _make_divisible(16 * multiplier)
        self.init_conv = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=init_conv_out, kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(init_conv_out),
            h_swish(inplace=True),
        )

        # Stacked inverted-residual blocks, all channel counts width-scaled.
        self.block = nn.Sequential(*[
            MobileBlock(
                _make_divisible(in_channels * multiplier),
                _make_divisible(out_channels * multiplier),
                kernal_size, stride, nonlinear, se,
                _make_divisible(exp_size * multiplier),
            )
            for in_channels, out_channels, kernal_size, stride, nonlinear, se, exp_size in layers
        ])

        # 1x1 expansion to 960 * multiplier channels before pooling.
        out_conv1_in = _make_divisible(160 * multiplier)
        out_conv1_out = _make_divisible(960 * multiplier)
        self.out_conv1 = nn.Sequential(
            nn.Conv2d(out_conv1_in, out_conv1_out, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(out_conv1_out),
            h_swish(inplace=True),
        )

        # Classifier head: 1x1 convs act as fully-connected layers on the
        # 1x1 pooled feature map. No BN here, so conv biases are kept.
        out_conv2_in = _make_divisible(960 * multiplier)
        out_conv2_out = _make_divisible(1280 * multiplier)
        self.out_conv2 = nn.Sequential(
            nn.Conv2d(out_conv2_in, out_conv2_out, kernel_size=1, stride=1),
            h_swish(inplace=True),
            nn.Conv2d(out_conv2_out, self.num_classes, kernel_size=1, stride=1),
        )

        self.apply(self._initialize_weights)

    def _initialize_weights(self, m):
        """Xavier for convs, N(0, 0.01) for linears, unit-scale/zero-shift BN."""
        if isinstance(m, nn.Conv2d):
            nn.init.xavier_uniform_(m.weight)
            if m.bias is not None:
                nn.init.zeros_(m.bias)
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
        elif isinstance(m, nn.Linear):
            nn.init.normal_(m.weight, 0, 0.01)
            if m.bias is not None:
                nn.init.zeros_(m.bias)

    def forward(self, x):
        """Return raw class logits of shape (batch, num_classes)."""
        out = self.init_conv(x)    # stem
        out = self.block(out)      # inverted-residual stack
        out = self.out_conv1(out)  # 1x1 expansion
        batch, channels, height, width = out.size()
        out = F.avg_pool2d(out, kernel_size=[height, width])  # global average pool
        out = self.out_conv2(out)  # classifier head
        return out.view(batch, -1)  # flatten to (batch, num_classes)


# Quick smoke test of the network
if __name__ == "__main__":
    net = MobileNetV3(num_classes=1000)
    param_count = sum(param.numel() for param in net.parameters())
    print(f'Total parameters: {param_count / 1e6:.2f} M')
    # ImageNet-sized dummy input: batch 1, 3 channels, 224x224.
    dummy = torch.randn(1, 3, 224, 224)
    logits = net(dummy)
    print(logits.shape)  # expected: torch.Size([1, 1000])

    # Highlights:
    # - Squeeze-and-Excitation blocks add a channel-attention mechanism.
    # - h-swish (hard swish) replaces ReLU6 with minimal accuracy loss.
    # - The architecture was found via neural architecture search (NAS).
