import torch
import torch.nn as nn
import torch.nn.functional as F


class SEModule(nn.Module):
    """Squeeze-and-Excitation channel attention.

    Globally average-pools each channel, runs the pooled vector through a
    two-layer bottleneck MLP gated by a sigmoid, and rescales the input
    feature map channel-wise with the resulting weights.

    Args:
        in_channels: Number of input (and output) channels.
        reduction: Bottleneck reduction factor for the MLP hidden width.
    """

    def __init__(self, in_channels, reduction=4):
        super().__init__()
        squeezed = in_channels // reduction
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(in_channels, squeezed, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(squeezed, in_channels, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, x):
        batch, channels = x.shape[:2]
        # (B, C, H, W) -> (B, C) pooled descriptor -> (B, C, 1, 1) gate
        gate = self.avg_pool(x).view(batch, channels)
        gate = self.fc(gate).view(batch, channels, 1, 1)
        # broadcasting performs the same per-channel rescaling as expand_as
        return x * gate


class InvertedResidual(nn.Module):
    """Inverted residual block (MobileNetV2-style) with squeeze-excitation.

    Pipeline: optional 1x1 expansion -> 3x3 depthwise conv -> SE -> 1x1
    projection, with an identity shortcut when stride is 1 and the input
    and output channel counts match.

    Args:
        in_channels: Channels of the input tensor.
        out_channels: Channels produced by the projection conv.
        stride: Stride of the depthwise conv (1 keeps resolution, 2 halves it).
        expand_ratio: Multiplier for the hidden (expanded) width; 1 skips
            the expansion conv entirely.
    """

    def __init__(self, in_channels, out_channels, stride, expand_ratio):
        super().__init__()
        self.stride = stride
        hidden_dim = int(round(in_channels * expand_ratio))
        self.use_res_connect = stride == 1 and in_channels == out_channels

        layers = []
        if expand_ratio != 1:
            # 1x1 pointwise expansion
            layers += [
                nn.Conv2d(in_channels, hidden_dim, kernel_size=1, stride=1, padding=0, bias=False),
                nn.BatchNorm2d(hidden_dim),
                nn.SiLU(),
            ]
        layers += [
            # 3x3 depthwise convolution (carries the stride)
            nn.Conv2d(hidden_dim, hidden_dim, kernel_size=3, stride=stride,
                      padding=1, groups=hidden_dim, bias=False),
            nn.BatchNorm2d(hidden_dim),
            nn.SiLU(),
            # channel attention on the expanded features
            SEModule(hidden_dim),
            # 1x1 pointwise projection (no activation, linear bottleneck)
            nn.Conv2d(hidden_dim, out_channels, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(out_channels),
        ]
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv(x)
        return x + out if self.use_res_connect else out


class MultiScaleAttention(nn.Module):
    """Multi-head self-attention over tokens enriched by two depthwise convs.

    Two depthwise convolutions (3x3 and 5x5) gather local context at two
    receptive fields; their sum is flattened into a token sequence and
    processed with standard scaled dot-product attention, then projected
    back to the input channel width and spatial layout.

    Args:
        dim: Channel width of the input feature map.
        heads: Number of attention heads.
        dim_head: Per-head channel width (inner width = heads * dim_head).
    """

    def __init__(self, dim, heads=4, dim_head=32):
        super().__init__()
        inner_dim = dim_head * heads
        self.heads = heads
        self.scale = dim_head ** -0.5

        # depthwise convs with two different receptive fields
        self.local_conv1 = nn.Conv2d(dim, dim, kernel_size=3, padding=1, groups=dim)
        self.local_conv2 = nn.Conv2d(dim, dim, kernel_size=5, padding=2, groups=dim)

        # query / key-value / output projections
        self.q = nn.Linear(dim, inner_dim, bias=False)
        self.kv = nn.Linear(dim, inner_dim * 2, bias=False)
        self.proj = nn.Linear(inner_dim, dim)

    def forward(self, x):
        batch, channels, height, width = x.shape
        tokens = height * width

        # fuse the two local-context branches
        x = self.local_conv1(x) + self.local_conv2(x)

        # (B, C, H, W) -> (B, N, C) token sequence
        x = x.flatten(2).transpose(1, 2)

        q = self.q(x).reshape(batch, tokens, self.heads, -1).transpose(1, 2)
        kv = self.kv(x).reshape(batch, tokens, 2, self.heads, -1).permute(2, 0, 3, 1, 4)
        k, v = kv.unbind(0)

        # scaled dot-product attention
        scores = (q @ k.transpose(-2, -1)) * self.scale
        attn = scores.softmax(dim=-1)

        out = (attn @ v).transpose(1, 2).reshape(batch, tokens, -1)
        out = self.proj(out)
        # back to the (B, C, H, W) spatial layout
        return out.transpose(1, 2).reshape(batch, channels, height, width)


class MobileViTBlock(nn.Module):
    """MobileViT v2 block: local conv features + global attention + fusion.

    A depthwise/pointwise pair builds a local representation, multi-scale
    attention adds global context, a 1x1 conv fuses back to the input
    width (with an optional identity shortcut), and a final depthwise/
    pointwise pair projects to the output width, applying the stride.

    Args:
        in_channels: Channels of the input tensor.
        out_channels: Channels produced by the output projection.
        stride: Stride of the output projection's depthwise conv.
        hidden_dim: Channel width used inside the attention branch.
        num_heads: Number of attention heads.
    """

    def __init__(self, in_channels, out_channels, stride, hidden_dim, num_heads=4):
        super().__init__()
        self.stride = stride

        # local representation: depthwise 3x3 then pointwise 1x1 to hidden_dim
        self.local_rep = nn.Sequential(
            nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1, groups=in_channels),
            nn.BatchNorm2d(in_channels),
            nn.Conv2d(in_channels, hidden_dim, kernel_size=1, stride=1, padding=0),
            nn.SiLU(),
        )

        # global representation via multi-scale self-attention
        self.global_rep = MultiScaleAttention(hidden_dim, heads=num_heads)

        # 1x1 conv bringing the attention output back to the input width
        self.fusion = nn.Sequential(
            nn.Conv2d(hidden_dim, in_channels, kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(in_channels),
        )

        # output projection: depthwise (applies stride) + pointwise
        self.conv_proj = nn.Sequential(
            nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=stride, padding=1, groups=in_channels),
            nn.BatchNorm2d(in_channels),
            nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0),
            nn.SiLU(),
        )

        self.use_res_connect = stride == 1 and in_channels == out_channels

    def forward(self, x):
        # local -> global -> fuse back to the input width
        features = self.fusion(self.global_rep(self.local_rep(x)))

        # identity shortcut when shapes allow it
        if self.use_res_connect:
            features = x + features

        return self.conv_proj(features)


class MobileViT(nn.Module):
    """MobileViT v2 image classifier.

    A 3x3 stride-2 convolutional stem, five stages (inverted residuals,
    with MobileViT blocks in stages 3-5), and a 1x1-conv classifier head.
    The ``variant`` name selects a width multiplier applied to every
    stage's channel count.

    Args:
        num_classes: Size of the classifier output.
        variant: One of the 'mobilevitv2_*' names in ``_WIDTH_MULTIPLIERS``.

    Raises:
        ValueError: If ``variant`` is not a supported name.
    """

    # width multiplier for each supported variant name
    _WIDTH_MULTIPLIERS = {
        'mobilevitv2_050': 0.5,
        'mobilevitv2_075': 0.75,
        'mobilevitv2_100': 1.0,
        'mobilevitv2_125': 1.25,
        'mobilevitv2_150': 1.5,
        'mobilevitv2_200': 2.0,
    }

    def __init__(self, num_classes=1000, variant='mobilevitv2_100'):
        super().__init__()

        if variant not in self._WIDTH_MULTIPLIERS:
            raise ValueError(f"不支持的变体: {variant}")
        wm = self._WIDTH_MULTIPLIERS[variant]

        # per-stage channel counts (base 16/32/64/96/128) scaled by the
        # width multiplier; truncated to int exactly as the base config was
        c1, c2, c3, c4, c5 = (int(base * wm) for base in (16, 32, 64, 96, 128))
        expansion = 4  # expand ratio shared by the hidden widths below

        # stem: 3x3 stride-2 conv halves the resolution
        self.conv_stem = nn.Sequential(
            nn.Conv2d(3, c1, kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(c1),
            nn.SiLU(),
        )

        # stage 1: single inverted residual, no expansion
        self.stage1 = nn.Sequential(
            InvertedResidual(in_channels=c1, out_channels=c2, stride=1, expand_ratio=1),
        )

        # stage 2: two inverted residuals, the first downsamples
        self.stage2 = nn.Sequential(
            InvertedResidual(in_channels=c2, out_channels=c2, stride=2, expand_ratio=4),
            InvertedResidual(in_channels=c2, out_channels=c2, stride=1, expand_ratio=4),
        )

        # stages 3-5: a downsampling inverted residual followed by a
        # MobileViT block at the stage's channel width
        self.stage3 = nn.Sequential(
            InvertedResidual(in_channels=c2, out_channels=c3, stride=2, expand_ratio=4),
            MobileViTBlock(in_channels=c3, out_channels=c3, stride=1,
                           hidden_dim=c3 * expansion, num_heads=4),
        )
        self.stage4 = nn.Sequential(
            InvertedResidual(in_channels=c3, out_channels=c4, stride=2, expand_ratio=4),
            MobileViTBlock(in_channels=c4, out_channels=c4, stride=1,
                           hidden_dim=c4 * expansion, num_heads=4),
        )
        self.stage5 = nn.Sequential(
            InvertedResidual(in_channels=c4, out_channels=c5, stride=2, expand_ratio=4),
            MobileViTBlock(in_channels=c5, out_channels=c5, stride=1,
                           hidden_dim=c5 * expansion, num_heads=4),
        )

        # classification head: 1x1 conv, global pool, dropout, linear
        self.classifier = nn.Sequential(
            nn.Conv2d(c5, 512, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(512),
            nn.SiLU(),
            nn.AdaptiveAvgPool2d(1),
            nn.Flatten(),
            nn.Dropout(0.1),
            nn.Linear(512, num_classes),
        )

        self._initialize_weights()

    def _initialize_weights(self):
        """Kaiming init for convs, unit/zero for BN, small normal for linears."""
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.Linear):
                nn.init.normal_(module.weight, 0, 0.01)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)

    def forward(self, x):
        """Run the stem, the five stages, and the classifier head in order."""
        for stage in (self.conv_stem, self.stage1, self.stage2,
                      self.stage3, self.stage4, self.stage5, self.classifier):
            x = stage(x)
        return x


def create_mobilevitv2_model(num_classes=1000, variant='mobilevitv2_100'):
    """Build a MobileViT v2 classifier.

    Args:
        num_classes: Size of the classifier output.
        variant: One of the supported 'mobilevitv2_*' variant names.

    Returns:
        A freshly initialized ``MobileViT`` instance.
    """
    model = MobileViT(num_classes=num_classes, variant=variant)
    return model


# Smoke test: build the default variant and run one dummy forward pass.
if __name__ == "__main__":
    model = create_mobilevitv2_model(num_classes=102, variant='mobilevitv2_100')
    x = torch.randn(1, 3, 224, 224)
    y = model(x)
    print(f"输入尺寸: {x.shape}")
    print(f"输出尺寸: {y.shape}")