import torch
import torch.nn as nn
import torch.nn.functional as F


class SpatialAttentionFusion(nn.Module):
    """Fuse an image feature map with a feature vector via spatial attention.

    The vector is projected to the image channel width, broadcast over the
    spatial grid, and concatenated with the image features to predict a
    per-pixel attention map in [0, 1]. The attended features are added back
    through a learnable, zero-initialized residual gate, so at initialization
    the module is an identity mapping on ``img_feat``.
    """

    def __init__(self, img_ch=256, vec_ch=256):
        super().__init__()
        # Project the vector into the image channel space.
        self.vec_adapter = nn.Sequential(
            nn.Linear(vec_ch, img_ch),
            nn.LayerNorm(img_ch),
            nn.GELU()
        )

        # Predict a single-channel spatial attention map from the
        # concatenation of image and broadcast vector features.
        self.attn_conv = nn.Sequential(
            nn.Conv2d(img_ch * 2, 512, 1),
            nn.BatchNorm2d(512),
            nn.ReLU(),
            nn.Conv2d(512, 1, 1),
            nn.Sigmoid()
        )

        # Zero-initialized gate: fusion strength is learned from scratch.
        self.gamma = nn.Parameter(torch.zeros(1))

    def forward(self, img_feat, vec_feat):
        """Fuse features.

        Args:
            img_feat: image features, shape [b, img_ch, h, w].
            vec_feat: vector features, shape [b, vec_ch].

        Returns:
            Fused features, shape [b, img_ch, h, w].
        """
        projected = self.vec_adapter(vec_feat)                  # [b, img_ch]
        # Broadcast the vector over every spatial location.
        broadcast = projected[:, :, None, None].expand_as(img_feat)

        combined = torch.cat((img_feat, broadcast), dim=1)      # [b, 2*img_ch, h, w]
        attn = self.attn_conv(combined)                         # [b, 1, h, w]

        # Gated residual: identity plus attention-weighted features.
        return img_feat + self.gamma * (img_feat * attn)


class EnhancedMultiModalNet(nn.Module):
    """Dual image stream + feature vector classifier.

    Two 3-channel images are encoded by structurally identical (but
    independently weighted) CNN backbones, summed, fused with an encoded
    42-dim feature vector via spatial attention, then classified.
    """

    def __init__(self, channel=1, lei=4):
        """
        Args:
            channel: NOTE(review): currently unused — both streams are built
                with ``nn.Conv2d(3, ...)`` so they expect 3-channel input.
                Confirm the intended meaning before wiring it in.
            lei: number of output classes. Previously accepted but ignored
                (the head hard-coded 4 outputs); now used, default preserves
                the old behavior.
        """
        super().__init__()

        def build_stream():
            # Backbone topology shared by both streams; each call creates
            # fresh modules, so weights are NOT shared between streams.
            # Spatial size is halved 3 times (e.g. 256 -> 32).
            return nn.Sequential(
                nn.Conv2d(3, 64, 3, padding=1),
                ResidualSEBlock(64),
                nn.MaxPool2d(2),
                nn.Conv2d(64, 128, 3, padding=1),
                ResidualSEBlock(128),
                nn.MaxPool2d(2),
                nn.Conv2d(128, 256, 3, padding=1),
                ResidualSEBlock(256),
                nn.MaxPool2d(2),
                ChannelAttention(256)
            )

        self.stream1 = build_stream()
        self.stream2 = build_stream()

        # Vector encoder: 42 -> 256 with normalization and regularization.
        # NOTE(review): StochasticDepth(0.2) sets survival_prob=0.2, i.e. the
        # branch is dropped 80% of training steps — verify this was not meant
        # to be a drop rate of 0.2 (survival_prob=0.8).
        self.vec_encoder = nn.Sequential(
            nn.Linear(42, 512),
            nn.LayerNorm(512),
            nn.GELU(),
            nn.Dropout(0.3),
            nn.Linear(512, 256),
            StochasticDepth(0.2)
        )

        # Spatial-attention fusion of image and vector features.
        self.fusion = SpatialAttentionFusion()

        # Classification head; output width follows `lei`.
        self.classifier = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Flatten(),
            nn.Linear(256, 512),
            nn.BatchNorm1d(512),
            nn.GELU(),
            nn.Dropout(0.5),
            nn.Linear(512, lei)
        )

    def forward(self, x1, x2, x3):
        """Classify a pair of images plus a feature vector.

        Args:
            x1: first image batch, [b, 3, H, W].
            x2: second image batch, [b, 3, H, W].
            x3: feature vector batch, [b, 42].

        Returns:
            Logits, [b, lei].
        """
        f1 = self.stream1(x1)  # [b, 256, H/8, W/8]
        f2 = self.stream2(x2)

        vec_feat = self.vec_encoder(x3)  # [b, 256]

        # Image streams are merged by element-wise sum before fusion.
        fused = self.fusion(f1 + f2, vec_feat)

        return self.classifier(fused)

# Auxiliary module definitions


class ResidualSEBlock(nn.Module):
    """Inverted-bottleneck residual block with channel attention.

    Expands channels by ``expansion`` via a 1x1 conv, applies a depthwise
    3x3 conv, projects back down to ``in_ch``, re-weights channels with
    ChannelAttention, and adds the unmodified input back (identity shortcut).
    """

    def __init__(self, in_ch, expansion=4):
        super().__init__()
        expanded = in_ch * expansion
        self.residual = nn.Sequential(
            nn.Conv2d(in_ch, expanded, 1),
            nn.BatchNorm2d(expanded),
            nn.GELU(),
            # groups == channels makes this a depthwise 3x3 convolution.
            nn.Conv2d(expanded, expanded, 3, padding=1, groups=expanded),
            nn.BatchNorm2d(expanded),
            nn.GELU(),
            nn.Conv2d(expanded, in_ch, 1),
            ChannelAttention(in_ch)
        )
        self.shortcut = nn.Identity()

    def forward(self, x):
        branch = self.residual(x)
        skip = self.shortcut(x)
        return branch + skip


class ChannelAttention(nn.Module):
    """Squeeze-and-excitation style channel re-weighting.

    Global-average-pools each channel to a scalar, passes the result
    through a bottleneck MLP (reduction factor ``reduction``) ending in a
    sigmoid, and scales the input channels by the resulting [0, 1] weights.
    """

    def __init__(self, ch, reduction=8):
        super().__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        hidden = max(1, ch // reduction)  # never collapse below one unit
        self.fc = nn.Sequential(
            nn.Linear(ch, hidden),
            nn.GELU(),
            nn.Linear(hidden, ch),
            nn.Sigmoid()
        )

    def forward(self, x):
        batch, channels = x.shape[:2]
        squeezed = self.avg_pool(x).view(batch, channels)
        weights = self.fc(squeezed).view(batch, channels, 1, 1)
        # Broadcast the per-channel weights over the spatial dimensions.
        return x * weights.expand_as(x)


class StochasticDepth(nn.Module):
    """Stochastic depth regularization (batch-level drop).

    During training the entire input is zeroed with probability
    ``1 - survival_prob``. Bug fix: the original returned ``x`` unscaled on
    the survival path, so the expected training output was
    ``survival_prob * x`` while eval returned ``x`` — a train/eval mismatch.
    The kept path is now scaled by ``1 / survival_prob`` (inverted scaling,
    as in dropout), so E[output] == x in both modes and eval behavior is
    unchanged.
    """

    def __init__(self, survival_prob=0.8):
        """
        Args:
            survival_prob: probability in (0, 1] that the input is kept
                during a training step.
        """
        super().__init__()
        self.survival_prob = survival_prob

    def forward(self, x):
        # Identity at inference; also skip sampling when nothing can drop.
        if not self.training or self.survival_prob >= 1.0:
            return x
        # Guard against division by zero for degenerate survival_prob.
        if self.survival_prob <= 0.0:
            return torch.zeros_like(x)
        if torch.rand(1)[0] > self.survival_prob:
            return torch.zeros_like(x)
        # Inverted scaling keeps the expected value equal to x.
        return x / self.survival_prob


# Smoke test: verify a forward pass produces the expected output shape
if __name__ == "__main__":
    # Build the model and random dummy inputs matching the expected shapes:
    # two 3-channel 256x256 image batches and a 42-dim feature vector batch.
    model = EnhancedMultiModalNet()
    x1, x2 = (torch.randn(8, 3, 256, 256) for _ in range(2))
    x3 = torch.randn(8, 42)

    try:
        with torch.no_grad():
            output = model(x1, x2, x3)
        print(f"验证通过！输出形状：{output.shape}")  # expected: torch.Size([8, 4])
    except Exception as e:
        print("错误信息：", e)