import torch
import torch.nn as nn
import torch.nn.functional as F

# -----------------------
# Lightweight CNN module (local features)
# -----------------------

class BasicConvBlock(nn.Module):
    """Conv2d -> BatchNorm2d -> ReLU unit with 'same' padding for odd kernels."""

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1):
        super().__init__()
        same_pad = kernel_size // 2  # preserves spatial size when stride == 1
        self.block = nn.Sequential(
            nn.Conv2d(
                in_channels,
                out_channels,
                kernel_size,
                stride,
                same_pad,
                bias=False,  # bias is redundant: BatchNorm adds its own shift
            ),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        """Apply conv-bn-relu to a (B, C, H, W) tensor."""
        return self.block(x)

class CNNBackbone(nn.Module):
    """Small conv stack ending in global average pooling.

    Produces one flat 256-dimensional feature vector per image; the width
    is exposed via ``out_dim`` for downstream consumers.
    """

    def __init__(self, in_channels=3):
        super().__init__()
        self.conv = nn.Sequential(
            BasicConvBlock(in_channels, 64),
            BasicConvBlock(64, 128),
            nn.MaxPool2d(2),
            BasicConvBlock(128, 256),
            nn.AdaptiveAvgPool2d((1, 1)),  # collapse H x W down to 1 x 1
        )
        self.out_dim = 256  # feature width read by the fusion head

    def forward(self, x):
        """Map a (B, C, H, W) batch to a flat (B, 256) feature matrix."""
        pooled = self.conv(x)
        return pooled.view(pooled.size(0), -1)

# -----------------------
# Compact ViT module (global features)
# -----------------------

class LightweightPatchEncoder(nn.Module):
    """1x1-conv patch projection followed by LayerNorm on the embedding axis.

    Every spatial position of the input becomes one token, so a (B, C, H, W)
    image yields H*W tokens of width ``embed_dim``.
    """

    def __init__(self, in_channels, embed_dim):
        super().__init__()
        self.proj = nn.Conv2d(in_channels, embed_dim, kernel_size=1)
        self.norm = nn.LayerNorm(embed_dim)

    def forward(self, x):
        """Project (B, C, H, W) to a normalized token sequence (B, H*W, embed_dim)."""
        tokens = self.proj(x)                 # (B, embed_dim, H, W)
        tokens = tokens.flatten(start_dim=2)  # (B, embed_dim, H*W)
        tokens = tokens.permute(0, 2, 1)      # (B, H*W, embed_dim)
        return self.norm(tokens)

class MiniTransformerBlock(nn.Module):
    """Post-norm transformer encoder layer.

    Self-attention and a two-layer feed-forward net, each wrapped in a
    residual connection that is followed by LayerNorm. Shape-preserving
    on (B, N, embed_dim) inputs.
    """

    def __init__(self, embed_dim, num_heads, ff_dim):
        super().__init__()
        self.attn = nn.MultiheadAttention(embed_dim, num_heads, batch_first=True)
        self.ff = nn.Sequential(
            nn.Linear(embed_dim, ff_dim),
            nn.ReLU(),
            nn.Linear(ff_dim, embed_dim),
        )
        self.norm1 = nn.LayerNorm(embed_dim)
        self.norm2 = nn.LayerNorm(embed_dim)

    def forward(self, x):
        """Run one attention + feed-forward round over a (B, N, D) sequence."""
        attended, _attn_weights = self.attn(x, x, x)  # self-attention: q = k = v = x
        x = self.norm1(x + attended)
        ff_out = self.ff(x)
        return self.norm2(x + ff_out)

class MiniViTEncoder(nn.Module):
    """Tiny ViT-style encoder: patch embed -> transformer stack -> mean pool -> head."""

    def __init__(self, in_channels=3, embed_dim=256, num_heads=2, ff_dim=512, num_layers=2):
        super().__init__()
        self.patch_embed = LightweightPatchEncoder(in_channels, embed_dim)
        layers = [
            MiniTransformerBlock(embed_dim, num_heads, ff_dim)
            for _ in range(num_layers)
        ]
        self.blocks = nn.Sequential(*layers)
        self.cls_head = nn.Linear(embed_dim, embed_dim)

    def forward(self, x):
        """Encode a (B, C, H, W) batch into one (B, embed_dim) global feature."""
        tokens = self.patch_embed(x)   # (B, N, embed_dim), N = H * W
        tokens = self.blocks(tokens)   # shape-preserving
        pooled = tokens.mean(dim=1)    # average over the token axis
        return self.cls_head(pooled)

# -----------------------
# Multimodal fusion module
# -----------------------

class MultiModalFusion(nn.Module):
    """Fuse local CNN features with global ViT features.

    Both branches emit a flat (B, 256) vector; the two vectors are
    concatenated and projected to ``fused_dim`` through a Linear+ReLU+Dropout
    head.

    Bug fix: the previous forward() called ``local_feat.size(2)`` /
    ``size(3)`` and ``F.interpolate`` on these 2-D feature matrices, which
    raised ``IndexError`` on every call (and bilinear interpolation requires
    4-D input regardless). Both branch outputs are already fixed-size
    vectors, so no spatial resizing is needed — the dead guard is removed.
    """

    def __init__(self, fused_dim=512):
        super().__init__()
        self.local_branch = CNNBackbone(in_channels=3)  # input is a pseudo-image
        self.global_branch = MiniViTEncoder(
            in_channels=3, embed_dim=256, num_heads=2, ff_dim=512, num_layers=2
        )
        self.fusion = nn.Sequential(
            nn.Linear(self.local_branch.out_dim + 256, fused_dim),
            nn.ReLU(),
            nn.Dropout(0.3),
        )

    def forward(self, local_input, global_input):
        """Fuse two (B, 3, H, W) pseudo-image batches into a (B, fused_dim) feature.

        Args:
            local_input: batch fed to the CNN branch.
            global_input: batch fed to the ViT branch.

        Returns:
            Tensor of shape (B, fused_dim).
        """
        local_feat = self.local_branch(local_input)      # (B, 256)
        global_feat = self.global_branch(global_input)   # (B, 256)

        # Concatenate along the feature axis and project.
        fused = torch.cat([local_feat, global_feat], dim=1)  # (B, 512)
        return self.fusion(fused)

# -----------------------
# Smoke test
# -----------------------

if __name__ == "__main__":
    # Quick shape check on random pseudo-image inputs: [B=4, C=3, H=3, W=32].
    fusion_net = MultiModalFusion(fused_dim=512)
    fusion_net.eval()

    local_x = torch.randn(4, 3, 3, 32)
    global_x = torch.randn(4, 3, 3, 32)

    with torch.no_grad():
        fused = fusion_net(local_x, global_x)
        print("Fused output:", fused.shape)  # expected: torch.Size([4, 512])
