import torch.nn as nn
import torch.nn.functional as F
from torchvision.models.resnet import resnet50


# === 1. Pixel Decoder (a simple 1x1 conv standing in for a full FPN) ===
class PixelDecoder(nn.Module):
    """Minimal pixel decoder: a 1x1 convolution that projects backbone
    features down to the transformer's hidden dimension (spatial size is
    left untouched)."""

    def __init__(self, in_channels=2048, out_channels=256):
        super().__init__()
        # 1x1 conv == per-pixel linear projection across channels.
        self.proj = nn.Conv2d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        """Project [B, in_channels, H, W] -> [B, out_channels, H, W]."""
        projected = self.proj(x)
        return projected


# === 2. Transformer Decoder Layer ===
class TransformerDecoderLayer(nn.Module):
    """Single post-norm transformer decoder layer: self-attention over the
    queries, cross-attention into the memory, then a position-wise FFN, each
    followed by a residual add + LayerNorm.

    Tensors use the sequence-first layout expected by
    ``nn.MultiheadAttention`` with its default ``batch_first=False``:
    ``tgt`` is [T, B, d_model], ``memory`` is [S, B, d_model].
    """

    def __init__(self, d_model=256, nhead=8, dim_feedforward=1024):
        super().__init__()
        # NOTE: submodule creation order is kept stable so seeded parameter
        # initialization is reproducible.
        self.self_attn = nn.MultiheadAttention(d_model, nhead)
        self.cross_attn = nn.MultiheadAttention(d_model, nhead)
        self.ffn = nn.Sequential(
            nn.Linear(d_model, dim_feedforward),
            nn.ReLU(),
            nn.Linear(dim_feedforward, d_model),
        )
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)

    def forward(self, tgt, memory):
        """Return the updated queries, shape [T, B, d_model] (same as tgt)."""
        # Self-attention among the queries (attn weights are discarded).
        attn_out, _ = self.self_attn(tgt, tgt, tgt)
        tgt = self.norm1(tgt + attn_out)

        # Cross-attention: queries attend to the flattened image features.
        attn_out, _ = self.cross_attn(tgt, memory, memory)
        tgt = self.norm2(tgt + attn_out)

        # Position-wise feed-forward with the final residual + norm.
        return self.norm3(tgt + self.ffn(tgt))


# === 3. MaskFormer main model ===
class MaskFormer(nn.Module):
    """Minimal MaskFormer: ResNet-50 backbone, a 1x1-conv pixel decoder, a
    single transformer decoder layer over learned queries, and per-query
    class + mask heads.

    forward(x) returns:
        pred_logits: [B, num_queries, num_classes + 1]  (+1 = "no object")
        pred_masks:  [B, num_queries, H/8, W/8]  (stride-32 features
                     upsampled x4)

    num_classes defaults to 21 — presumably PASCAL VOC; confirm for other
    datasets.
    """

    def __init__(self, num_classes=21, num_queries=100, hidden_dim=256):
        super().__init__()
        # Backbone (ResNet-50), truncated before avgpool/fc -> stride-32 maps.
        # NOTE(review): `pretrained=` is deprecated in newer torchvision in
        # favor of `weights=`; kept here for compatibility with older versions.
        backbone = resnet50(pretrained=True)
        self.backbone = nn.Sequential(*list(backbone.children())[:-2])
        self.pixel_decoder = PixelDecoder(in_channels=2048, out_channels=hidden_dim)

        # Transformer decoder over learned object queries.
        self.query_embed = nn.Embedding(num_queries, hidden_dim)
        self.decoder = TransformerDecoderLayer(d_model=hidden_dim, nhead=8)

        # Heads
        self.class_head = nn.Linear(hidden_dim, num_classes + 1)  # +1 for "no object"
        # BUGFIX: the original mask head was a 1x1 Conv2d applied directly to
        # the pixel features, so pred_masks never depended on the per-query
        # decoder outputs — masks and class predictions were fully decoupled.
        # Following the MaskFormer design, each query is instead projected to
        # a mask embedding that is dotted with the pixel features.
        self.mask_embed = nn.Linear(hidden_dim, hidden_dim)

    def forward(self, x):
        feats = self.backbone(x)
        src = self.pixel_decoder(feats)  # [B, hidden_dim, H/32, W/32]

        bs, c, h, w = src.shape
        src_flat = src.flatten(2).permute(2, 0, 1)  # [HW, B, C]
        queries = self.query_embed.weight.unsqueeze(1).repeat(1, bs, 1)  # [num_queries, B, C]

        hs = self.decoder(queries, src_flat)  # [num_queries, B, C]
        hs = hs.permute(1, 0, 2)  # [B, num_queries, C]

        class_logits = self.class_head(hs)  # [B, num_queries, num_classes+1]

        # Upsample pixel features x4, then dot each query's mask embedding
        # with every pixel feature: [B,Q,C] @ [B,C,H*W] -> [B,Q,H*W].
        mask_feats = F.interpolate(src, scale_factor=4, mode="bilinear", align_corners=False)
        _, _, hm, wm = mask_feats.shape
        mask_embed = self.mask_embed(hs)  # [B, num_queries, C]
        mask_logits = (mask_embed @ mask_feats.flatten(2)).view(bs, -1, hm, wm)

        return {"pred_logits": class_logits, "pred_masks": mask_logits}
