# @Author: Zhixuan.Wang
# @IDE: PyCharm
# @Project: multimodal
# @File: DETR_Model.py
# @Time: 2025/11/9 15:47
# @Description: Build the DETR model

import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models import resnet50


# 🔹 正弦位置编码 (DETR 原论文实现)
class PositionEmbeddingSine(nn.Module):
    """Fixed 2-D sine/cosine positional encoding (DETR paper, simplified).

    Emits ``2 * num_pos_feats`` channels per spatial location: the first
    half encodes the (normalized) row coordinate, the second half the
    column coordinate, each as interleaved sin/cos pairs over a geometric
    frequency ladder controlled by ``temperature``.
    """

    def __init__(self, num_pos_feats=128, temperature=10000):
        super().__init__()
        self.num_pos_feats = num_pos_feats
        self.temperature = temperature

    def forward(self, x):
        """x: [B, C, H, W] feature map -> [B, 2*num_pos_feats, H, W] encoding.

        The encoding depends only on H, W and the device of ``x``, never on
        its values; it is simply repeated across the batch dimension.
        """
        batch, _, height, width = x.shape
        dev = x.device
        eps = 1e-6

        # Per-pixel row/column coordinates, normalized to [0, 1).
        rows = torch.arange(height, device=dev).unsqueeze(1).repeat(1, width) / (height + eps)
        cols = torch.arange(width, device=dev).unsqueeze(0).repeat(height, 1) / (width + eps)

        # Geometric frequency ladder; consecutive (sin, cos) pairs share a frequency.
        freq = torch.arange(self.num_pos_feats, device=dev)
        freq = self.temperature ** (2 * (freq // 2) / self.num_pos_feats)

        row_phase = rows[:, :, None] / freq
        col_phase = cols[:, :, None] / freq

        # Interleave: even channels take sin, odd channels take cos.
        row_enc = torch.stack(
            (row_phase[..., 0::2].sin(), row_phase[..., 1::2].cos()), dim=3).flatten(2)
        col_enc = torch.stack(
            (col_phase[..., 0::2].sin(), col_phase[..., 1::2].cos()), dim=3).flatten(2)

        # [H, W, C] -> [C, H, W], then broadcast over the batch.
        pos = torch.cat((row_enc, col_enc), dim=2).permute(2, 0, 1)
        return pos.unsqueeze(0).repeat(batch, 1, 1, 1)  # [B, C, H, W]


# 🔹 Transformer 模块
class Transformer(nn.Module):
    """Encoder-decoder transformer for DETR.

    ``forward`` expects sequence-first tensors:
        src:         [HW, B, C]          flattened image features
        query_embed: [num_queries, B, C] learned object queries
        pos_embed:   [HW, B, C]          positional encoding for ``src``
    and returns [1, B, num_queries, C] — a singleton decoder-layer
    dimension so callers can take the last layer with ``hs[-1]``.
    """

    def __init__(self, d_model=256, nhead=8, num_encoder_layers=6,
                 num_decoder_layers=6, dim_feedforward=2048, dropout=0.1):
        super().__init__()
        # Encoder: d_model wide, nhead heads, dim_feedforward FFN, dropout.
        encoder_layer = nn.TransformerEncoderLayer(d_model, nhead, dim_feedforward, dropout)
        # Decoder mirrors the encoder configuration.
        decoder_layer = nn.TransformerDecoderLayer(d_model, nhead, dim_feedforward, dropout)
        self.encoder = nn.TransformerEncoder(encoder_layer, num_encoder_layers)
        self.decoder = nn.TransformerDecoder(decoder_layer, num_decoder_layers)
        self._reset_parameters()

    def _reset_parameters(self):
        # Xavier-initialize every weight matrix (1-D params such as biases
        # keep their default init), as in the DETR reference implementation.
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def forward(self, src, query_embed, pos_embed):
        # src: [HW, B, C], pos_embed: [HW, B, C], query_embed: [num_queries, B, C]
        # Inject positional information into the encoder input.
        memory = self.encoder(src + pos_embed)
        # BUG FIX: the previous version fed an all-zero target
        # (torch.zeros_like(query_embed)) and never used query_embed at all,
        # so every query slot was identical and produced the same prediction.
        # Feeding the learned query embeddings as the decoder input gives
        # each slot a distinct query (simplified-DETR recipe).
        hs = self.decoder(query_embed, memory)
        # [num_queries, B, C] -> [1, B, num_queries, C]
        return hs.unsqueeze(0).permute(0, 2, 1, 3)


# 🔹 主 DETR 模型
class DETR(nn.Module):
    """Minimal DETR: ResNet-50 backbone + transformer + prediction heads.

    ``forward`` returns a dict:
        'pred_logits': [B, num_queries, num_classes] raw class scores
        'pred_boxes':  [B, num_queries, 4] box coordinates squashed to
                       (0, 1) by a sigmoid.
    """

    def __init__(self, num_classes=3, num_queries=100,
                 hidden_dim=256, pretrained=True):
        super().__init__()

        # --- Backbone ---
        # BUG FIX: the previous version replaced backbone.conv1 with a freshly
        # initialized Conv2d of the exact same configuration as ResNet-50's
        # stock conv1 (3->64, k=7, s=2, p=3, bias=False), which silently threw
        # away the pretrained conv1 weights. The replacement was redundant,
        # so it is removed.
        backbone = resnet50(weights='IMAGENET1K_V1' if pretrained else None)
        # Drop avgpool + fc; keep the conv trunk -> [B, 2048, H/32, W/32].
        self.backbone = nn.Sequential(*list(backbone.children())[:-2])
        # 1x1 projection from backbone channels down to the transformer width.
        self.conv_proj = nn.Conv2d(2048, hidden_dim, 1)

        # --- Transformer ---
        self.transformer = Transformer(d_model=hidden_dim, nhead=8,
                                       num_encoder_layers=6, num_decoder_layers=6)

        # --- Positional encoding & object queries ---
        # hidden_dim // 2 per spatial axis -> hidden_dim total channels.
        self.position_embedding = PositionEmbeddingSine(hidden_dim // 2)
        self.query_embed = nn.Embedding(num_queries, hidden_dim)

        # --- Prediction heads ---
        self.class_embed = nn.Linear(hidden_dim, num_classes)
        self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)

    def forward(self, x):
        # 1. Extract features.
        features = self.backbone(x)  # [B, 2048, H/32, W/32]
        src = self.conv_proj(features)  # [B, hidden_dim, H/32, W/32]
        b, c, h, w = src.shape
        pos = self.position_embedding(src)  # [B, hidden_dim, H/32, W/32]

        # 2. Flatten to sequence-first layout for the transformer.
        src = src.flatten(2).permute(2, 0, 1)  # [HW, B, C]
        pos = pos.flatten(2).permute(2, 0, 1)
        query_embed = self.query_embed.weight.unsqueeze(1).repeat(1, b, 1)  # [num_queries, B, C]

        # 3. Transformer forward pass; take the last decoder-layer output.
        hs = self.transformer(src, query_embed, pos)  # [layers, B, num_queries, C]
        hs = hs[-1]  # [B, num_queries, C]

        # 4. Classification & box regression heads.
        outputs_class = self.class_embed(hs)  # [B, num_queries, num_classes]
        outputs_coord = self.bbox_embed(hs).sigmoid()  # [B, num_queries, 4]

        return {'pred_logits': outputs_class, 'pred_boxes': outputs_coord}


# 🔹 多层感知机 (MLP)
class MLP(nn.Module):
    """Plain feed-forward network: (Linear + ReLU) * (num_layers - 1),
    then a final Linear from ``hidden_dim`` to ``output_dim``.

    Note: for ``num_layers == 1`` the single layer maps ``hidden_dim`` (not
    ``input_dim``) to ``output_dim``, mirroring the original construction.
    """

    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
        super().__init__()
        modules = []
        width = input_dim  # input width of the next layer to be built
        for _ in range(num_layers - 1):
            modules.extend((nn.Linear(width, hidden_dim), nn.ReLU()))
            width = hidden_dim
        modules.append(nn.Linear(hidden_dim, output_dim))
        self.layers = nn.Sequential(*modules)

    def forward(self, x):
        return self.layers(x)

# # 🔹 快速测试
# if __name__ == "__main__":
#     model = DETR(num_classes=3, num_queries=100)
#     dummy = torch.randn(2, 3, 512, 512)
#     out = model(dummy)
#     print("✅ 输出键:", out.keys())
#     print("分类输出:", out['pred_logits'].shape)
#     print("坐标输出:", out['pred_boxes'].shape)
