import torch
import torch.nn as nn
import torch.nn.functional as F

__all__ = ['CrossModalAttention']

class CrossModalAttention(nn.Module):
    """Cross-modal attention that fuses image and text feature vectors.

    Each modality attends over the *other* modality's features. Note that
    attention is computed across the batch dimension (a [B, B] similarity
    matrix), i.e. every sample attends over all other samples' features of
    the opposite modality — presumably intended for in-batch cross-sample
    interaction (common in contrastive pipelines); confirm against caller.

    Args:
        dim: Feature dimensionality D of both modalities.
        num_heads: Kept for interface compatibility but currently UNUSED —
            the attention is effectively single-head.
            TODO(review): implement multi-head splitting or drop the arg.
        temperature: Softmax temperature; logits are divided by this value
            *in addition to* the 1/sqrt(D) scale (double scaling is
            intentional per the original implementation).
    """

    def __init__(self, dim: int, num_heads: int = 1, temperature: float = 0.05):
        super().__init__()
        self.dim = dim
        self.num_heads = num_heads  # NOTE: stored but not used in forward()
        self.temperature = temperature
        self.scale = dim ** -0.5  # standard 1/sqrt(D) attention scaling

        # Image-side Q/K/V projection matrices (applied as plain matmuls,
        # column-normalized on every forward pass).
        self.img_q = nn.Parameter(torch.randn(dim, dim))
        self.img_k = nn.Parameter(torch.randn(dim, dim))
        self.img_v = nn.Parameter(torch.randn(dim, dim))

        # Text-side Q/K/V projection matrices.
        self.txt_q = nn.Parameter(torch.randn(dim, dim))
        self.txt_k = nn.Parameter(torch.randn(dim, dim))
        self.txt_v = nn.Parameter(torch.randn(dim, dim))

        # Output fusion: concat(original, attended) -> D. This is a
        # concatenation-based fusion, not a residual addition.
        self.img_fuse = nn.Linear(2 * dim, dim)
        self.txt_fuse = nn.Linear(2 * dim, dim)

    def forward(self, img_feats: torch.Tensor, txt_feats: torch.Tensor):
        """Fuse image and text features via cross-modal attention.

        Args:
            img_feats: [B, D] image features.
            txt_feats: [B, D] text features.

        Returns:
            Tuple of (fused_img, fused_txt), each [B, D].
        """
        # L2-normalize the input features so attention logits are cosine-like.
        img_feats_norm = F.normalize(img_feats, p=2, dim=-1)  # [B, D]
        txt_feats_norm = F.normalize(txt_feats, p=2, dim=-1)  # [B, D]

        # -------------------------
        # Build Q/K/V for each modality
        # -------------------------
        # Projection weights are column-normalized (dim=0) on every call, so
        # each output feature is produced by a unit-norm projection vector.
        img_q_norm = F.normalize(self.img_q, p=2, dim=0)  # [D, D]
        img_k_norm = F.normalize(self.img_k, p=2, dim=0)  # [D, D]
        img_v_norm = F.normalize(self.img_v, p=2, dim=0)  # [D, D]

        Qi = img_feats_norm @ img_q_norm  # [B, D]
        Ki = img_feats_norm @ img_k_norm  # [B, D]
        Vi = img_feats_norm @ img_v_norm  # [B, D]

        txt_q_norm = F.normalize(self.txt_q, p=2, dim=0)  # [D, D]
        txt_k_norm = F.normalize(self.txt_k, p=2, dim=0)  # [D, D]
        txt_v_norm = F.normalize(self.txt_v, p=2, dim=0)  # [D, D]

        Qt = txt_feats_norm @ txt_q_norm  # [B, D]
        Kt = txt_feats_norm @ txt_k_norm  # [B, D]
        Vt = txt_feats_norm @ txt_v_norm  # [B, D]

        # -------------------------
        # Cross-modal attention (over the batch dimension)
        # -------------------------
        # Image queries attend over text keys/values. Logits are scaled by
        # 1/sqrt(D) and then divided by the temperature before softmax.
        attn_i2t = torch.matmul(Qi, Kt.transpose(-1, -2)) * self.scale  # [B, B]
        attn_i2t = F.softmax(attn_i2t / self.temperature, dim=-1)
        attended_img = torch.matmul(attn_i2t, Vt)  # [B, D]

        # Text queries attend over image keys/values.
        attn_t2i = torch.matmul(Qt, Ki.transpose(-1, -2)) * self.scale
        attn_t2i = F.softmax(attn_t2i / self.temperature, dim=-1)
        attended_txt = torch.matmul(attn_t2i, Vi)  # [B, D]

        # -------------------------
        # Concatenation-based fusion of original + attended features
        # -------------------------
        fused_img = self.img_fuse(torch.cat([img_feats, attended_img], dim=-1))
        fused_txt = self.txt_fuse(torch.cat([txt_feats, attended_txt], dim=-1))

        return fused_img, fused_txt