import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint
from torch.amp import autocast

class CrossAttentionModule(nn.Module):
    """Bidirectional cross-attention between text and image token streams.

    Each modality is linearly projected into the other's embedding space so
    it can serve as key/value for the opposite stream's queries. Both streams
    are then refined with a self-attention pass. All attention modules use
    the sequence-first layout ([seq_len, batch, dim], ``batch_first=False``).
    """

    def __init__(self, dropout, text_dim=512, image_dim=768, num_heads=8):
        """
        Args:
            dropout: Dropout probability passed to every attention module.
            text_dim: Embedding dimension of the text stream.
            image_dim: Embedding dimension of the image stream.
            num_heads: Number of attention heads; must divide both dims.

        Raises:
            ValueError: If ``text_dim`` or ``image_dim`` is not divisible
                by ``num_heads``.
        """
        super(CrossAttentionModule, self).__init__()
        self.dropout = dropout

        # Raise (not assert) so the check survives `python -O`.
        if text_dim % num_heads != 0:
            raise ValueError("text_dim must be divisible by num_heads")
        if image_dim % num_heads != 0:
            raise ValueError("image_dim must be divisible by num_heads")

        # Linear projections that align one modality's features with the
        # other's embedding dimension.
        self.text_proj = nn.Linear(text_dim, image_dim)   # text -> image space
        self.image_proj = nn.Linear(image_dim, text_dim)  # image -> text space

        # Multi-head attention modules (sequence-first layout).
        self.cross_attn_text_to_image = nn.MultiheadAttention(
            embed_dim=image_dim, num_heads=num_heads,
            dropout=self.dropout, batch_first=False)
        self.cross_attn_image_to_text = nn.MultiheadAttention(
            embed_dim=text_dim, num_heads=num_heads,
            dropout=self.dropout, batch_first=False)
        self.self_image_attn = nn.MultiheadAttention(
            embed_dim=image_dim, num_heads=num_heads,
            dropout=self.dropout, batch_first=False)
        self.self_text_attn = nn.MultiheadAttention(
            embed_dim=text_dim, num_heads=num_heads,
            dropout=self.dropout, batch_first=False)

    # Pins this forward pass to fp32 even inside an enclosing autocast
    # region. NOTE(review): fp32 is not a supported CUDA autocast dtype, so
    # PyTorch disables autocast here with a warning — the net effect is
    # "run this module in full precision"; confirm that is the intent.
    @autocast('cuda', dtype=torch.float32)
    def forward(self, image_feature, text_feature, visu_mask, text_mask):
        """Run bidirectional cross-attention followed by self-attention.

        Args:
            image_feature: Image tokens, e.g. [197, 8, 768] (seq, batch, dim).
            text_feature: Text tokens, e.g. [77, 8, 512] (seq, batch, dim).
            visu_mask: [batch, image_seq] boolean padding mask
                (True = position is masked out).
            text_mask: [batch, text_seq] boolean padding mask
                (True = position is masked out).

        Returns:
            Tuple (text_src, visu_src) with the same shapes as
            ``text_feature`` and ``image_feature`` respectively.
        """
        # Align feature dimensions so each stream can attend over the other.
        text_feature_proj = self.text_proj(text_feature)    # [T, B, image_dim]
        image_feature_proj = self.image_proj(image_feature) # [I, B, text_dim]

        # Image -> text cross-attention: text queries attend over projected
        # image keys/values; padded image positions are excluded via the
        # 2-D boolean key_padding_mask.
        text_src, _ = self.cross_attn_image_to_text(
            query=text_feature, key=image_feature_proj, value=image_feature_proj,
            key_padding_mask=visu_mask
        )  # [T, B, text_dim]

        # Text -> image cross-attention: image queries attend over projected
        # text keys/values.
        visu_src, _ = self.cross_attn_text_to_image(
            query=image_feature, key=text_feature_proj, value=text_feature_proj,
            key_padding_mask=text_mask
        )  # [I, B, image_dim]

        # Self-attention refinement of each fused stream, still excluding
        # the padded positions of that stream.
        text_src, _ = self.self_text_attn(
            query=text_src, key=text_src, value=text_src,
            key_padding_mask=text_mask
        )

        visu_src, _ = self.self_image_attn(
            query=visu_src, key=visu_src, value=visu_src,
            key_padding_mask=visu_mask
        )

        return text_src, visu_src