import torch
import torch.nn as nn

class CrossModalAttention(nn.Module):
    """Cross-modal attention fusion between infrared and radar features.

    Projects both modalities into a shared embedding space, applies
    bidirectional cross-attention, and fuses the two attended streams
    with a small MLP.
    """

    def __init__(self, config):
        """
        Args:
            config: dict with keys
                'feature_dim' (int): shared embedding dimension.
                'num_heads' (int): number of attention heads.
                'infrared_dim' (int, optional): raw infrared feature size.
                    Defaults to 512 (the previously hard-coded value).
                'radar_dim' (int, optional): raw radar feature size.
                    Defaults to 128 (the previously hard-coded value).
        """
        super().__init__()
        self.feature_dim = config['feature_dim']
        self.num_heads = config['num_heads']

        # Input projections into the shared embedding space. Raw input
        # dims are configurable; defaults preserve the original behavior.
        self.infrared_proj = nn.Linear(config.get('infrared_dim', 512), self.feature_dim)
        self.radar_proj = nn.Linear(config.get('radar_dim', 128), self.feature_dim)

        # NOTE: a single attention module is shared for BOTH directions
        # (infrared->radar and radar->infrared), i.e. the two directions
        # use the same Q/K/V projection weights.
        self.cross_attention = nn.MultiheadAttention(
            embed_dim=self.feature_dim,
            num_heads=self.num_heads,
            dropout=0.1,
            batch_first=True,
        )

        # Fusion MLP: concatenated attended features -> feature_dim.
        self.fusion_layer = nn.Sequential(
            nn.Linear(self.feature_dim * 2, self.feature_dim),
            nn.ReLU(),
            nn.Dropout(0.1),
        )

    def forward(self, infrared_features, radar_features):
        """Fuse the two modalities with bidirectional cross-attention.

        Args:
            infrared_features: [batch_size, num_nodes, infrared_dim]
            radar_features: [batch_size, num_nodes, radar_dim]

        Returns:
            Tensor of shape [batch_size, num_nodes, feature_dim].
        """
        # Project both modalities into the shared embedding space.
        infrared_proj = self.infrared_proj(infrared_features)
        radar_proj = self.radar_proj(radar_features)

        # Infrared queries attend over radar keys/values, and vice versa
        # (both directions share the same attention weights; see __init__).
        attended_infrared, _ = self.cross_attention(
            query=infrared_proj,
            key=radar_proj,
            value=radar_proj,
        )
        attended_radar, _ = self.cross_attention(
            query=radar_proj,
            key=infrared_proj,
            value=infrared_proj,
        )

        # Concatenate along the feature axis and fuse down to feature_dim.
        fused_features = torch.cat([attended_infrared, attended_radar], dim=-1)
        return self.fusion_layer(fused_features)