import torch
import torch.nn as nn
import torch.nn.functional as F

class NSKGBEVEnhancer(nn.Module):
    """Enhance BEV features with nSKG (scene knowledge graph) features.

    Projects both modalities into a shared hidden space, optionally runs
    cross-attention from BEV queries to nSKG node features, and fuses the
    result back into the original BEV channel count.
    """

    def __init__(self,
                 bev_channels=256,
                 nskg_channels=256,
                 hidden_channels=128,
                 bev_h=200,
                 bev_w=200,
                 use_attention=True):
        super().__init__()
        self.bev_channels = bev_channels
        self.nskg_channels = nskg_channels
        self.hidden_channels = hidden_channels
        self.bev_h = bev_h
        self.bev_w = bev_w
        self.use_attention = use_attention

        # Project BEV features into the shared hidden space (1x1 conv).
        self.bev_proj = nn.Sequential(
            nn.Conv2d(bev_channels, hidden_channels, kernel_size=1),
            nn.BatchNorm2d(hidden_channels),
            nn.ReLU(inplace=True)
        )

        # Project nSKG features (node or global vectors) into the hidden space.
        self.nskg_proj = nn.Sequential(
            nn.Linear(nskg_channels, hidden_channels),
            nn.LayerNorm(hidden_channels),
            nn.ReLU(inplace=True)
        )

        if use_attention:
            # Cross-attention: BEV cells attend to nSKG graph nodes.
            self.attention = CrossAttention(
                query_dim=hidden_channels,
                key_dim=hidden_channels,
                value_dim=hidden_channels,
                num_heads=4
            )

        # Fuse concatenated [BEV, knowledge] features back to bev_channels.
        self.fusion = nn.Sequential(
            nn.Conv2d(hidden_channels * 2, bev_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(bev_channels),
            nn.ReLU(inplace=True)
        )

    def forward(self, bev_feat, nskg_node_feat, nskg_global_feat, nskg_pos=None):
        """Fuse BEV features with nSKG features.

        Args:
            bev_feat: BEV features [B, C, H, W].
            nskg_node_feat: nSKG node features [N, C] or {node_type: tensor}.
                The attention path is taken only for a plain tensor; dict
                inputs fall back to global-feature fusion.
            nskg_global_feat: nSKG global features [B, C].
            nskg_pos: nSKG node positions [N, 3] or {node_type: tensor}.
                Currently unused; kept for interface compatibility.

        Returns:
            Enhanced BEV features [B, C, H, W].
        """
        batch_size, _, bev_h, bev_w = bev_feat.shape

        # Project BEV features -> [B, hidden_C, H, W]
        bev_proj = self.bev_proj(bev_feat)

        # Project the global nSKG feature -> [B, hidden_C]
        nskg_global = self.nskg_proj(nskg_global_feat)

        # Broadcast the global feature over the BEV grid -> [B, hidden_C, H, W]
        nskg_global_2d = nskg_global.view(batch_size, self.hidden_channels, 1, 1).expand(-1, -1, bev_h, bev_w)

        if self.use_attention and isinstance(nskg_node_feat, torch.Tensor):
            # Flatten BEV grid into a query sequence [B, H*W, hidden_C].
            bev_seq = bev_proj.flatten(2).permute(0, 2, 1)

            # Project node features -> [N, hidden_C]
            nskg_node = self.nskg_proj(nskg_node_feat)

            # BUGFIX: expand the node batch dim to match the query batch.
            # The original passed a batch-1 tensor, which breaks
            # CrossAttention's reshape (it uses the query's batch size)
            # whenever batch_size > 1. expand() is a zero-copy view.
            nskg_node = nskg_node.unsqueeze(0).expand(batch_size, -1, -1)

            # BEV cells attend to graph nodes -> [B, H*W, hidden_C]
            attended_feat = self.attention(bev_seq, nskg_node, nskg_node)

            # Restore BEV spatial layout -> [B, hidden_C, H, W]
            attended_feat = attended_feat.permute(0, 2, 1).reshape(batch_size, self.hidden_channels, bev_h, bev_w)

            enhanced_feat = self.fusion(torch.cat([bev_proj, attended_feat], dim=1))
        else:
            # No usable node tensor: fuse the broadcast global feature instead.
            enhanced_feat = self.fusion(torch.cat([bev_proj, nskg_global_2d], dim=1))

        return enhanced_feat


class CrossAttention(nn.Module):
    """Multi-head cross-modal attention.

    Queries from one modality attend over key/value tokens from another.
    All three inputs are linearly projected to ``query_dim``, split into
    ``num_heads`` heads, combined via scaled dot-product attention, then
    merged and passed through an output projection.
    """

    def __init__(self, query_dim, key_dim, value_dim, num_heads=4):
        super().__init__()
        self.num_heads = num_heads
        # Per-head width; query_dim is assumed divisible by num_heads.
        self.head_dim = query_dim // num_heads
        self.scale = self.head_dim ** -0.5

        self.q_proj = nn.Linear(query_dim, query_dim)
        self.k_proj = nn.Linear(key_dim, query_dim)
        self.v_proj = nn.Linear(value_dim, query_dim)
        self.out_proj = nn.Linear(query_dim, query_dim)

    def forward(self, query, key, value):
        """Apply cross-modal attention.

        Args:
            query: query tensor [B, L_q, C].
            key: key tensor [B, L_k, C].
            value: value tensor [B, L_v, C] (L_v must equal L_k).

        Returns:
            Attention output [B, L_q, C].
        """
        bsz, q_len, _ = query.shape

        def split_heads(x):
            # [B, L, C] -> [B, num_heads, L, head_dim]
            return x.view(bsz, x.size(1), self.num_heads, self.head_dim).transpose(1, 2)

        q = split_heads(self.q_proj(query))
        k = split_heads(self.k_proj(key))
        v = split_heads(self.v_proj(value))

        # Scaled dot-product attention per head.
        scores = q @ k.transpose(-2, -1) * self.scale
        weights = scores.softmax(dim=-1)

        # Merge heads: [B, heads, L_q, head_dim] -> [B, L_q, C]
        merged = (weights @ v).transpose(1, 2).reshape(bsz, q_len, -1)
        return self.out_proj(merged)