import torch
import torch.nn as nn
from torch_geometric.nn import GraphSAGE, GATConv

class NSTPEncoder(nn.Module):
    """Graph encoder for nSTP data.

    Projects raw node features into a hidden space with a linear layer,
    then runs either a GraphSAGE stack or a simplified stack of GATConv
    layers to produce node embeddings of size ``out_channels``.

    Args:
        in_channels (int): Dimensionality of raw node features.
        hidden_channels (int): Hidden dimensionality of the GNN layers.
        out_channels (int): Dimensionality of the output node embeddings.
        num_layers (int): Number of GNN layers (>= 1).
        gnn_type (str): Either ``'graphsage'`` or ``'gat'``.
        dropout (float): Dropout probability applied between GNN layers.
        aggr (str): Aggregation scheme, forwarded to GraphSAGE only.

    Raises:
        ValueError: If ``gnn_type`` is neither ``'graphsage'`` nor ``'gat'``.
    """

    def __init__(self, in_channels, hidden_channels, out_channels, num_layers=3,
                 gnn_type='graphsage', dropout=0.1, aggr='mean'):
        super().__init__()
        self.in_channels = in_channels
        self.hidden_channels = hidden_channels
        self.out_channels = out_channels
        self.num_layers = num_layers
        self.gnn_type = gnn_type

        # Input feature projection (raw features -> hidden space)
        self.input_proj = nn.Linear(in_channels, hidden_channels)

        # Graph neural network backbone
        if gnn_type == 'graphsage':
            self.gnn = GraphSAGE(
                in_channels=hidden_channels,
                hidden_channels=hidden_channels,
                num_layers=num_layers,
                out_channels=out_channels,
                dropout=dropout,
                aggr=aggr
            )
        elif gnn_type == 'gat':
            # Simplified GAT stack.
            # FIX: the original unconditionally appended a first and a last
            # layer, so num_layers=1 silently produced a two-layer network.
            # Build a single hidden->out layer in that case instead.
            self.gnn_layers = nn.ModuleList()
            if num_layers == 1:
                self.gnn_layers.append(GATConv(hidden_channels, out_channels))
            else:
                self.gnn_layers.append(GATConv(hidden_channels, hidden_channels))
                for _ in range(num_layers - 2):
                    self.gnn_layers.append(GATConv(hidden_channels, hidden_channels))
                self.gnn_layers.append(GATConv(hidden_channels, out_channels))
            self.dropout = nn.Dropout(dropout)
        else:
            raise ValueError(f"不支持的GNN类型: {gnn_type}")

    def forward(self, data):
        """Encode a graph into per-node embeddings.

        Args:
            data: A PyG ``Data`` object (with ``x`` and ``edge_index``
                attributes) or a dict containing ``'x'`` and ``'edge_index'``.
                ``None`` or malformed input is tolerated and yields a
                zero placeholder tensor instead of raising.

        Returns:
            torch.Tensor: Node embeddings of shape ``[num_nodes, out_channels]``,
                or a ``[1, out_channels]`` zero tensor for invalid input.
        """
        # Graceful handling of missing input: return a zero placeholder
        # on the same device as the model parameters.
        if data is None:
            return torch.zeros((1, self.out_channels), device=self.input_proj.weight.device)

        # Accept both attribute-style (PyG Data) and dict-style inputs.
        if hasattr(data, 'x') and hasattr(data, 'edge_index'):
            x, edge_index = data.x, data.edge_index
        elif isinstance(data, dict) and 'x' in data and 'edge_index' in data:
            x, edge_index = data['x'], data['edge_index']
        else:
            print(f"警告: 输入数据格式不正确: {type(data)}")
            return torch.zeros((1, self.out_channels), device=self.input_proj.weight.device)

        # Coerce array-likes to tensors; as_tensor avoids a copy when the
        # input already matches dtype/device.
        if not isinstance(x, torch.Tensor):
            x = torch.as_tensor(x, dtype=torch.float, device=self.input_proj.weight.device)
        if not isinstance(edge_index, torch.Tensor):
            edge_index = torch.as_tensor(edge_index, dtype=torch.long, device=self.input_proj.weight.device)

        # Project raw features into the hidden space.
        x = self.input_proj(x)

        # Run the GNN backbone.
        if self.gnn_type == 'graphsage':
            x = self.gnn(x, edge_index)
        else:  # gat
            # ReLU + dropout between layers; the final layer is left linear.
            for i, layer in enumerate(self.gnn_layers):
                if i < len(self.gnn_layers) - 1:
                    x = layer(x, edge_index)
                    x = torch.relu(x)
                    x = self.dropout(x)
                else:
                    x = layer(x, edge_index)

        return x


class NSTPEnhancer(nn.Module):
    """Fuses nSTP node features into BEV features.

    Both feature sources are projected into a shared hidden space and
    combined — either via single-head cross-attention (BEV cells attend
    over nSTP nodes) or via a simple mean-pooled broadcast — then
    projected back to the BEV channel count.

    Args:
        bev_channels (int): Channel count of the BEV feature map.
        nstp_channels (int): Channel count of the nSTP node features.
        hidden_channels (int): Dimensionality of the shared fusion space.
        bev_h (int): BEV grid height (stored for reference).
        bev_w (int): BEV grid width (stored for reference).
        use_attention (bool): Attention-based fusion if True, mean
            pooling otherwise.
    """

    def __init__(self, bev_channels, nstp_channels, hidden_channels, bev_h, bev_w, use_attention=True):
        super().__init__()
        self.bev_channels = bev_channels
        self.nstp_channels = nstp_channels
        self.hidden_channels = hidden_channels
        self.bev_h = bev_h
        self.bev_w = bev_w
        self.use_attention = use_attention

        # Projections into the shared fusion space
        self.nstp_proj = nn.Linear(nstp_channels, hidden_channels)
        self.bev_proj = nn.Linear(bev_channels, hidden_channels)

        if use_attention:
            # Single-head scaled dot-product attention parameters
            self.query_proj = nn.Linear(hidden_channels, hidden_channels)
            self.key_proj = nn.Linear(hidden_channels, hidden_channels)
            self.value_proj = nn.Linear(hidden_channels, hidden_channels)
            self.attention_scale = hidden_channels ** -0.5

        # Back-projection to the BEV channel count
        self.output_proj = nn.Linear(hidden_channels, bev_channels)

    def forward(self, bev_feat, nstp_feat, nstp_pos=None):
        """Return the BEV feature map enhanced with nSTP information.

        Args:
            bev_feat (torch.Tensor): BEV features, shape ``[B, C, H, W]``.
            nstp_feat (torch.Tensor): nSTP node features, shape ``[B, N, C_n]``.
            nstp_pos (torch.Tensor, optional): Node positions ``[B, N, 2]``;
                accepted but currently unused.

        Returns:
            torch.Tensor: Enhanced BEV features, shape ``[B, C, H, W]``.
        """
        batch, channels, height, width = bev_feat.shape

        # Flatten the spatial grid into a token sequence: [B, H*W, C]
        tokens = bev_feat.flatten(2).permute(0, 2, 1)

        # Map both modalities into the shared hidden space
        bev_hidden = self.bev_proj(tokens)        # [B, H*W, hidden]
        nstp_hidden = self.nstp_proj(nstp_feat)   # [B, N, hidden]

        if self.use_attention:
            # Each BEV cell queries the nSTP nodes.
            q = self.query_proj(bev_hidden)       # [B, H*W, hidden]
            k = self.key_proj(nstp_hidden)        # [B, N, hidden]
            v = self.value_proj(nstp_hidden)      # [B, N, hidden]

            # Scaled dot-product attention weights over nodes: [B, H*W, N]
            weights = torch.softmax(
                torch.bmm(q, k.transpose(1, 2)) * self.attention_scale, dim=-1)

            # Residual combination of attended context with BEV tokens
            fused = torch.bmm(weights, v) + bev_hidden
        else:
            # Fallback: broadcast the mean node feature to every cell.
            pooled = nstp_hidden.mean(dim=1, keepdim=True)
            fused = bev_hidden + pooled.expand(-1, height * width, -1)

        # Back to BEV channels, then restore the spatial layout.
        out = self.output_proj(fused)             # [B, H*W, C]
        return out.permute(0, 2, 1).reshape(batch, channels, height, width)