import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn import GCNConv, GATConv, HeteroConv, SAGEConv
from torch_geometric.nn import global_mean_pool, global_max_pool

class NSKGEncoder(nn.Module):
    """Graph neural network encoder for nSKG (scene knowledge graph) data.

    Projects raw node features into a hidden space, applies a stack of GNN
    layers (heterogeneous or homogeneous), and produces both per-node
    embeddings and a pooled global graph embedding.
    """

    def __init__(self,
                 in_channels=8,  # position (3) + size (3) + orientation (2)
                 hidden_channels=64,
                 out_channels=256,
                 num_layers=2,
                 gnn_type='gat',
                 use_hetero=True,
                 dropout=0.1):
        """
        Args:
            in_channels: Dimensionality of raw node features.
            hidden_channels: Hidden feature size between GNN layers.
            out_channels: Final node/global embedding size.
            num_layers: Number of stacked GNN layers.
            gnn_type: One of 'gcn', 'gat', 'sage'.
            use_hetero: If True, use heterogeneous (typed) message passing.
            dropout: Dropout probability used inside GAT layers, between
                homogeneous GNN layers, and in the output projection.

        Raises:
            ValueError: if ``gnn_type`` is not a supported type (homogeneous
                mode; the hetero layers validate it lazily on first forward).
        """
        super().__init__()
        self.in_channels = in_channels
        self.hidden_channels = hidden_channels
        self.out_channels = out_channels
        self.num_layers = num_layers
        self.gnn_type = gnn_type
        self.use_hetero = use_hetero
        # Stored so forward() applies the configured rate instead of a
        # hard-coded constant.
        self.dropout = dropout

        # Input feature projection into the hidden space.
        self.input_proj = nn.Linear(in_channels, hidden_channels)

        # GNN stack: every layer maps hidden -> hidden except the last,
        # which maps hidden -> out_channels.
        self.gnn_layers = nn.ModuleList()
        if use_hetero:
            for i in range(num_layers):
                out_ch = hidden_channels if i < num_layers - 1 else out_channels
                self.gnn_layers.append(HeteroGNNLayer(
                    in_channels=hidden_channels,
                    out_channels=out_ch,
                    gnn_type=gnn_type
                ))
        else:
            for i in range(num_layers):
                out_ch = hidden_channels if i < num_layers - 1 else out_channels
                if gnn_type == 'gcn':
                    self.gnn_layers.append(GCNConv(hidden_channels, out_ch))
                elif gnn_type == 'gat':
                    self.gnn_layers.append(GATConv(hidden_channels, out_ch, heads=4, concat=False, dropout=dropout))
                elif gnn_type == 'sage':
                    self.gnn_layers.append(SAGEConv(hidden_channels, out_ch))
                else:
                    # Previously an unknown type silently produced an empty
                    # stack; fail fast instead, matching HeteroGNNLayer.
                    raise ValueError(f"不支持的GNN类型: {gnn_type}")

        # Output projection with normalization and dropout.
        self.output_proj = nn.Sequential(
            nn.Linear(out_channels, out_channels),
            nn.LayerNorm(out_channels),
            nn.ReLU(inplace=True),
            nn.Dropout(dropout)
        )

        # Global readout over all nodes.
        self.global_pool = GlobalPooling(out_channels)

    def forward(self, data):
        """Encode a graph.

        Args:
            data: PyTorch Geometric (hetero-)graph data object.

        Returns:
            node_features: per-node embeddings — a [num_nodes, out_channels]
                tensor (homogeneous) or a {node_type: tensor} dict (hetero).
            global_features: pooled global graph embedding.
        """
        if self.use_hetero:
            return self._forward_hetero(data)
        return self._forward_homo(data)

    def _forward_hetero(self, data):
        """Heterogeneous path: per-type projection, typed message passing."""
        # Project raw features for every node type that carries features.
        node_features = {
            node_type: self.input_proj(data[node_type].x)
            for node_type in data.node_types
            if hasattr(data[node_type], 'x')
        }

        for layer in self.gnn_layers:
            node_features = layer(node_features, data.edge_index_dict)

        for node_type in node_features:
            node_features[node_type] = self.output_proj(node_features[node_type])

        batch_dict = data.batch_dict if hasattr(data, 'batch_dict') else None
        global_features = self.global_pool(node_features, batch_dict)
        return node_features, global_features

    def _forward_homo(self, data):
        """Homogeneous path: single feature matrix and edge index."""
        x = self.input_proj(data.x)
        edge_index = data.edge_index

        for layer in self.gnn_layers:
            # Forward edge attributes only to layers that advertise support.
            if (hasattr(layer, 'supports_edge_attr') and layer.supports_edge_attr
                    and hasattr(data, 'edge_attr')):
                x = layer(x, edge_index, data.edge_attr)
            else:
                x = layer(x, edge_index)
            x = F.relu(x)
            # Use the configured rate (was hard-coded to 0.1).
            x = F.dropout(x, p=self.dropout, training=self.training)

        x = self.output_proj(x)

        batch = data.batch if hasattr(data, 'batch') else None
        global_features = self.global_pool(x, batch)
        return x, global_features


class HeteroGNNLayer(nn.Module):
    """Heterogeneous GNN layer with lazily-built per-relation convolutions.

    The set of edge types is only known once the first batch arrives, so the
    underlying ``HeteroConv`` is created on the first forward pass.

    NOTE(review): because the convolutions are created lazily, their
    parameters are not registered until the first forward call — run one
    dummy forward before building the optimizer (or calling ``.to()``) if
    those parameters must be included.
    """

    def __init__(self, in_channels, out_channels, gnn_type='gat'):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.gnn_type = gnn_type

        # Built on first forward, once the edge types are known.
        self.convs = None

    def forward(self, x_dict, edge_index_dict):
        """Run one round of typed message passing.

        Args:
            x_dict: Node feature dict {node_type: tensor}.
            edge_index_dict: Edge index dict
                {(src_type, relation, dst_type): tensor}.

        Returns:
            Updated {node_type: tensor} dict after mean aggregation across
            relations and a ReLU nonlinearity.
        """
        if self.convs is None:
            self.convs = HeteroConv(
                {
                    edge_key: self._create_conv(self.in_channels, self.out_channels)
                    for edge_key in edge_index_dict.keys()
                },
                aggr='mean'
            )
            # The lazily-created convs start on CPU; move them to wherever
            # the inputs live so GPU batches do not hit a device mismatch.
            if x_dict:
                self.convs = self.convs.to(next(iter(x_dict.values())).device)

        out_dict = self.convs(x_dict, edge_index_dict)

        # Nonlinearity applied per node type.
        return {node_type: F.relu(h) for node_type, h in out_dict.items()}

    def _create_conv(self, in_channels, out_channels):
        """Instantiate a single-relation convolution of the configured type."""
        if self.gnn_type == 'gcn':
            return GCNConv(in_channels, out_channels)
        elif self.gnn_type == 'gat':
            return GATConv(in_channels, out_channels, heads=4, concat=False)
        elif self.gnn_type == 'sage':
            return SAGEConv(in_channels, out_channels)
        else:
            raise ValueError(f"不支持的GNN类型: {self.gnn_type}")


class GlobalPooling(nn.Module):
    """Global graph readout: concat(mean-pool, max-pool) -> projection.

    Works on both homogeneous node features and hetero {node_type: tensor}
    dicts; hetero results are averaged across node types.
    """

    def __init__(self, in_channels):
        super().__init__()
        self.in_channels = in_channels

        # Projects the concatenated [mean || max] vector (2 * in_channels)
        # back down to in_channels.
        self.proj = nn.Sequential(
            nn.Linear(in_channels * 2, in_channels),
            nn.LayerNorm(in_channels),
            nn.ReLU(inplace=True)
        )

    def forward(self, x, batch=None):
        """Pool node features into per-graph embeddings.

        Args:
            x: Node features — [num_nodes, channels] tensor or
                {node_type: tensor} dict.
            batch: Graph-assignment indices — [num_nodes] tensor or
                {node_type: tensor} dict. If None, all nodes are assumed to
                belong to a single graph.

        Returns:
            [num_graphs, in_channels] tensor ([1, in_channels] if unbatched).
        """
        if isinstance(x, dict):
            return self._pool_hetero(x, batch)
        return self._pool_homo(x, batch)

    def _pool_homo(self, x, batch):
        """Pool a homogeneous node-feature matrix."""
        if batch is not None:
            mean_pool = global_mean_pool(x, batch)
            max_pool = global_max_pool(x, batch)
        else:
            # No assignment available: all nodes belong to one graph.
            mean_pool = torch.mean(x, dim=0, keepdim=True)
            max_pool = torch.max(x, dim=0, keepdim=True)[0]
        return self.proj(torch.cat([mean_pool, max_pool], dim=-1))

    def _pool_hetero(self, x, batch):
        """Pool a hetero feature dict, averaging across node types."""
        num_graphs = None
        if batch is not None:
            # Shared graph count so every node type pools to the same
            # number of rows even if a type is absent from some graphs.
            sizes = [int(b.max()) + 1 for t, b in batch.items()
                     if t in x and b.numel() > 0]
            num_graphs = max(sizes) if sizes else 1

        pooled_features = []
        for node_type, features in x.items():
            if batch is not None and node_type in batch:
                node_batch = batch[node_type]
                mean_pool = global_mean_pool(features, node_batch, size=num_graphs)
                max_pool = global_max_pool(features, node_batch, size=num_graphs)
            else:
                # No assignment available: treat all nodes as one graph.
                mean_pool = torch.mean(features, dim=0, keepdim=True)
                max_pool = torch.max(features, dim=0, keepdim=True)[0]
            pooled_features.append(torch.cat([mean_pool, max_pool], dim=-1))

        if not pooled_features:
            # Empty graph: zero vector on the module's device.
            pooled = torch.zeros(1, self.in_channels * 2,
                                 device=next(self.proj.parameters()).device)
        elif len({p.shape[0] for p in pooled_features}) == 1:
            # Average across node types while PRESERVING the graph/batch
            # dimension. (The previous cat(dim=0)+mean(dim=0) collapsed all
            # graphs in a batch into a single row.)
            pooled = torch.stack(pooled_features, dim=0).mean(dim=0)
        else:
            # Mixed row counts (some types unbatched): fall back to the old
            # collapsing behavior rather than crashing on torch.stack.
            pooled = torch.cat(pooled_features, dim=0).mean(dim=0, keepdim=True)

        return self.proj(pooled)