import torch
import torch.nn as nn
import torch.nn.functional as F

class GATLayer(nn.Module):
    """Multi-head graph attention layer (Velickovic et al., GAT, ICLR 2018)
    operating on batched dense node features with a single shared edge list.

    Args:
        input_dim: size of each input node feature vector.
        output_dim: size of each output node feature vector, per head.
        num_heads: number of independent attention heads.
        dropout: dropout probability applied to the attention weights.
        concat: if True, return per-head outputs (caller flattens heads);
            if False, return the mean over heads.
    """

    def __init__(self, input_dim, output_dim, num_heads=8, dropout=0.6, concat=True):
        super(GATLayer, self).__init__()
        self.num_heads = num_heads
        self.dropout = dropout
        self.concat = concat

        # Per-head linear projection weights.
        self.W = nn.Parameter(torch.Tensor(num_heads, input_dim, output_dim))
        nn.init.xavier_uniform_(self.W.data)

        # Per-head attention vector applied to [src || dst] edge features.
        self.a = nn.Parameter(torch.Tensor(num_heads, 2 * output_dim, 1))
        nn.init.xavier_uniform_(self.a.data)

        # Slope 0.2 as in the original GAT paper.
        self.leakyrelu = nn.LeakyReLU(0.2)

    def forward(self, x, edge_index):
        """
        Args:
            x: (batch_size, num_nodes, input_dim) node features.
            edge_index: (2, num_edges) long tensor; row 0 = source nodes,
                row 1 = destination nodes. Shared across the batch.

        Returns:
            (batch_size, num_heads, num_nodes, output_dim) if concat=True,
            otherwise (batch_size, num_nodes, output_dim) (mean over heads).
        """
        batch_size, num_nodes, _ = x.size()
        src, dst = edge_index[0], edge_index[1]

        # Project node features once per head via broadcasting:
        # (B, 1, N, F_in) @ (1, H, F_in, F_out) -> (B, H, N, F_out)
        h = torch.matmul(x.unsqueeze(1), self.W.unsqueeze(0))

        # Gather endpoint features per edge and score each edge.
        h_src = h[:, :, src]  # (B, H, E, F_out)
        h_dst = h[:, :, dst]  # (B, H, E, F_out)
        a_input = torch.cat([h_src, h_dst], dim=-1)  # (B, H, E, 2*F_out)
        e = self.leakyrelu(torch.matmul(a_input, self.a).squeeze(-1))  # (B, H, E)

        # Exclude self-loop scores; their exp() becomes exactly 0 below.
        loop_mask = src == dst
        if bool(loop_mask.any()):
            e = e.masked_fill(loop_mask.view(1, 1, -1), float('-inf'))

        # BUGFIX: the original applied softmax over *all* edges at once
        # (F.softmax over the edge axis). GAT normalizes attention per
        # destination node, i.e. a segment softmax over each node's
        # incoming edges. The global max shift cancels between numerator
        # and denominator, so it only provides numerical stability.
        e = e - e.amax(dim=-1, keepdim=True).detach()
        exp_e = torch.exp(e)  # (B, H, E); masked edges contribute 0
        dst_expand = dst.view(1, 1, -1).expand(batch_size, self.num_heads, -1)
        denom = torch.zeros(batch_size, self.num_heads, num_nodes,
                            dtype=exp_e.dtype, device=exp_e.device)
        denom.scatter_add_(2, dst_expand, exp_e)  # sum of exp per destination
        alpha = exp_e / (denom[:, :, dst] + 1e-16)  # (B, H, E)

        # Attention dropout (active only in training mode).
        alpha = F.dropout(alpha, p=self.dropout, training=self.training)

        # Weight the source messages and sum them at their destination node.
        messages = alpha.unsqueeze(-1) * h_src  # (B, H, E, F_out)
        agg = torch.zeros_like(h)
        agg.scatter_add_(
            dim=2,
            index=dst_expand.unsqueeze(-1).expand(-1, -1, -1, h.size(-1)),
            src=messages,
        )

        # NOTE(review): no residual connection here despite the original
        # comment claiming one; adding it would change the contract.
        if self.concat:
            return F.elu(agg)  # (B, H, N, F_out); caller flattens the heads
        else:
            return agg.mean(dim=1)  # (B, N, F_out)

class FullyConnectedGAT(nn.Module):
    """Two-layer GAT over a fully-connected directed graph (no self-loops).

    Args:
        input_dim: node feature size.
        output_dim: final per-node output size.
        hidden_dim: per-head hidden size of the first layer.
        num_heads: number of attention heads in the first layer.
        dropout: dropout probability used throughout.
    """

    def __init__(self, input_dim, output_dim, hidden_dim=8, num_heads=8, dropout=0.6):
        super(FullyConnectedGAT, self).__init__()
        self.num_heads = num_heads
        self.dropout = dropout

        self.gat_layers = nn.ModuleList([
            # First layer: multi-head; head outputs are flattened downstream.
            GATLayer(input_dim=input_dim,
                     output_dim=hidden_dim,
                     num_heads=num_heads,
                     dropout=dropout,
                     concat=True),
            # Second layer: single head, averaged output.
            GATLayer(input_dim=hidden_dim * num_heads,
                     output_dim=output_dim,
                     num_heads=1,
                     dropout=dropout,
                     concat=False)
        ])
        self.norm = nn.LayerNorm(hidden_dim * num_heads)
        self.dropout_layer = nn.Dropout(dropout)

    def generate_full_edges(self, num_nodes):
        """Return the (2, num_nodes * (num_nodes - 1)) edge index of a
        fully connected directed graph with self-loops excluded."""
        node_indices = torch.arange(num_nodes)
        src, dst = torch.meshgrid(node_indices, node_indices, indexing='ij')
        mask = src != dst  # drop self-loops
        return torch.stack([src[mask], dst[mask]], dim=0)  # (2, num_edges)

    def forward(self, x):
        """
        Args:
            x: (batch_size, num_nodes, input_dim)

        Returns:
            (batch_size, num_nodes, output_dim)
        """
        batch_size, num_nodes, _ = x.shape

        # One shared edge index serves every sample in the batch.
        edge_index = self.generate_full_edges(num_nodes).to(x.device)  # (2, E)

        # First GAT layer returns (batch, heads, nodes, hidden_dim).
        x = self.gat_layers[0](x, edge_index)
        # BUGFIX: the original reshaped (B, H, N, D) straight to (B, N, H*D),
        # which interleaves the node and head axes and scrambles node
        # features. Move the head axis next to the feature axis first.
        x = x.permute(0, 2, 1, 3).reshape(batch_size, num_nodes, -1)
        x = self.norm(x)
        x = F.elu(x)
        x = self.dropout_layer(x)

        # Second GAT layer (single head, mean) -> (batch, nodes, output_dim).
        x = self.gat_layers[1](x, edge_index)
        return x

# Smoke test
if __name__ == "__main__":
    # Model / data configuration.
    cfg = {"batch": 2, "nodes": 5, "in_dim": 10, "out_dim": 3, "hidden": 8}

    # Build the model and a random input batch.
    model = FullyConnectedGAT(cfg["in_dim"], cfg["out_dim"], cfg["hidden"])
    x = torch.randn(cfg["batch"], cfg["nodes"], cfg["in_dim"])
    edge_index = model.generate_full_edges(cfg["nodes"])

    # Forward pass; expected output shape is [2, 5, 3].
    output = model(x)

    # Report the shapes for a quick sanity check.
    print(f"输入维度: {x.shape}")
    print(f"输出维度: {output.shape}")  # should be torch.Size([2, 5, 3])
    print(f"边索引形状: {edge_index.shape}")

# Expected output:
# 输入维度: torch.Size([2, 5, 10])
# 输出维度: torch.Size([2, 5, 3])
# 边索引形状: torch.Size([2, 20])  (a fully connected graph on 5 nodes has 5 * 4 = 20 edges)