import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.loader import DataLoader
from ogb.graphproppred import PygGraphPropPredDataset

# Workaround: PyTorch 2.6+ changed torch.load's default to weights_only=True,
# which breaks OGB's pickled dataset files; restore the old behavior.
# NOTE(review): this monkey-patches torch.load for the ENTIRE process and
# disables the safer weights_only default — only acceptable for trusted,
# locally-downloaded data. Confirm no other code relies on the new default.
import functools
original_torch_load = torch.load
torch.load = functools.partial(original_torch_load, weights_only=False)

# 定义GAE模型
class GAE(nn.Module):
    """Two-layer MLP encoder with mean pooling of node features per graph.

    Despite the name, this is not a graph autoencoder: ``edge_index`` is
    accepted for interface parity with GNN models but is never used, so the
    model ignores graph structure entirely.

    Args:
        in_channels: Dimensionality of the input node features.
        hidden_channels: Size of the hidden layer.
        out_channels: Dimensionality of the per-graph output (logits).
    """

    def __init__(self, in_channels, hidden_channels, out_channels):
        super().__init__()
        self.encode = nn.Linear(in_channels, hidden_channels)
        self.decode = nn.Linear(hidden_channels, out_channels)
        # The original unused AdaptiveAvgPool1d layer was removed: it was
        # never called and holds no parameters, so state_dicts stay compatible.

    def forward(self, x, edge_index, batch=None):
        """Encode node features and (optionally) mean-pool them per graph.

        Args:
            x: Node feature matrix of shape [num_nodes, in_channels].
            edge_index: Graph connectivity; unused (kept for interface parity).
            batch: Optional [num_nodes] long tensor mapping each node to its
                graph index, as produced by torch_geometric's DataLoader.

        Returns:
            [batch_size, out_channels] graph-level outputs when ``batch`` is
            given, otherwise [num_nodes, out_channels] node-level outputs.
        """
        x = F.relu(self.encode(x))
        x = self.decode(x)
        if batch is None:
            return x
        # Vectorized mean pooling: scatter-sum node features per graph with a
        # single index_add_, then divide by the node count of each graph.
        # Replaces the original O(batch_size) Python loop over boolean masks.
        batch_size = int(batch.max().item()) + 1
        sums = torch.zeros(batch_size, x.size(1), dtype=x.dtype, device=x.device)
        sums.index_add_(0, batch, x)
        counts = torch.bincount(batch, minlength=batch_size).to(x.dtype)
        # clamp guards against an empty graph (the original loop produced NaN
        # via mean over zero nodes); such graphs now yield zeros instead.
        return sums / counts.clamp(min=1).unsqueeze(1)

# Load the ogbg-molhiv dataset (downloads to ./dataset on first run).
# Binary graph classification: does a molecule inhibit HIV replication.
dataset = PygGraphPropPredDataset(name="ogbg-molhiv")
split_idx = dataset.get_idx_split()  # OGB's standard scaffold split
train_idx = split_idx["train"]
val_idx = split_idx["valid"]
test_idx = split_idx["test"]

# Indexing the dataset with an index tensor yields a view-like sub-dataset.
train_data = dataset[train_idx]
val_data = dataset[val_idx]
test_data = dataset[test_idx]

# PyG's DataLoader collates graphs into one big disconnected graph and adds
# a `batch` vector mapping each node back to its graph.
train_loader = DataLoader(train_data, batch_size=32, shuffle=True)
val_loader = DataLoader(val_data, batch_size=32, shuffle=False)
test_loader = DataLoader(test_data, batch_size=32, shuffle=False)

# Derive model dimensions from a sample graph instead of hard-coding them.
sample_data = dataset[0]
in_channels = sample_data.x.size(1)
# For molhiv, y is [1, num_tasks]; fall back to 1 output if labels are 1-D.
out_channels = sample_data.y.size(1) if sample_data.y.dim() > 1 else 1

print(f"输入特征维度: {in_channels}")
print(f"输出特征维度: {out_channels}")

# Model, loss, optimizer. BCEWithLogitsLoss expects raw logits (no sigmoid
# inside the model's forward).
# NOTE(review): everything runs on CPU — no .to(device) anywhere; confirm
# that is intended before training 100 epochs on the full dataset.
model = GAE(in_channels, 128, out_channels)
criterion = nn.BCEWithLogitsLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

# Training loop: 100 epochs over the training split, reporting the mean
# per-batch loss after each epoch.
for epoch in range(100):
    model.train()
    batch_losses = []
    for batch_data in train_loader:
        # PyG batches may carry integer node features / labels; the loss and
        # the linear layers need floats.
        features = batch_data.x.float()
        targets = batch_data.y.float()
        # BCEWithLogitsLoss requires targets shaped like the model output.
        if targets.dim() == 1:
            targets = targets.unsqueeze(1)

        optimizer.zero_grad()
        logits = model(features, batch_data.edge_index, batch_data.batch)
        loss = criterion(logits, targets)
        loss.backward()
        optimizer.step()

        batch_losses.append(loss.item())

    # sum() adds left-to-right, matching the original running-total order.
    print(f'Epoch: {epoch}, Loss: {sum(batch_losses) / len(batch_losses)}')

# Evaluation: the original duplicated the val/test loops verbatim; both are
# now driven by one helper so the metric logic cannot drift between splits.
def _evaluate(loader, split_name):
    """Print the mean loss and mean per-batch accuracy of `model` on `loader`.

    Args:
        loader: DataLoader over one dataset split.
        split_name: Label used in the printed report ('Val' or 'Test').
    """
    total_loss = 0.0
    total_acc = 0.0
    num_batches = 0
    with torch.no_grad():
        for data in loader:
            x = data.x.float()
            y = data.y.float()
            # Match target shape to the model's [batch_size, out_channels] output.
            if y.dim() == 1:
                y = y.unsqueeze(1)

            out = model(x, data.edge_index, data.batch)
            total_loss += criterion(out, y).item()
            # Threshold sigmoid probabilities at 0.5 for accuracy.
            # NOTE(review): plain accuracy is misleading on the heavily
            # imbalanced molhiv labels — OGB's official metric is ROC-AUC.
            pred = torch.sigmoid(out)
            total_acc += ((pred.round() == y).sum().float() / y.numel()).item()
            num_batches += 1
    print(f'{split_name} Loss: {total_loss / num_batches}, '
          f'{split_name} Acc: {total_acc / num_batches}')

model.eval()
_evaluate(val_loader, 'Val')
_evaluate(test_loader, 'Test')