import torch

# NOTE(review): torch.load unpickles arbitrary objects — only load trusted
# files (consider weights_only=True on recent PyTorch if the file allows it).
dataset = torch.load("../data/TWeibo_data.pt")

# Keep only subgraphs that are dicts and carry every key the summary code
# below reads: features 'x', connectivity 'edge_index', and labels 'y'.
# (The original only checked 'y', so a dict missing 'x'/'edge_index' passed
# validation and crashed later when the statistics were computed.)
valid_subgraphs = []
for i, g in enumerate(dataset):
    if not isinstance(g, dict):
        print(f"警告: 子图 {i} 不是字典类型 ({type(g)})，已跳过")
        continue
    if 'y' not in g or g['y'] is None:
        print(f"警告: 子图 {i} 缺少有效标签，已跳过")
        continue
    if g.get('x') is None or g.get('edge_index') is None:
        print(f"警告: 子图 {i} 缺少 'x' 或 'edge_index'，已跳过")
        continue
    valid_subgraphs.append(g)

if valid_subgraphs:

    # Per-graph shape info. NOTE(review): these values come from the FIRST
    # valid subgraph only — if subgraphs differ in size they are not
    # dataset-wide totals. Confirm against how the data was chunked.
    num_nodes = valid_subgraphs[0]['x'].shape[0]
    num_edges = valid_subgraphs[0]['edge_index'].shape[1]
    num_features = valid_subgraphs[0]['x'].shape[1]

    # Pool labels across all valid subgraphs.
    # Assumes each g['y'] is a 1-D tensor — TODO confirm; a 0-dim or 2-D 'y'
    # would make torch.cat fail or skew the element count.
    all_labels = torch.cat([g['y'] for g in valid_subgraphs])
    unique_labels = torch.unique(all_labels)
    num_classes = len(unique_labels)

    print(f"有效子图数量: {len(valid_subgraphs)}/{len(dataset)}")
    print("\n数据集统计信息:")
    print(f"- 节点数: {num_nodes}")
    print(f"- 边数: {num_edges}")
    print(f"- 特征维度: {num_features}")
    print(f"- 有效标签数量: {len(all_labels)}")
    print(f"- 类别数: {num_classes}")

    if num_classes > 0:
        print("\n类别分布:")
        # BUG FIX: the old code computed bincount(minlength=num_classes) —
        # which is indexed by raw label VALUE — and then read positions
        # 0..num_classes-1. Whenever the observed labels are not exactly the
        # contiguous range 0..num_classes-1 (e.g. labels {1, 3}), that
        # reported wrong counts under wrong class ids. Count each actual
        # label value instead and print the real label.
        total = len(all_labels)
        for lbl in unique_labels.tolist():
            cnt = int((all_labels == lbl).sum())
            print(f"  类别 {lbl}: {cnt} 节点 ({cnt/total*100:.1f}%)")
else:
    print("错误: 未找到有效的子图数据")