# TODO: use the package below to implement mini-batch training
# from train import train_one_epoch
# Input: data under ./training_data
# Output: accuracy (prediction accuracy)

# Revised multi-batch training code
from train.train_one_epoch import train_one_epoch
from train.train_one_epoch_ae import train_one_epoch_as
import torch
from torch_geometric.data import DataLoader, InMemoryDataset
import os
from model.gnn_model import GNNModel
from model.graphsage_model import GraphSAGE
from model.dgi_model import DGI
from model.GraphCL_model import GraphCL
from model.GraphMAE2_model import GraphMAE2Model
from model.autoencode_model import AEModel
import matplotlib.pyplot as plt
from torch.utils.data import random_split
import pandas as pd
from sklearn.metrics import f1_score, precision_score, recall_score

# Module-level matplotlib configuration shared by all training curves below.
plt.rcParams['font.sans-serif'] = ['Noto Sans CJK SC', 'SimHei', 'DejaVu Sans']  # prefer CJK-capable fonts first
plt.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly with CJK fonts
# NOTE(review): called at import time with no active figure; matplotlib will
# implicitly create one — confirm this is intended rather than a per-plot call.
plt.subplots_adjust(hspace=0.5)
# Dataset class that loads pre-saved graph data
class MyGraphDataset(InMemoryDataset):
    """In-memory graph dataset for one labeling method.

    Loads the ``(data, slices)`` pair saved as ``train_<method_name>.pt``
    under ``root`` (the format produced by ``InMemoryDataset.collate``).
    """

    def __init__(self, root, method_name):
        self.method_name = method_name
        super().__init__(root)
        # weights_only=False: the file stores full Data objects, not bare tensors.
        self.data, self.slices = torch.load(
            os.path.join(root, f"train_{method_name}.pt"),
            weights_only=False,
        )

    def __len__(self):
        # Graph count = number of slice boundaries of 'x' minus one.
        return self.slices['x'].size(0) - 1

# Load the datasets and split them into train/dev/test
def load_datasets(data_dir="training_data", batch_size=32, shuffle=True, num_workers=0, split_ratio=(0.7, 0.15, 0.15)):
    """Build train/dev/test mini-batch DataLoaders for every labeling method.

    Args:
        data_dir (str): directory holding the saved datasets.
        batch_size (int): number of graphs per batch.
        shuffle (bool): shuffle the training graphs each epoch.
        num_workers (int): DataLoader worker count.
        split_ratio (tuple): (train_ratio, dev_ratio, test_ratio) fractions.

    Returns:
        dict[str, dict]: method name -> {'train': ..., 'dev': ..., 'test': ...}.
    """
    loaders = {}

    # Extend this tuple to add more labeling methods.
    for method in ('kmeans', 'thr', 'gmm', 'combine'):
        dataset = MyGraphDataset(data_dir, method)

        # Derive split sizes; the test split absorbs any rounding remainder.
        n = len(dataset)
        n_train = int(split_ratio[0] * n)
        n_dev = int(split_ratio[1] * n)
        n_test = n - n_train - n_dev

        # Fixed seed keeps the split reproducible across runs.
        train_part, dev_part, test_part = random_split(
            dataset,
            [n_train, n_dev, n_test],
            generator=torch.Generator().manual_seed(42),
        )

        # Only the training loader is ever shuffled.
        loaders[method] = {
            'train': DataLoader(train_part, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers),
            'dev': DataLoader(dev_part, batch_size=batch_size, shuffle=False, num_workers=num_workers),
            'test': DataLoader(test_part, batch_size=batch_size, shuffle=False, num_workers=num_workers),
        }

    return loaders

# Dead code: the pre-unification evaluate() is preserved below inside a
# module-level string literal purely as a reference; it is never executed.
"""统一前
def evaluate(model, loader, criterion, task_type, device):
    model.eval()
    total_loss = 0
    total_samples = 0
    all_preds = []
    all_labels = []

    with torch.no_grad():
        for batch in loader:
            batch = batch.to(device)
            out = model(batch.x, batch.edge_index)

            loss = criterion(out, batch.y)
            
            #total_loss += loss.item() * batch.num_graphs


            num_nodes_in_batch = batch.y.size(0)
            total_loss += loss.item() * num_nodes_in_batch
            total_samples += num_nodes_in_batch

            if task_type == 'classification':
                preds = out.argmax(dim=1)
                all_preds.extend(preds.cpu().numpy())
                all_labels.extend(batch.y.cpu().numpy())

    #num_samples = len(loader.dataset)
    #avg_loss = total_loss / num_samples if num_samples > 0 else 0

    avg_loss = total_loss / total_samples if total_samples > 0 else 0
    
    if task_type == 'classification':
        # 使用'weighted'模式计算指标，以处理类别不平衡问题
        accuracy = sum(1 for i in range(len(all_preds)) if all_preds[i] == all_labels[i]) / len(all_labels) if len(all_labels) > 0 else 0
        precision = precision_score(all_labels, all_preds, average='weighted', zero_division=0)
        recall = recall_score(all_labels, all_preds, average='weighted', zero_division=0)
        f1 = f1_score(all_labels, all_preds, average='weighted', zero_division=0)
        
        return {
            "loss": avg_loss,
            "accuracy": accuracy,
            "precision": precision,
            "recall": recall,
            "f1_score": f1
        }
    else: # 回归任务
        return {"loss": avg_loss}
"""
# Unified evaluation
def evaluate(model, loader, criterion, device, get_embeds_func=None):
    """Evaluate a node-classification model on a DataLoader.

    Args:
        model: model under evaluation. GNNModel, GraphSAGE, GraphCL and
            GraphMAE2Model are called as ``model(x, edge_index)``;
            ``torch.nn.Linear`` (e.g. a probe over frozen embeddings) is
            called as ``model(x)``.
        loader: DataLoader yielding PyG batches with ``.x``, ``.edge_index``, ``.y``.
        criterion: loss applied to (logits, labels).
        device: torch device the batches are moved to.
        get_embeds_func: optional callable mapping a batch to node features;
            when given, it replaces ``batch.x`` (used for DGI embeddings).

    Returns:
        dict with 'loss' (per-node average), 'accuracy', 'precision',
        'recall', 'f1_score' (weighted averages).

    Raises:
        TypeError: if the model type is not supported.
    """
    model.eval()
    total_loss, total_samples = 0, 0
    all_preds, all_labels = [], []

    # All graph models share the same (features, edge_index) calling
    # convention, so one tuple check replaces four duplicated branches.
    graph_model_types = (GNNModel, GraphSAGE, GraphCL, GraphMAE2Model)

    with torch.no_grad():
        for batch in loader:
            batch = batch.to(device)

            features = get_embeds_func(batch) if get_embeds_func else batch.x

            if isinstance(model, graph_model_types):
                out = model(features, batch.edge_index)
            elif isinstance(model, torch.nn.Linear):
                out = model(features)
            # --- extension point: add new model types here ---
            else:
                raise TypeError(f"Unsupported model type in evaluate function: {type(model)}")

            loss = criterion(out, batch.y)

            # Weight each batch by its node count so the loss averages per node.
            num_nodes_in_batch = batch.y.size(0)
            total_loss += loss.item() * num_nodes_in_batch
            total_samples += num_nodes_in_batch

            preds = out.argmax(dim=1)
            all_preds.extend(preds.cpu().numpy())
            all_labels.extend(batch.y.cpu().numpy())

    avg_loss = total_loss / total_samples if total_samples > 0 else 0
    correct = sum(1 for p, y in zip(all_preds, all_labels) if p == y)
    accuracy = correct / len(all_labels) if len(all_labels) > 0 else 0
    # Weighted averaging accounts for class imbalance.
    precision = precision_score(all_labels, all_preds, average='weighted', zero_division=0)
    recall = recall_score(all_labels, all_preds, average='weighted', zero_division=0)
    f1 = f1_score(all_labels, all_preds, average='weighted', zero_division=0)

    return {"loss": avg_loss, "accuracy": accuracy, "precision": precision, "recall": recall, "f1_score": f1}

def train_completion(config, visual_flag=False):
    """
    Graph-completion (autoencoder) model training:
      1. Load full and masked graphs from the mask_generator output directory
      2. Pair them and build train/val/test DataLoaders
      3. Run train_one_epoch_as for each training epoch
      4. Save the best model (by validation loss) each time it improves

    Args:
        config (dict): requires 'device', 'batch_size', 'epochs', 'lr',
            'hidden_dim', 'num_layers', 'dropout'; optional 'test_ratio'
            (default 0.2), 'val_ratio' (default 0.1), 'seed' (default 42),
            'model_path' (default 'completion_model.pth').
        visual_flag (bool): unused here — the plotting code is commented out.
    """
    device     = config['device']
    batch_size = config['batch_size']
    epochs     = config['epochs']
    lr         = config['lr']
    test_ratio = config.get('test_ratio', 0.2)
    val_ratio  = config.get('val_ratio', 0.1)  # hold out a further 10% of the training pairs for validation

    # 1. Load the data produced by mask_generator
    data_dir     = "./Topology_simulation/Mask_Topu_data"
    # full_list    = torch.load(os.path.join(data_dir, "train_full.pt"))
    # masked_list  = torch.load(os.path.join(data_dir, "train_masked.pt"))
    full_list = torch.load(
        os.path.join(data_dir, "train_full.pt"),
        # map_location="cpu",
        weights_only=False
    )
    masked_list = torch.load(
        os.path.join(data_dir, "train_masked.pt"),
        # map_location="cpu",
        weights_only=False
    )

    # 2. Pair (masked, full) graphs and split the dataset
    # NOTE(review): assumes masked_list[i] corresponds to full_list[i] — confirm
    # mask_generator preserves ordering between the two files.
    pairs = list(zip(masked_list, full_list))

    # Split off the test set first
    test_size = int(len(pairs) * test_ratio)
    train_val_pairs, test_pairs = torch.utils.data.random_split(
        pairs,
        [len(pairs) - test_size, test_size],
        generator=torch.Generator().manual_seed(config.get('seed', 42))
    )

    # Then carve the validation set out of the remainder
    val_size = int(len(train_val_pairs) * val_ratio)
    train_pairs, val_pairs = torch.utils.data.random_split(
        train_val_pairs,
        [len(train_val_pairs) - val_size, val_size],
        generator=torch.Generator().manual_seed(config.get('seed', 42))
    )

    # 3. Build the DataLoaders
    train_loader = DataLoader(train_pairs, batch_size=batch_size, shuffle=True)
    val_loader = DataLoader(val_pairs, batch_size=batch_size, shuffle=False)
    test_loader = DataLoader(test_pairs, batch_size=batch_size, shuffle=False)

    # 4. Initialize model and optimizer
    in_dim = full_list[0].x.size(1)
    model = AEModel(
        in_dim=in_dim,
        hidden_dim=config['hidden_dim'],
        num_layers=config['num_layers'],
        dropout=config['dropout']
    ).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    train_loss_history = []
    val_loss_history = []

    # 5. Training loop
    best_val_loss = float('inf')
    for epoch in range(1, epochs + 1):
        model.train()
        avg_train_loss = train_one_epoch_as(model, train_loader, optimizer, device)
        train_loss_history.append(avg_train_loss)

        # Validation-set evaluation
        model.eval()
        val_loss = 0.0
        with torch.no_grad():
            for masked, full in val_loader:
                masked = masked.to(device)
                full = full.to(device)
                reconstructed = model(masked)
                loss = model.loss_function(reconstructed, full,masked)
                val_loss += loss.item()
        avg_val_loss = val_loss / len(val_loader)
        val_loss_history.append(avg_val_loss)

        # Report training and validation loss periodically
        if epoch == 1 or epoch % 10 == 0:
            print(f"[Completion] Epoch {epoch:03d}/{epochs:03d} — "
                  f"Train Loss: {avg_train_loss:.4f}, Val Loss: {avg_val_loss:.4f}")

        # Checkpoint whenever the validation loss improves
        if avg_val_loss < best_val_loss:
            best_val_loss = avg_val_loss
            save_path = config.get('model_path', 'completion_model.pth')
            torch.save(model.state_dict(), save_path)
            print(f"✅ Best model saved to {save_path} with val loss: {best_val_loss:.4f}")

    # 6. Final evaluation on the test set
    # NOTE(review): this evaluates the model in its LAST-epoch state, not the
    # best checkpoint saved above — confirm whether reloading save_path first
    # is the intended behavior.
    model.eval()
    test_loss = 0.0
    with torch.no_grad():
        for masked, full in test_loader:
            masked = masked.to(device)
            full = full.to(device)
            reconstructed = model(masked)
            loss = model.loss_function(reconstructed, full,masked)
            test_loss += loss.item()
    avg_test_loss = test_loss / len(test_loader)
    print(f"📊 Final Test Loss: {avg_test_loss:.4f}")

    # # 7. Optional visualization of the training curves
    # if visual_flag:
    #     plt.figure(figsize=(8, 5))
    #     plt.plot(train_loss_history, label="Train Loss")
    #     plt.plot(val_loss_history, label="Validation Loss")
    #     plt.xlabel("Epoch")
    #     plt.ylabel("Loss")
    #     plt.title("Graph Completion Training Curve")
    #     plt.legend()
    #     plt.tight_layout()
    #     plt.show(block=True)
    #
    # return model, train_loss_history, val_loss_history, avg_test_loss

# GNN model training
def train_gnn(config, visual_flag=False):
    """Supervised GNN training over every labeling method.

    For each method from load_datasets(): trains a GNNModel on the train
    split, prints dev metrics every 10 epochs, evaluates on the test split,
    and finally prints a per-method summary DataFrame.

    Args:
        config (dict): requires 'batch_size', 'hidden_dim', 'num_layers',
            'dropout', 'device', 'lr', 'epochs', 'task_type'
            ('classification' selects CrossEntropyLoss, otherwise MSELoss).
        visual_flag (bool): when True, show matplotlib training curves and
            suppress the periodic train-loss prints.
    """
    dataset_groups = load_datasets(batch_size=config['batch_size'])
    results = {}  # test metrics per labeling method

    for method, loaders in dataset_groups.items():
        print(f"\n=== Training on {method} labels ===")
        train_loader = loaders['train']
        dev_loader = loaders['dev']
        test_loader = loaders['test']

        loss_list = []
        acc_list = []

        # Infer input dimension and class count from the training data.
        sample_data = next(iter(train_loader))
        in_dim = sample_data.x.size(1)
        all_labels = torch.cat([batch.y.view(-1) for batch in train_loader], dim=0)
        out_dim = int(all_labels.max().item() + 1) if config['task_type'] == 'classification' else 1
        model = GNNModel(in_dim=in_dim, hidden_dim=config['hidden_dim'], out_dim=out_dim,num_layers=config['num_layers'],dropout=config['dropout']).to(config['device'])
        optimizer = torch.optim.AdamW(model.parameters(), lr=config['lr'], weight_decay=1e-4)
        criterion = torch.nn.CrossEntropyLoss() if config['task_type'] == 'classification' else torch.nn.MSELoss()

        for epoch in range(config['epochs']):
            model.train()
            total_loss = total_correct = total_samples = 0

            # Accumulate node-weighted loss/accuracy across mini-batches.
            for batch in train_loader:
                metrics = train_one_epoch(model, batch, optimizer, criterion, config['task_type'], config['device'], train=True)
                num_nodes_in_batch = batch.y.size(0)
                total_loss += metrics['loss'] * num_nodes_in_batch
                total_samples += num_nodes_in_batch
                total_correct += metrics['acc'] * num_nodes_in_batch

            avg_loss = total_loss / total_samples
            avg_acc = total_correct / total_samples
            loss_list.append(avg_loss)
            acc_list.append(avg_acc)

            if not visual_flag and (epoch + 1) % 10 == 0:
                print(f"Epoch {epoch+1}/{config['epochs']} | Train Loss: {avg_loss:.4f} | Train Acc: {avg_acc:.4f}")

            # Periodic dev-set evaluation.
            if (epoch + 1) % 10 == 0:
                dev_metrics = evaluate(model, dev_loader, criterion, config['device'])
                print(f"[Dev] Loss: {dev_metrics['loss']:.4f} | Acc: {dev_metrics['accuracy']:.4f} | F1: {dev_metrics['f1_score']:.4f}")


        if visual_flag:
            plt.clf()
            plt.suptitle(f"Training Progress on Method: {method} (Supervised GNN)")
            plt.subplot(2, 1, 1)
            plt.plot(loss_list, label='Train Loss')
            plt.xlabel('Epoch')
            plt.ylabel('Loss')
            plt.legend()

            if config['task_type'] == 'classification':
                plt.subplot(2, 1, 2)
                plt.plot(acc_list, label='Train Acc', color='orange')
                plt.xlabel('Epoch')
                plt.ylabel('Accuracy')
                plt.legend()

            plt.show(block=True)

        test_metrics = evaluate(model, test_loader, criterion, config['device'])
        print(f"\n=== Final Test Result on {method} labels (Supervised GNN) ===")
        if config['task_type'] == 'classification':
            print(f"[Test] Loss: {test_metrics['loss']:.4f} | Acc: {test_metrics['accuracy']:.4f} | "
                  f"Precision: {test_metrics['precision']:.4f} | Recall: {test_metrics['recall']:.4f} | F1: {test_metrics['f1_score']:.4f}")
            results[method] = test_metrics
        else:
            print(f"[Test] Loss: {test_metrics['loss']:.4f}")
            results[method] = {'Test Loss': test_metrics['loss']}

    df = pd.DataFrame(results).T
    print("\n--- Supervised GNN Final Performance Summary ---")
    print(df.to_string(formatters={'loss': '{:.4f}'.format, 'accuracy': '{:.4f}'.format, 'precision': '{:.4f}'.format, 'recall': '{:.4f}'.format, 'f1_score': '{:.4f}'.format}))

# GraphSAGE model training
def train_graphsage(config, visual_flag=False):
    """Supervised GraphSAGE training over every labeling method.

    Same pipeline as train_gnn but with a GraphSAGE backbone: train on each
    method's train split, print dev metrics every 10 epochs, evaluate on the
    test split, and print a per-method summary DataFrame.

    Args:
        config (dict): requires 'batch_size', 'hidden_dim', 'num_layers',
            'dropout', 'device', 'lr', 'epochs', 'task_type'
            ('classification' selects CrossEntropyLoss, otherwise MSELoss).
        visual_flag (bool): when True, show matplotlib training curves and
            suppress the periodic train-loss prints.
    """
    dataset_groups = load_datasets(batch_size=config['batch_size'])
    results = {}  # test metrics per labeling method

    for method, loaders in dataset_groups.items():
        print(f"\n=== Training on {method} labels ===")
        train_loader = loaders['train']
        dev_loader = loaders['dev']
        test_loader = loaders['test']

        loss_list = []
        acc_list = []

        # Infer input dimension and class count from the training data.
        sample_data = next(iter(train_loader))
        in_dim = sample_data.x.size(1)
        all_labels = torch.cat([batch.y.view(-1) for batch in train_loader], dim=0)
        out_dim = int(all_labels.max().item() + 1) if config['task_type'] == 'classification' else 1
        # Use the GraphSAGE model
        model = GraphSAGE(in_dim=in_dim, hidden_dim=config['hidden_dim'], out_dim=out_dim,num_layers=config['num_layers'], dropout=config['dropout']).to(config['device'])
        optimizer = torch.optim.AdamW(model.parameters(), lr=config['lr'], weight_decay=1e-4)
        criterion = torch.nn.CrossEntropyLoss() if config['task_type'] == 'classification' else torch.nn.MSELoss()

        for epoch in range(config['epochs']):
            model.train()
            total_loss = total_correct = total_samples = 0

            # Accumulate node-weighted loss/accuracy across mini-batches.
            for batch in train_loader:
                metrics = train_one_epoch(model, batch, optimizer, criterion, config['task_type'], config['device'], train=True)
                num_nodes_in_batch = batch.y.size(0)
                total_loss += metrics['loss'] * num_nodes_in_batch
                total_samples += num_nodes_in_batch
                total_correct += metrics['acc'] * num_nodes_in_batch

            avg_loss = total_loss / total_samples
            avg_acc = total_correct / total_samples
            loss_list.append(avg_loss)
            acc_list.append(avg_acc)

            if not visual_flag and (epoch + 1) % 10 == 0:
                print(f"Epoch {epoch+1}/{config['epochs']} | Train Loss: {avg_loss:.4f} | Train Acc: {avg_acc:.4f}")

            # Periodic dev-set evaluation.
            if (epoch + 1) % 10 == 0:
                dev_metrics = evaluate(model, dev_loader, criterion, config['device'])
                print(f"[Dev] Loss: {dev_metrics['loss']:.4f} | Acc: {dev_metrics['accuracy']:.4f} | F1: {dev_metrics['f1_score']:.4f}")


        if visual_flag:
            plt.clf()
            plt.suptitle(f"Training Progress on Method: {method} (Supervised GraphSAGE)")
            plt.subplot(2, 1, 1)
            plt.plot(loss_list, label='Train Loss')
            plt.xlabel('Epoch')
            plt.ylabel('Loss')
            plt.legend()

            if config['task_type'] == 'classification':
                plt.subplot(2, 1, 2)
                plt.plot(acc_list, label='Train Acc', color='orange')
                plt.xlabel('Epoch')
                plt.ylabel('Accuracy')
                plt.legend()

            plt.show(block=True)

        test_metrics = evaluate(model, test_loader, criterion, config['device'])
        print(f"\n=== Final Test Result on {method} labels (Supervised GraphSAGE) ===")
        if config['task_type'] == 'classification':
            print(f"[Test] Loss: {test_metrics['loss']:.4f} | Acc: {test_metrics['accuracy']:.4f} | "
                  f"Precision: {test_metrics['precision']:.4f} | Recall: {test_metrics['recall']:.4f} | F1: {test_metrics['f1_score']:.4f}")
            results[method] = test_metrics
        else:
            print(f"[Test] Loss: {test_metrics['loss']:.4f}")
            results[method] = {'Test Loss': test_metrics['loss']}

    df = pd.DataFrame(results).T
    print("\n--- Supervised GraphSAGE Final Performance Summary ---")
    print(df.to_string(formatters={'loss': '{:.4f}'.format, 'accuracy': '{:.4f}'.format, 'precision': '{:.4f}'.format, 'recall': '{:.4f}'.format, 'f1_score': '{:.4f}'.format}))

# GraphCL model training
def train_graphcl(config, visual_flag=False):
    """Supervised GraphCL training over every labeling method.

    Same pipeline as train_gnn but with a GraphCL backbone: train on each
    method's train split, print dev metrics every 10 epochs, evaluate on the
    test split, and print a per-method summary DataFrame.

    Args:
        config (dict): requires 'batch_size', 'hidden_dim', 'num_layers',
            'dropout', 'device', 'lr', 'epochs', 'task_type'
            ('classification' selects CrossEntropyLoss, otherwise MSELoss).
        visual_flag (bool): when True, show matplotlib training curves and
            suppress the periodic train-loss prints.
    """
    dataset_groups = load_datasets(batch_size=config['batch_size'])
    results = {}  # test metrics per labeling method

    for method, loaders in dataset_groups.items():
        print(f"\n=== Training on {method} labels ===")
        train_loader = loaders['train']
        dev_loader = loaders['dev']
        test_loader = loaders['test']

        loss_list = []
        acc_list = []

        # Infer input dimension and class count from the training data.
        sample_data = next(iter(train_loader))
        in_dim = sample_data.x.size(1)
        all_labels = torch.cat([batch.y.view(-1) for batch in train_loader], dim=0)
        out_dim = int(all_labels.max().item() + 1) if config['task_type'] == 'classification' else 1
        # Use the GraphCL model
        model = GraphCL(in_dim=in_dim, hidden_dim=config['hidden_dim'], out_dim=out_dim,num_layers=config['num_layers'], dropout=config['dropout']).to(config['device'])
        optimizer = torch.optim.AdamW(model.parameters(), lr=config['lr'], weight_decay=1e-4)
        criterion = torch.nn.CrossEntropyLoss() if config['task_type'] == 'classification' else torch.nn.MSELoss()

        for epoch in range(config['epochs']):
            model.train()
            total_loss = total_correct = total_samples = 0

            # Accumulate node-weighted loss/accuracy across mini-batches.
            for batch in train_loader:
                metrics = train_one_epoch(model, batch, optimizer, criterion, config['task_type'], config['device'], train=True)
                num_nodes_in_batch = batch.y.size(0)
                total_loss += metrics['loss'] * num_nodes_in_batch
                total_samples += num_nodes_in_batch
                total_correct += metrics['acc'] * num_nodes_in_batch

            avg_loss = total_loss / total_samples
            avg_acc = total_correct / total_samples
            loss_list.append(avg_loss)
            acc_list.append(avg_acc)

            if not visual_flag and (epoch + 1) % 10 == 0:
                print(f"Epoch {epoch+1}/{config['epochs']} | Train Loss: {avg_loss:.4f} | Train Acc: {avg_acc:.4f}")

            # Periodic dev-set evaluation.
            if (epoch + 1) % 10 == 0:
                dev_metrics = evaluate(model, dev_loader, criterion, config['device'])
                print(f"[Dev] Loss: {dev_metrics['loss']:.4f} | Acc: {dev_metrics['accuracy']:.4f} | F1: {dev_metrics['f1_score']:.4f}")


        if visual_flag:
            plt.clf()
            plt.suptitle(f"Training Progress on Method: {method} (Supervised GraphCL)")
            plt.subplot(2, 1, 1)
            plt.plot(loss_list, label='Train Loss')
            plt.xlabel('Epoch')
            plt.ylabel('Loss')
            plt.legend()

            if config['task_type'] == 'classification':
                plt.subplot(2, 1, 2)
                plt.plot(acc_list, label='Train Acc', color='orange')
                plt.xlabel('Epoch')
                plt.ylabel('Accuracy')
                plt.legend()

            plt.show(block=True)

        test_metrics = evaluate(model, test_loader, criterion, config['device'])
        print(f"\n=== Final Test Result on {method} labels (Supervised GraphCL) ===")
        if config['task_type'] == 'classification':
            print(f"[Test] Loss: {test_metrics['loss']:.4f} | Acc: {test_metrics['accuracy']:.4f} | "
                  f"Precision: {test_metrics['precision']:.4f} | Recall: {test_metrics['recall']:.4f} | F1: {test_metrics['f1_score']:.4f}")
            results[method] = test_metrics
        else:
            print(f"[Test] Loss: {test_metrics['loss']:.4f}")
            results[method] = {'Test Loss': test_metrics['loss']}

    df = pd.DataFrame(results).T
    print("\n--- Supervised GraphCL Final Performance Summary ---")
    print(df.to_string(formatters={'loss': '{:.4f}'.format, 'accuracy': '{:.4f}'.format, 'precision': '{:.4f}'.format, 'recall': '{:.4f}'.format, 'f1_score': '{:.4f}'.format}))

# GraphMAE2 model training
def train_graphmae2(config, visual_flag=False):
    """Supervised GraphMAE2 training over every labeling method.

    Same pipeline as train_gnn but with a GraphMAE2Model backbone: train on
    each method's train split, print dev metrics every 10 epochs, evaluate on
    the test split, and print a per-method summary DataFrame.

    Args:
        config (dict): requires 'batch_size', 'hidden_dim', 'num_layers',
            'dropout', 'device', 'lr', 'epochs', 'task_type'
            ('classification' selects CrossEntropyLoss, otherwise MSELoss).
        visual_flag (bool): when True, show matplotlib training curves and
            suppress the periodic train-loss prints.
    """
    dataset_groups = load_datasets(batch_size=config['batch_size'])
    results = {}  # test metrics per labeling method

    for method, loaders in dataset_groups.items():
        print(f"\n=== Training on {method} labels ===")
        train_loader = loaders['train']
        dev_loader = loaders['dev']
        test_loader = loaders['test']

        loss_list = []
        acc_list = []

        # Infer input dimension and class count from the training data.
        sample_data = next(iter(train_loader))
        in_dim = sample_data.x.size(1)
        all_labels = torch.cat([batch.y.view(-1) for batch in train_loader], dim=0)
        out_dim = int(all_labels.max().item() + 1) if config['task_type'] == 'classification' else 1
        # Use the GraphMAE2 model
        model = GraphMAE2Model(in_dim=in_dim, hidden_dim=config['hidden_dim'], out_dim=out_dim,num_layers=config['num_layers'], dropout=config['dropout']).to(config['device'])
        optimizer = torch.optim.AdamW(model.parameters(), lr=config['lr'], weight_decay=1e-4)
        criterion = torch.nn.CrossEntropyLoss() if config['task_type'] == 'classification' else torch.nn.MSELoss()

        for epoch in range(config['epochs']):
            model.train()
            total_loss = total_correct = total_samples = 0

            # Accumulate node-weighted loss/accuracy across mini-batches.
            for batch in train_loader:
                metrics = train_one_epoch(model, batch, optimizer, criterion, config['task_type'], config['device'], train=True)
                num_nodes_in_batch = batch.y.size(0)
                total_loss += metrics['loss'] * num_nodes_in_batch
                total_samples += num_nodes_in_batch
                total_correct += metrics['acc'] * num_nodes_in_batch

            avg_loss = total_loss / total_samples
            avg_acc = total_correct / total_samples
            loss_list.append(avg_loss)
            acc_list.append(avg_acc)

            if not visual_flag and (epoch + 1) % 10 == 0:
                print(f"Epoch {epoch+1}/{config['epochs']} | Train Loss: {avg_loss:.4f} | Train Acc: {avg_acc:.4f}")

            # Periodic dev-set evaluation.
            if (epoch + 1) % 10 == 0:
                dev_metrics = evaluate(model, dev_loader, criterion, config['device'])
                print(f"[Dev] Loss: {dev_metrics['loss']:.4f} | Acc: {dev_metrics['accuracy']:.4f} | F1: {dev_metrics['f1_score']:.4f}")


        if visual_flag:
            plt.clf()
            plt.suptitle(f"Training Progress on Method: {method} (Supervised GraphMAE2)")
            plt.subplot(2, 1, 1)
            plt.plot(loss_list, label='Train Loss')
            plt.xlabel('Epoch')
            plt.ylabel('Loss')
            plt.legend()

            if config['task_type'] == 'classification':
                plt.subplot(2, 1, 2)
                plt.plot(acc_list, label='Train Acc', color='orange')
                plt.xlabel('Epoch')
                plt.ylabel('Accuracy')
                plt.legend()

            plt.show(block=True)

        test_metrics = evaluate(model, test_loader, criterion, config['device'])
        print(f"\n=== Final Test Result on {method} labels (Supervised GraphMAE2) ===")
        if config['task_type'] == 'classification':
            print(f"[Test] Loss: {test_metrics['loss']:.4f} | Acc: {test_metrics['accuracy']:.4f} | "
                  f"Precision: {test_metrics['precision']:.4f} | Recall: {test_metrics['recall']:.4f} | F1: {test_metrics['f1_score']:.4f}")
            results[method] = test_metrics
        else:
            print(f"[Test] Loss: {test_metrics['loss']:.4f}")
            results[method] = {'Test Loss': test_metrics['loss']}

    df = pd.DataFrame(results).T
    print("\n--- Supervised GraphMAE2 Final Performance Summary ---")
    print(df.to_string(formatters={'loss': '{:.4f}'.format, 'accuracy': '{:.4f}'.format, 'precision': '{:.4f}'.format, 'recall': '{:.4f}'.format, 'f1_score': '{:.4f}'.format}))

# DGI model training
def train_dgi_and_evaluate(config, visual_flag=False):
    """Two-stage DGI pipeline: unsupervised pre-training + linear evaluation.

    Stage 1 pre-trains a DGI encoder on the 'combine' method's training split.
    Stage 2 freezes the encoder and, for every labeling method, trains a
    linear classifier on the DGI embeddings, then reports test metrics and a
    per-method summary DataFrame.

    Args:
        config (dict): requires 'batch_size', 'device', 'lr', 'epochs',
            'task_type'; optional 'dgi_hidden_dim' (default 128),
            'dgi_epochs' (default 200), 'dgi_lr' (default 0.001).
        visual_flag (bool): accepted for interface parity with the other
            train_* functions; not used here.
    """
    dataset_groups = load_datasets(batch_size=config['batch_size'])
    results = {}
    device = config['device']

    print("\n=== STAGE 1: DGI Unsupervised Pre-training ===")
    dgi_train_loader = dataset_groups['combine']['train']
    sample_data = next(iter(dgi_train_loader))
    in_dim = sample_data.x.size(1)

    dgi_hidden_dim = config.get('dgi_hidden_dim', 128)
    dgi_epochs = config.get('dgi_epochs', 200)
    dgi_lr = config.get('dgi_lr', 0.001)

    dgi_model = DGI(in_channels=in_dim, hidden_channels=dgi_hidden_dim).to(device)
    dgi_optimizer = torch.optim.AdamW(dgi_model.parameters(), lr=dgi_lr, weight_decay=1e-5)

    for epoch in range(dgi_epochs):
        dgi_model.train()
        total_loss = 0
        for batch in dgi_train_loader:
            batch = batch.to(device)
            dgi_optimizer.zero_grad()
            pos_scores, neg_scores = dgi_model(batch)
            loss = dgi_model.loss(pos_scores, neg_scores)
            loss.backward()
            dgi_optimizer.step()
            total_loss += loss.item()

        avg_loss = total_loss / len(dgi_train_loader)
        if (epoch + 1) % 10 == 0:
            print(f"[DGI Pre-training] Epoch {epoch+1}/{dgi_epochs} | Unsupervised Loss: {avg_loss:.4f}")

    print("DGI Pre-training finished.")
    dgi_model.eval()

    print("\n=== STAGE 2: Supervised Evaluation with DGI Embeddings ===")
    for method, loaders in dataset_groups.items():
        print(f"\n--- Evaluating on {method} labels ---")
        train_loader = loaders['train']
        dev_loader = loaders['dev']
        test_loader = loaders['test']

        # Class count inferred from the training labels.
        all_labels_for_dim = torch.cat([batch.y.view(-1) for batch in train_loader], dim=0)
        out_dim = int(all_labels_for_dim.max().item() + 1) if config['task_type'] == 'classification' else 1

        classifier = torch.nn.Linear(dgi_hidden_dim, out_dim).to(device)
        classifier_optimizer = torch.optim.AdamW(classifier.parameters(), lr=config['lr'], weight_decay=1e-4)
        criterion = torch.nn.CrossEntropyLoss()

        for epoch in range(config['epochs']):
            classifier.train()
            for batch in train_loader:
                batch = batch.to(device)
                # The encoder stays frozen; only the linear probe is trained.
                with torch.no_grad():
                    embeds = dgi_model.get_embeds(batch)

                classifier_optimizer.zero_grad()
                outputs = classifier(embeds)
                loss = criterion(outputs, batch.y)
                loss.backward()
                classifier_optimizer.step()

        classifier.eval()
        # Fixed: total_test_loss was previously re-initialized a second time below.
        total_test_loss, total_test_samples = 0, 0
        all_preds = []
        all_labels = []
        with torch.no_grad():
            for batch in test_loader:
                batch = batch.to(device)
                embeds = dgi_model.get_embeds(batch)
                outputs = classifier(embeds)
                loss = criterion(outputs, batch.y)

                # Node-weighted loss so the average is per node, not per batch.
                num_nodes_in_batch = batch.y.size(0)
                total_test_loss += loss.item() * num_nodes_in_batch
                total_test_samples += num_nodes_in_batch
                _, predicted = torch.max(outputs, 1)
                all_preds.extend(predicted.cpu().numpy())
                all_labels.extend(batch.y.cpu().numpy())

        test_loss = total_test_loss / total_test_samples if total_test_samples > 0 else 0
        test_acc = sum(1 for i in range(len(all_preds)) if all_preds[i] == all_labels[i]) / len(all_labels) if len(all_labels) > 0 else 0
        # Weighted averaging accounts for class imbalance.
        test_precision = precision_score(all_labels, all_preds, average='weighted', zero_division=0)
        test_recall = recall_score(all_labels, all_preds, average='weighted', zero_division=0)
        test_f1 = f1_score(all_labels, all_preds, average='weighted', zero_division=0)


        print(f"=== Final Test Result on {method} labels (using DGI) ===")
        print(f"[Test] Loss: {test_loss:.4f} | Acc: {test_acc:.4f} | "
              f"Precision: {test_precision:.4f} | Recall: {test_recall:.4f} | F1: {test_f1:.4f}")

        results[method] = {
            "loss": test_loss,
            "accuracy": test_acc,
            "precision": test_precision,
            "recall": test_recall,
            "f1_score": test_f1
        }

    df = pd.DataFrame(results).T
    print("\n--- DGI Final Performance Summary ---")
    print(df.to_string(formatters={'loss': '{:.4f}'.format, 'accuracy': '{:.4f}'.format, 'precision': '{:.4f}'.format, 'recall': '{:.4f}'.format, 'f1_score': '{:.4f}'.format}))

# Generic supervised training loop module
def train_loop_supervised(model, loaders, config, get_embeds_func=None, visual_flag=False, method_name=""):
    device = config['device']
    optimizer = torch.optim.AdamW(model.parameters(), lr=config['lr'], weight_decay=1e-4)
    criterion = torch.nn.CrossEntropyLoss()
    
    train_loader, dev_loader = loaders['train'], loaders['dev']
    loss_list, acc_list = [], []

    print(f"  Starting supervised training for {config['epochs']} epochs...")
    for epoch in range(config['epochs']):
        model.train()
        total_loss, total_correct, total_samples = 0, 0, 0
        
        for batch in train_loader:
            # DGI的简单分类器训练
            if get_embeds_func:
                batch = batch.to(device)
                with torch.no_grad():
                    features = get_embeds_func(batch)
                optimizer.zero_grad()
                outputs = model(features)
                loss = criterion(outputs, batch.y)
                loss.backward()
                optimizer.step()
                # 手动计算指标
                pred = outputs.argmax(dim=1)
                acc = (pred == batch.y).sum().item() / batch.y.size(0)
                metrics = {'loss': loss.item(), 'acc': acc}
            # GNN,GraphSAGE,GraphCL,GraphMAE2Model的训练
            else:
                metrics = train_one_epoch(model, batch, optimizer, criterion, config['task_type'], device, train=True)

            num_nodes_in_batch = batch.y.size(0)
            total_loss += metrics['loss'] * num_nodes_in_batch
            total_samples += num_nodes_in_batch
            total_correct += metrics['acc'] * num_nodes_in_batch
            
        avg_loss = total_loss / total_samples
        avg_acc = total_correct / total_samples
        loss_list.append(avg_loss)
        acc_list.append(avg_acc)

        if (epoch + 1) % 20 == 0: # 定期在验证集上评估
            dev_metrics = evaluate(model, dev_loader, criterion, device, get_embeds_func)
            print(f"    Epoch {epoch+1}/{config['epochs']} -> Train Acc: {avg_acc:.4f}, "
                  f"Dev Loss: {dev_metrics['loss']:.4f}, Dev Acc: {dev_metrics['accuracy']:.4f}, Dev F1: {dev_metrics['f1_score']:.4f}")

    if visual_flag:
        plt.figure(figsize=(10, 5))
        plt.plot(acc_list, label='Train Accuracy')
        plt.title(f"Training Accuracy Curve: {method_name}")
        plt.xlabel('Epoch'); plt.ylabel('Accuracy'); plt.legend(); plt.grid(True)
        plt.show(block=True)
        
    return model

def main(config, visual_flag=False):
    """Run the full benchmark: every model against every labeling method.

    For each model in ``experiments_to_run``, trains it (either end-to-end,
    or — for DGI — unsupervised pre-training followed by a linear probe)
    on each label set from load_datasets(), evaluates on the test split,
    and prints a per-model summary table of loss/accuracy/precision/recall/F1.

    Args:
        config: hyper-parameter dict (see ``__main__`` for the expected keys).
        visual_flag: forwarded to train_loop_supervised for curve plotting.
    """
    print("Loading datasets...")
    dataset_groups = load_datasets(batch_size=config['batch_size'])

    # Models to benchmark; extend this list to add experiments.
    experiments_to_run = ['gnn', 'dgi', 'graphsage', 'graphcl', 'graphmae2']
    all_results = {}

    for model_name in experiments_to_run:
        print(f"\n{'='*25}\n  RUNNING EXPERIMENT: {model_name.upper()}\n{'='*25}")
        model_results = {}

        dgi_encoder = None
        if model_name == 'dgi':
            # STAGE 1: unsupervised DGI pre-training on the 'combine' split.
            print("\n--- STAGE 1: DGI Unsupervised Pre-training ---")
            dgi_train_loader = dataset_groups['combine']['train']
            sample_data = next(iter(dgi_train_loader))
            in_dim = sample_data.x.size(1)

            dgi_model = DGI(in_channels=in_dim, hidden_channels=config['dgi_hidden_dim']).to(config['device'])
            dgi_optimizer = torch.optim.AdamW(dgi_model.parameters(), lr=config['dgi_lr'], weight_decay=1e-5)

            for epoch in range(config['dgi_epochs']):
                dgi_model.train()
                total_loss = 0
                for batch in dgi_train_loader:
                    batch = batch.to(config['device'])
                    dgi_optimizer.zero_grad()
                    pos, neg = dgi_model(batch)
                    loss = dgi_model.loss(pos, neg)
                    loss.backward()
                    dgi_optimizer.step()
                    total_loss += loss.item()

                if (epoch + 1) % 20 == 0:
                    avg_loss = total_loss / len(dgi_train_loader)
                    print(f"  DGI Pre-training Epoch {epoch+1}/{config['dgi_epochs']}, Unsupervised Loss: {avg_loss:.4f}")

            # Freeze the encoder; the probe stage trains only a linear head.
            dgi_encoder = dgi_model.eval()
            print("DGI Pre-training finished.\n")
            print("--- STAGE 2: Supervised Evaluation ---")

        for method, loaders in dataset_groups.items():
            print(f"\nTraining and evaluating on '{method}' labels...")

            sample_data = next(iter(loaders['train']))
            # Scan all labels in the split (not just one batch) to determine
            # the classifier output dimension robustly.
            all_labels_in_split = torch.cat([b.y for b in loaders['train']])
            out_dim = int(all_labels_in_split.max().item()) + 1

            model_to_train, feature_extractor = None, None

            if model_name == 'gnn':
                in_dim = sample_data.x.size(1)
                model_to_train = GNNModel(in_dim=in_dim, hidden_dim=config['hidden_dim'], out_dim=out_dim,
                                          num_layers=config['num_layers'], dropout=config['dropout']).to(config['device'])
            elif model_name == 'dgi':
                # Linear probe on top of the frozen DGI embeddings.
                model_to_train = torch.nn.Linear(config['dgi_hidden_dim'], out_dim).to(config['device'])
                feature_extractor = dgi_encoder.get_embeds

            elif model_name == 'graphsage':
                in_dim = sample_data.x.size(1)
                model_to_train = GraphSAGE(in_dim=in_dim, hidden_dim=config['hidden_dim'], out_dim=out_dim,
                                           num_layers=config['num_layers'], dropout=config['dropout']).to(config['device'])

            elif model_name == 'graphcl':
                in_dim = sample_data.x.size(1)
                model_to_train = GraphCL(in_dim=in_dim, hidden_dim=config['hidden_dim'], out_dim=out_dim,
                                         num_layers=config['num_layers'], dropout=config['dropout']).to(config['device'])

            elif model_name == 'graphmae2':
                in_dim = sample_data.x.size(1)
                model_to_train = GraphMAE2Model(in_dim=in_dim, hidden_dim=config['hidden_dim'], out_dim=out_dim,
                                                num_layers=config['num_layers'], dropout=config['dropout']).to(config['device'])

            else:
                # Fail fast on a typo in experiments_to_run instead of crashing
                # later with an opaque AttributeError on a None model.
                raise ValueError(f"Unknown model name: {model_name}")

            # Shared supervised training loop (probe or end-to-end).
            trained_model = train_loop_supervised(
                model_to_train, loaders, config,
                get_embeds_func=feature_extractor,
                visual_flag=visual_flag,
                method_name=f"{model_name.upper()} on '{method}' data"
            )

            # Final evaluation on the held-out test split.
            criterion = torch.nn.CrossEntropyLoss()
            test_metrics = evaluate(trained_model, loaders['test'], criterion, config['device'], get_embeds_func=feature_extractor)
            model_results[method] = test_metrics

            print(f"  -> Final Test Results for '{method}':\n"
                  f"     Loss={test_metrics['loss']:.4f}, Acc={test_metrics['accuracy']:.4f}, "
                  f"Precision={test_metrics['precision']:.4f}, Recall={test_metrics['recall']:.4f}, "
                  f"F1={test_metrics['f1_score']:.4f}")

        all_results[model_name] = pd.DataFrame(model_results).T

    # Final summary tables, one per model.
    print("\n\n" + "="*30 + "\n  FINAL PERFORMANCE SUMMARY\n" + "="*30)
    for model_name, df in all_results.items():
        print(f"\n--- Model: {model_name.upper()} ---")
        # Print every metric with 4 decimal places.
        formatters = {col: '{:.4f}'.format for col in df.columns}
        print(df.to_string(formatters=formatters))

if __name__ == "__main__":
    # Hyper-parameters shared by all experiments.
    config = {
        'batch_size': 64,
        'epochs': 200,
        'lr': 0.003,
        'hidden_dim': 128,
        'num_layers': 10,
        'dropout': 0.3,
        'task_type': 'classification',
        'device': 'cuda' if torch.cuda.is_available() else 'cpu',

        # --- DGI hyper-parameters ---
        'dgi_hidden_dim': 128,  # embedding dim produced by the DGI encoder
        'dgi_epochs': 200,      # unsupervised pre-training epochs
        'dgi_lr': 0.001,        # pre-training learning rate
    }

    # Unified benchmark over all models and labeling methods.
    # NOTE(review): the former per-model entry points (train_gnn,
    # train_graphsage, train_dgi_and_evaluate, train_graphcl,
    # train_graphmae2) are superseded by main(); the dead debug loop and
    # the no-op string listing them were removed.
    main(config)