{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "bc941af1",
   "metadata": {},
   "outputs": [],
   "source": [
    "import pickle\n",
    "import os\n",
    "import random as rd\n",
    "import numpy as np\n",
    "import copy\n",
    "import copy as cp\n",
    "import time\n",
    "import logging, sys, json\n",
    "from datetime import datetime\n",
    "import scipy.sparse as sp\n",
    "from scipy.io import loadmat\n",
    "import torch\n",
    "import torch.nn.functional as F\n",
    "import torch.nn as nn\n",
    "from torch.nn import TransformerEncoder, TransformerEncoderLayer\n",
    "from torch_geometric.utils import to_dense_adj, subgraph\n",
    "from torch.utils.tensorboard import SummaryWriter\n",
    "from sklearn.preprocessing import MinMaxScaler\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.metrics import f1_score, accuracy_score, recall_score, roc_auc_score, average_precision_score, confusion_matrix\n",
    "\n",
    "filelist = {\n",
    "    'amz_upu': 'amz_upu_adjlists.pickle',\n",
    "    'amz_usu': 'amz_usu_adjlists.pickle',\n",
    "    'amz_uvu': 'amz_uvu_adjlists.pickle',\n",
    "    'yelp_rsr': 'yelp_rsr_adjlists.pickle',\n",
    "    'yelp_rtr': 'yelp_rtr_adjlists.pickle',\n",
    "    'yelp_rur': 'yelp_rur_adjlists.pickle'\n",
    "}\n",
    "\n",
    "file_matrix_prefix = {\n",
    "    'amz_upu': 'amazon_upu_matrix_',\n",
    "    'amz_usu': 'amazon_usu_matrix_',\n",
    "    'amz_uvu': 'amazon_uvu_matrix_',\n",
    "    'yelp_rsr': 'yelpnet_rsr_matrix_decompision_',\n",
    "    'yelp_rtr': 'yelpnet_rtr_matrix_decompision_',\n",
    "    'yelp_rur': 'yelpnet_rur_matrix_decompision_'\n",
    "}\n",
    "\n",
    "\n",
    "def create_node_subgraph(node_idx, feat_data, edge_indexs, device):\n",
    "    \"\"\"\n",
    "    为单个节点创建一阶邻居子图（所有邻居）\n",
    "    \"\"\"\n",
    "    neighbors = set()\n",
    "    for rel_idx in range(len(edge_indexs)):\n",
    "        edge_index = edge_indexs[rel_idx][0].cpu().numpy()\n",
    "        # 找出以中心点为起点的边的终点\n",
    "        rel_neighbors = edge_index[1][edge_index[0] == node_idx].tolist()\n",
    "        neighbors.update(rel_neighbors)\n",
    "\n",
    "    # 移除中心节点自身\n",
    "    neighbors.discard(node_idx)\n",
    "    neighbors = list(neighbors)\n",
    "\n",
    "    # 如果邻居太多，进行随机采样截取\n",
    "    sample_size = 399\n",
    "    if len(neighbors) > sample_size:\n",
    "        neighbors = np.random.choice(neighbors, size=sample_size, replace=False).tolist()\n",
    "\n",
    "    # 构建子图节点列表，确保中心节点是第一个\n",
    "    sub_nodes = [node_idx] + [n for n in neighbors if n != node_idx]\n",
    "\n",
    "    # 构建子图边列表\n",
    "    sub_edge_index = []\n",
    "    for rel_idx in range(len(edge_indexs)):\n",
    "        edge_index = edge_indexs[rel_idx][0].cpu().numpy()\n",
    "        mask = np.isin(edge_index[0], sub_nodes) & np.isin(edge_index[1], sub_nodes)\n",
    "        local_edges = edge_index[:, mask]\n",
    "\n",
    "        # 创建节点映射\n",
    "        node_map = {n: i for i, n in enumerate(sub_nodes)}\n",
    "\n",
    "        # 将全局索引映射到局部索引\n",
    "        if len(local_edges) > 0 and local_edges.size > 0:\n",
    "            src_nodes = [node_map[src] for src in local_edges[0]]\n",
    "            dst_nodes = [node_map[dst] for dst in local_edges[1]]\n",
    "            edge_tensor = torch.tensor([src_nodes, dst_nodes], dtype=torch.long)\n",
    "        else:\n",
    "            # 添加自环确保图不为空\n",
    "            edge_tensor = torch.tensor([[0], [0]], dtype=torch.long)\n",
    "\n",
    "        sub_edge_index.append(edge_tensor.to(device))\n",
    "\n",
    "    # 创建子图数据\n",
    "    subgraph = {\n",
    "        'features': feat_data[sub_nodes].clone(),\n",
    "        'edges': sub_edge_index,\n",
    "        'global_idx': sub_nodes\n",
    "    }\n",
    "\n",
    "    return subgraph\n",
    "\n",
    "\n",
    "def dict_to_edge_index(edge_dict):\n",
    "    source_nodes = []\n",
    "    target_nodes = []\n",
    "    for src, targets in edge_dict.items():\n",
    "        for target in targets:\n",
    "            source_nodes.append(src)\n",
    "            target_nodes.append(target)\n",
    "    edge_index = [source_nodes, target_nodes]\n",
    "    return torch.LongTensor(edge_index)\n",
    "\n",
    "\n",
    "def numpy_array_to_edge_index(np_array):\n",
    "    assert np_array.ndim == 2 and np_array.shape[0] == np_array.shape[1], \"Input must be a square matrix.\"\n",
    "    rows, cols = np.nonzero(np_array)\n",
    "    edge_index = np.vstack((rows, cols))\n",
    "    edge_index_tensor = torch.from_numpy(edge_index).long()\n",
    "    return edge_index_tensor\n",
    "\n",
    "\n",
    "def load_data(data, k=2, prefix=''):\n",
    "    pickle_file = {}\n",
    "    matrix_prefix = {}\n",
    "    for key in filelist:\n",
    "        pickle_file[key] = os.path.join(prefix, filelist[key])\n",
    "        matrix_prefix[key] = os.path.join(prefix, file_matrix_prefix[key])\n",
    "\n",
    "    if data == 'yelp':\n",
    "        data_file = loadmat(os.path.join(prefix, 'YelpChi.mat'))\n",
    "        labels = data_file['label'].flatten()\n",
    "        feat_data = data_file['features'].todense().A\n",
    "        with open(pickle_file['yelp_rur'], 'rb') as file:\n",
    "            relation1 = pickle.load(file)\n",
    "        file.close()\n",
    "        relation1 = dict_to_edge_index(relation1)\n",
    "        relation1_tree = []\n",
    "        for i in range(1, k + 1):\n",
    "            file_name = '{}{}.pkl'.format(matrix_prefix['yelp_rur'], i)\n",
    "            with open(file_name, 'rb') as file:\n",
    "                tree = pickle.load(file)\n",
    "            file.close()\n",
    "            relation1_tree.append(numpy_array_to_edge_index(tree))\n",
    "        with open(pickle_file['yelp_rtr'], 'rb') as file:\n",
    "            relation2 = pickle.load(file)\n",
    "        file.close()\n",
    "        relation2 = dict_to_edge_index(relation2)\n",
    "        relation2_tree = []\n",
    "        for i in range(1, k + 1):\n",
    "            file_name = '{}{}.pkl'.format(matrix_prefix['yelp_rtr'], i)\n",
    "            with open(file_name, 'rb') as file:\n",
    "                tree = pickle.load(file)\n",
    "            file.close()\n",
    "            relation2_tree.append(numpy_array_to_edge_index(tree))\n",
    "        with open(pickle_file['yelp_rsr'], 'rb') as file:\n",
    "            relation3 = pickle.load(file)\n",
    "        file.close()\n",
    "        relation3 = dict_to_edge_index(relation3)\n",
    "        relation3_tree = []\n",
    "        for i in range(1, k + 1):\n",
    "            file_name = '{}{}.pkl'.format(matrix_prefix['yelp_rsr'], i)\n",
    "            with open(file_name, 'rb') as file:\n",
    "                tree = pickle.load(file)\n",
    "            file.close()\n",
    "            relation3_tree.append(numpy_array_to_edge_index(tree))\n",
    "        return [[relation1, relation1_tree], [relation2, relation2_tree],\n",
    "                [relation3, relation3_tree]], feat_data, labels\n",
    "    elif data == 'amazon':\n",
    "        data_file = loadmat(os.path.join(prefix, 'Amazon.mat'))\n",
    "        labels = data_file['label'].flatten()\n",
    "        feat_data = data_file['features'].todense().A\n",
    "        with open(pickle_file['amz_upu'], 'rb') as file:\n",
    "            relation1 = pickle.load(file)\n",
    "        file.close()\n",
    "        relation1 = dict_to_edge_index(relation1)\n",
    "        relation1_tree = []\n",
    "        for i in range(1, k + 1):\n",
    "            file_name = '{}{}.pkl'.format(matrix_prefix['amz_upu'], i)\n",
    "            with open(file_name, 'rb') as file:\n",
    "                tree = pickle.load(file)\n",
    "            file.close()\n",
    "            relation1_tree.append(numpy_array_to_edge_index(tree))\n",
    "        with open(pickle_file['amz_usu'], 'rb') as file:\n",
    "            relation2 = pickle.load(file)\n",
    "        file.close()\n",
    "        relation2 = dict_to_edge_index(relation2)\n",
    "        relation2_tree = []\n",
    "        for i in range(1, k + 1):\n",
    "            file_name = '{}{}.pkl'.format(matrix_prefix['amz_usu'], i)\n",
    "            with open(file_name, 'rb') as file:\n",
    "                tree = pickle.load(file)\n",
    "            file.close()\n",
    "            relation2_tree.append(numpy_array_to_edge_index(tree))\n",
    "        with open(pickle_file['amz_uvu'], 'rb') as file:\n",
    "            relation3 = pickle.load(file)\n",
    "        file.close()\n",
    "        relation3_tree = []\n",
    "        for i in range(1, k + 1):\n",
    "            file_name = '{}{}.pkl'.format(matrix_prefix['amz_uvu'], i)\n",
    "            with open(file_name, 'rb') as file:\n",
    "                tree = pickle.load(file)\n",
    "            file.close()\n",
    "            relation3_tree.append(numpy_array_to_edge_index(tree))\n",
    "        relation3 = dict_to_edge_index(relation3)\n",
    "\n",
    "        return [[relation1, relation1_tree], [relation2, relation2_tree],\n",
    "                [relation3, relation3_tree]], feat_data, labels\n",
    "\n",
    "\n",
    "def normalize(mx):\n",
    "    rowsum = np.array(mx.sum(1)) + 0.01\n",
    "    r_inv = np.power(rowsum, -1).flatten()\n",
    "    r_inv[np.isinf(r_inv)] = 0.\n",
    "    r_mat_inv = sp.diags(r_inv)\n",
    "    mx = r_mat_inv.dot(mx)\n",
    "    return mx\n",
    "\n",
    "\n",
    "def pos_neg_split(nodes, labels):\n",
    "    # 正负样本分割\n",
    "    pos_nodes = []\n",
    "    neg_nodes = cp.deepcopy(nodes)\n",
    "    aux_nodes = cp.deepcopy(nodes)\n",
    "    for idx, label in enumerate(labels):\n",
    "        if label == 1:\n",
    "            pos_nodes.append(aux_nodes[idx])\n",
    "            neg_nodes.remove(aux_nodes[idx])\n",
    "\n",
    "    return pos_nodes, neg_nodes\n",
    "\n",
    "\n",
    "def undersample(pos_nodes, neg_nodes, scale=1):\n",
    "    # 对负样本进行下采样，平衡正负样本数量\n",
    "    aux_nodes = cp.deepcopy(neg_nodes)\n",
    "    aux_nodes = rd.sample(aux_nodes, k=int(len(pos_nodes) * scale))\n",
    "    batch_nodes = pos_nodes + aux_nodes\n",
    "\n",
    "    return batch_nodes\n",
    "\n",
    "\n",
    "def calculate_g_mean(y_true, y_pred):\n",
    "    cm = confusion_matrix(y_true, y_pred)\n",
    "    sensitivities = []\n",
    "    for i in range(len(cm)):\n",
    "        TP = cm[i, i]\n",
    "        FN = cm[i, :].sum() - TP\n",
    "        sensitivity = TP / (TP + FN) if (TP + FN) != 0 else 0\n",
    "        sensitivities.append(sensitivity)\n",
    "    g_mean = np.prod(sensitivities) ** (1 / len(sensitivities))\n",
    "    return g_mean\n",
    "\n",
    "\n",
    "def iterate_batches(indices, batch_size, shuffle=True):\n",
    "    \"\"\"\n",
    "        将索引列表划分为指定大小的批次\n",
    "        :param indices: 样本索引列表\n",
    "        :param batch_size: 每个批次的大小\n",
    "        :param shuffle: 是否打乱顺序\n",
    "        :return: 生成批次索引的迭代器\n",
    "    \"\"\"\n",
    "    if shuffle:\n",
    "        rd.shuffle(indices)\n",
    "    for i in range(0, len(indices), batch_size):\n",
    "        yield indices[i:i + batch_size]\n",
    "        \n",
    "def setup_logger(log_dir='./logs', log_name=None):\n",
    "    \"\"\"返回一个同时写文件与终端的 utf-8 logger\"\"\"\n",
    "    os.makedirs(log_dir, exist_ok=True)\n",
    "    if log_name is None:\n",
    "        log_name = datetime.now().strftime('%Y%m%d-%H%M%S') + '.log'\n",
    "    log_path = os.path.join(log_dir, log_name)\n",
    "\n",
    "    logger = logging.getLogger('BSNE')\n",
    "    logger.setLevel(logging.INFO)\n",
    "    # 防止重复 handler\n",
    "    if not logger.handlers:\n",
    "        # 文件 handler（utf-8）\n",
    "        fh = logging.FileHandler(log_path, encoding='utf-8')\n",
    "        fh.setLevel(logging.INFO)\n",
    "        # 终端 handler\n",
    "        ch = logging.StreamHandler(sys.stdout)\n",
    "        ch.setLevel(logging.INFO)\n",
    "        # 统一格式\n",
    "        fmt = '%(asctime)s | %(levelname)s | %(message)s'\n",
    "        formatter = logging.Formatter(fmt, datefmt='%Y-%m-%d %H:%M:%S')\n",
    "        fh.setFormatter(formatter)\n",
    "        ch.setFormatter(formatter)\n",
    "        logger.addHandler(fh)\n",
    "        logger.addHandler(ch)\n",
    "    return logger\n",
    "\n",
    "def test(idx_eval, y_eval, model, feat_data, edge_indexs, device, batch_size=64):\n",
    "    model.eval()\n",
    "    all_probs = []\n",
    "    all_labels = []\n",
    "\n",
    "    # 分批处理\n",
    "    for batch_centers in iterate_batches(idx_eval, batch_size, shuffle=False):\n",
    "        subgraph_data = []\n",
    "\n",
    "        # 为每个中心节点构建子图\n",
    "        for xi in batch_centers:\n",
    "            subgraph = create_node_subgraph(xi, feat_data, edge_indexs, device)\n",
    "            subgraph_data.append(subgraph)\n",
    "\n",
    "        # 获取中心节点预测\n",
    "        with torch.no_grad():\n",
    "            center_logits, _ = model(subgraph_data)  # [B, 2]\n",
    "            probs = torch.softmax(center_logits, dim=-1)[:, 1]  # 正类概率\n",
    "            all_probs.extend(probs.cpu().numpy())\n",
    "            all_labels.extend([labels[xi] for xi in batch_centers])\n",
    "\n",
    "    # 计算指标\n",
    "    auc_score = roc_auc_score(all_labels, all_probs)\n",
    "    ap_score = average_precision_score(all_labels, all_probs)\n",
    "    pred_labels = (np.array(all_probs) >= 0.5).astype(int)\n",
    "    f1 = f1_score(all_labels, pred_labels, average='macro')\n",
    "    g_mean = calculate_g_mean(all_labels, pred_labels)\n",
    "\n",
    "    return auc_score, ap_score, f1, g_mean\n",
    "\n",
    "\n",
    "class BSNE_Transformer(nn.Module):\n",
    "    def __init__(self, in_feat, out_feat, relation_nums=3, d_model=256,\n",
    "                 nhead=8, num_layers=3, dim_feedforward=256,\n",
    "                 drop_rate=0.5):\n",
    "        super().__init__()\n",
    "        self.relation_nums = relation_nums\n",
    "        self.d_model = d_model\n",
    "        self.nhead = nhead\n",
    "\n",
    "        self.feature_proj = nn.Sequential(\n",
    "            nn.Linear(in_feat, d_model),\n",
    "            nn.LayerNorm(d_model),\n",
    "            nn.ReLU()\n",
    "        )\n",
    "        self.norm = nn.LayerNorm(d_model)\n",
    "\n",
    "        self.relation_encoders = nn.ModuleList([\n",
    "            TransformerEncoder(\n",
    "                TransformerEncoderLayer(\n",
    "                    d_model=d_model,\n",
    "                    nhead=nhead,\n",
    "                    dim_feedforward=dim_feedforward,\n",
    "                    dropout=drop_rate,\n",
    "                    batch_first=True\n",
    "                ),\n",
    "                num_layers=num_layers\n",
    "            ) for _ in range(relation_nums)\n",
    "        ])\n",
    "\n",
    "        self.classifier = nn.Sequential(\n",
    "            nn.Linear(relation_nums * d_model, 512),\n",
    "            nn.ReLU(),\n",
    "            nn.Dropout(drop_rate),\n",
    "            nn.Linear(512, out_feat)\n",
    "        )\n",
    "        for p in self.parameters():\n",
    "            if p.dim() > 1:\n",
    "                nn.init.xavier_uniform_(p)\n",
    "\n",
    "    #         self.node_feature_extractor = nn.Sequential(\n",
    "    #             nn.Linear(relation_nums * d_model, d_model),\n",
    "    #             nn.ReLU(),\n",
    "    #             nn.LayerNorm(d_model)\n",
    "    #         )\n",
    "\n",
    "    def forward(self, subgraph_batch):\n",
    "        # 为每个子图单独处理\n",
    "        center_logits_list = []\n",
    "        center_features_list = []\n",
    "\n",
    "        for sg in subgraph_batch:\n",
    "            # 处理单个子图\n",
    "            features = self.feature_proj(sg['features'].unsqueeze(0))  # [1, num_nodes, d_model]\n",
    "            features = self.norm(features)\n",
    "\n",
    "            # 为每个关系类型单独处理\n",
    "            rel_outputs = []\n",
    "            num_nodes = features.size(1)\n",
    "\n",
    "            for rel_idx in range(self.relation_nums):\n",
    "                # 构建当前关系的邻接矩阵\n",
    "                edge_index = sg['edges'][rel_idx]\n",
    "                adj = torch.zeros(num_nodes, num_nodes,\n",
    "                                  dtype=torch.float, device=features.device)\n",
    "\n",
    "                if edge_index.size(1) > 0:\n",
    "                    src, dst = edge_index\n",
    "                    adj[src, dst] = 1.0\n",
    "\n",
    "                # 添加自环\n",
    "                adj[range(num_nodes), range(num_nodes)] = 1.0\n",
    "\n",
    "                # 创建注意力掩码\n",
    "                adj_mask = adj.masked_fill(adj == 0.0, float('-inf'))\n",
    "                adj_mask = adj_mask.masked_fill(adj == 1.0, 0.0)\n",
    "\n",
    "                # 扩展为多头注意力掩码\n",
    "                adj_mask = adj_mask.unsqueeze(0).unsqueeze(0)  # [1, 1, num_nodes, num_nodes]\n",
    "                adj_mask = adj_mask.expand(1, self.nhead, num_nodes, num_nodes)\n",
    "                adj_mask = adj_mask.reshape(-1, num_nodes, num_nodes)\n",
    "\n",
    "                # 关系编码\n",
    "                encoder_output = self.relation_encoders[rel_idx](\n",
    "                    src=features,\n",
    "                    mask=adj_mask\n",
    "                )\n",
    "                rel_outputs.append(encoder_output)\n",
    "\n",
    "            # 合并多关系特征\n",
    "            combined = torch.cat(rel_outputs, dim=-1)  # [1, num_nodes, rel*d_model]\n",
    "\n",
    "            # 提取中心节点特征（第一个节点）\n",
    "            center_features = combined[:, 0, :]  # [1, rel*d_model]\n",
    "            center_logits = self.classifier(center_features)  # [1, out_feat]\n",
    "            center_logits = F.log_softmax(center_logits, dim=-1)\n",
    "\n",
    "            center_logits_list.append(center_logits)\n",
    "            center_features_list.append(center_features)\n",
    "\n",
    "        # 将结果堆叠为批次\n",
    "        center_logits = torch.cat(center_logits_list, dim=0)\n",
    "        center_features = torch.cat(center_features_list, dim=0)\n",
    "\n",
    "        return center_logits, center_features\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "15650663",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2025-07-23 12:27:28 | INFO | ============  BSNE Training  ============\n",
      "2025-07-23 12:27:28 | INFO | Args:\n",
      "{\n",
      "  \"dataset\": \"amazon\",\n",
      "  \"weight_decay\": 5e-05,\n",
      "  \"seed\": 76,\n",
      "  \"pretrain_epochs\": 10,\n",
      "  \"max_steps\": 100,\n",
      "  \"sample_size\": 100,\n",
      "  \"loss_threshold\": 0.1,\n",
      "  \"pretrain_lr\": 0.0015,\n",
      "  \"finetune_lr\": 0.0005,\n",
      "  \"batch_size\": 8,\n",
      "  \"num_epochs\": 80,\n",
      "  \"patience\": 30,\n",
      "  \"weight\": 0.6,\n",
      "  \"test_size\": 0.3,\n",
      "  \"val_size\": 0.5,\n",
      "  \"layers_tree\": 7,\n",
      "  \"num_heads\": 4,\n",
      "  \"num_layers\": 2,\n",
      "  \"drop_rate\": 0.5\n",
      "}\n",
      "2025-07-23 12:27:28 | INFO | cuda\n",
      "2025-07-23 12:27:28 | INFO | loading data...\n",
      "2025-07-23 12:28:15 | INFO | \n",
      "=== Starting Pretraining ===\n",
      "2025-07-23 12:28:15 | INFO | \n",
      "=== Pretraining Epoch 0 (Center Node: 268) ===\n",
      "2025-07-23 12:29:33 | INFO | Step 0: local_loss: -0.9489615559577942 | global_loss: 1.8439226150512695 | total_loss: 0.8949610590934753\n",
      "2025-07-23 12:30:48 | INFO | Step 1: local_loss: -0.9761890769004822 | global_loss: 1.7756544351577759 | total_loss: 0.7994653582572937\n",
      "2025-07-23 12:31:58 | INFO | Step 2: local_loss: -0.9774107933044434 | global_loss: 1.8807175159454346 | total_loss: 0.9033067226409912\n",
      "2025-07-23 12:33:01 | INFO | Step 3: local_loss: -1.0107611417770386 | global_loss: 1.8387020826339722 | total_loss: 0.8279409408569336\n",
      "2025-07-23 12:34:02 | INFO | Step 4: local_loss: -0.9930344223976135 | global_loss: 1.6628499031066895 | total_loss: 0.6698154807090759\n",
      "2025-07-23 12:36:04 | INFO | Step 6: local_loss: -0.9706510901451111 | global_loss: 1.8500022888183594 | total_loss: 0.8793511986732483\n",
      "2025-07-23 12:37:04 | INFO | Step 7: local_loss: -0.9670863151550293 | global_loss: 1.611554503440857 | total_loss: 0.6444681882858276\n",
      "2025-07-23 12:38:05 | INFO | Step 8: local_loss: -0.8953956961631775 | global_loss: 1.6015881299972534 | total_loss: 0.7061924338340759\n",
      "2025-07-23 12:39:06 | INFO | Step 9: local_loss: -1.07184636592865 | global_loss: 1.829147219657898 | total_loss: 0.757300853729248\n",
      "2025-07-23 12:40:04 | INFO | Step 10: local_loss: -0.9860935807228088 | global_loss: 1.722662329673767 | total_loss: 0.7365687489509583\n",
      "2025-07-23 12:41:02 | INFO | Step 11: local_loss: -0.9860479235649109 | global_loss: 1.9593052864074707 | total_loss: 0.9732573628425598\n",
      "2025-07-23 12:42:01 | INFO | Step 12: local_loss: -0.948978841304779 | global_loss: 1.715718150138855 | total_loss: 0.7667393088340759\n",
      "2025-07-23 12:43:03 | INFO | Step 13: local_loss: -0.9726009964942932 | global_loss: 1.7787553071975708 | total_loss: 0.8061543107032776\n",
      "2025-07-23 12:44:03 | INFO | Step 14: local_loss: -0.9783524870872498 | global_loss: 1.8263293504714966 | total_loss: 0.8479768633842468\n",
      "2025-07-23 12:45:04 | INFO | Step 15: local_loss: -1.005502462387085 | global_loss: 1.7450858354568481 | total_loss: 0.7395833730697632\n",
      "2025-07-23 12:46:05 | INFO | Step 16: local_loss: -0.9642726182937622 | global_loss: 1.8096203804016113 | total_loss: 0.8453477621078491\n",
      "2025-07-23 12:47:05 | INFO | Step 17: local_loss: -0.984026312828064 | global_loss: 1.893119215965271 | total_loss: 0.909092903137207\n",
      "2025-07-23 12:48:05 | INFO | Step 18: local_loss: -0.9799201488494873 | global_loss: 1.6691793203353882 | total_loss: 0.6892591714859009\n",
      "2025-07-23 12:49:06 | INFO | Step 19: local_loss: -0.8902003169059753 | global_loss: 1.5130414962768555 | total_loss: 0.6228411793708801\n",
      "2025-07-23 12:50:05 | INFO | Step 20: local_loss: -0.9719525575637817 | global_loss: 1.7343559265136719 | total_loss: 0.7624033689498901\n",
      "2025-07-23 12:51:06 | INFO | Step 21: local_loss: -0.9238100051879883 | global_loss: 1.6404105424880981 | total_loss: 0.7166005373001099\n",
      "2025-07-23 12:52:06 | INFO | Step 22: local_loss: -0.9709154367446899 | global_loss: 1.7127811908721924 | total_loss: 0.7418657541275024\n",
      "2025-07-23 12:53:06 | INFO | Step 23: local_loss: -1.0628257989883423 | global_loss: 1.8501211404800415 | total_loss: 0.7872953414916992\n",
      "2025-07-23 12:53:50 | INFO | Step 24: local_loss: -0.9916669726371765 | global_loss: 1.7082443237304688 | total_loss: 0.7165773510932922\n",
      "2025-07-23 12:54:49 | INFO | Step 25: local_loss: -1.0370674133300781 | global_loss: 1.6329550743103027 | total_loss: 0.5958876609802246\n",
      "2025-07-23 12:55:51 | INFO | Step 26: local_loss: -1.0115363597869873 | global_loss: 1.6235731840133667 | total_loss: 0.6120368242263794\n",
      "2025-07-23 12:56:52 | INFO | Step 27: local_loss: -1.039500117301941 | global_loss: 1.8619800806045532 | total_loss: 0.8224799633026123\n",
      "2025-07-23 12:57:36 | INFO | Step 28: local_loss: -0.95773845911026 | global_loss: 1.6230440139770508 | total_loss: 0.6653055548667908\n",
      "2025-07-23 12:58:34 | INFO | Step 29: local_loss: -0.956500768661499 | global_loss: 1.8720611333847046 | total_loss: 0.9155603647232056\n",
      "2025-07-23 12:59:34 | INFO | Step 30: local_loss: -1.0120272636413574 | global_loss: 1.6689553260803223 | total_loss: 0.6569280624389648\n",
      "2025-07-23 13:00:32 | INFO | Step 31: local_loss: -1.003652811050415 | global_loss: 1.9057221412658691 | total_loss: 0.9020693302154541\n",
      "2025-07-23 13:01:16 | INFO | Step 32: local_loss: -0.986820638179779 | global_loss: 1.6697883605957031 | total_loss: 0.6829677224159241\n",
      "2025-07-23 13:02:14 | INFO | Step 33: local_loss: -0.9173802137374878 | global_loss: 1.526413917541504 | total_loss: 0.6090337038040161\n",
      "2025-07-23 13:03:15 | INFO | Step 34: local_loss: -0.9817595481872559 | global_loss: 1.6059439182281494 | total_loss: 0.6241843700408936\n",
      "2025-07-23 13:04:16 | INFO | Step 35: local_loss: -0.9210872650146484 | global_loss: 1.5991183519363403 | total_loss: 0.6780310869216919\n",
      "2025-07-23 13:05:16 | INFO | Step 36: local_loss: -1.0479024648666382 | global_loss: 1.6660621166229248 | total_loss: 0.6181596517562866\n",
      "2025-07-23 13:06:17 | INFO | Step 37: local_loss: -0.9504826068878174 | global_loss: 1.8530687093734741 | total_loss: 0.9025861024856567\n",
      "2025-07-23 13:07:17 | INFO | Step 38: local_loss: -0.9116982817649841 | global_loss: 1.6144987344741821 | total_loss: 0.702800452709198\n",
      "2025-07-23 13:08:19 | INFO | Step 39: local_loss: -0.9653365612030029 | global_loss: 1.7646362781524658 | total_loss: 0.7992997169494629\n",
      "2025-07-23 13:09:17 | INFO | Step 40: local_loss: -0.9111272692680359 | global_loss: 1.61260187625885 | total_loss: 0.7014746069908142\n",
      "2025-07-23 13:10:15 | INFO | Step 41: local_loss: -0.9949786067008972 | global_loss: 1.6280896663665771 | total_loss: 0.6331110596656799\n",
      "2025-07-23 13:11:14 | INFO | Step 42: local_loss: -0.9290327429771423 | global_loss: 1.6196339130401611 | total_loss: 0.6906011700630188\n",
      "2025-07-23 13:12:13 | INFO | Step 43: local_loss: -0.9677670001983643 | global_loss: 1.7137794494628906 | total_loss: 0.7460124492645264\n",
      "2025-07-23 13:13:10 | INFO | Step 44: local_loss: -0.9945170879364014 | global_loss: 1.7393563985824585 | total_loss: 0.7448393106460571\n",
      "2025-07-23 13:14:11 | INFO | Step 45: local_loss: -0.991316556930542 | global_loss: 1.765655279159546 | total_loss: 0.7743387222290039\n",
      "2025-07-23 13:14:55 | INFO | Step 46: local_loss: -1.029211401939392 | global_loss: 1.6764477491378784 | total_loss: 0.6472363471984863\n",
      "2025-07-23 13:15:52 | INFO | Step 47: local_loss: -0.9918226599693298 | global_loss: 1.6635295152664185 | total_loss: 0.6717068552970886\n",
      "2025-07-23 13:16:37 | INFO | Step 48: local_loss: -0.9670296907424927 | global_loss: 1.6305429935455322 | total_loss: 0.6635133028030396\n",
      "2025-07-23 13:17:33 | INFO | Step 49: local_loss: -1.0288041830062866 | global_loss: 1.9289157390594482 | total_loss: 0.9001115560531616\n",
      "2025-07-23 13:18:30 | INFO | Step 50: local_loss: -0.9806264042854309 | global_loss: 1.8600890636444092 | total_loss: 0.8794626593589783\n",
      "2025-07-23 13:19:25 | INFO | Step 51: local_loss: -1.0430885553359985 | global_loss: 1.8816202878952026 | total_loss: 0.8385317325592041\n",
      "2025-07-23 13:20:22 | INFO | Step 52: local_loss: -1.0148391723632812 | global_loss: 1.84880793094635 | total_loss: 0.8339687585830688\n",
      "2025-07-23 13:21:19 | INFO | Step 53: local_loss: -1.0084227323532104 | global_loss: 1.7280125617980957 | total_loss: 0.7195898294448853\n",
      "2025-07-23 13:22:19 | INFO | Step 54: local_loss: -1.0124062299728394 | global_loss: 1.742888331413269 | total_loss: 0.7304821014404297\n",
      "2025-07-23 13:23:03 | INFO | Step 55: local_loss: -1.0303136110305786 | global_loss: 1.7403945922851562 | total_loss: 0.7100809812545776\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2025-07-23 13:24:01 | INFO | Step 56: local_loss: -0.9541604518890381 | global_loss: 1.592839002609253 | total_loss: 0.6386785507202148\n",
      "2025-07-23 13:24:57 | INFO | Step 57: local_loss: -1.0279490947723389 | global_loss: 1.721191167831421 | total_loss: 0.693242073059082\n",
      "2025-07-23 13:25:54 | INFO | Step 58: local_loss: -1.0638271570205688 | global_loss: 1.753743052482605 | total_loss: 0.6899158954620361\n",
      "2025-07-23 13:26:53 | INFO | Step 59: local_loss: -1.021579623222351 | global_loss: 1.7244876623153687 | total_loss: 0.7029080390930176\n",
      "2025-07-23 13:27:51 | INFO | Step 60: local_loss: -0.9969103932380676 | global_loss: 1.7429115772247314 | total_loss: 0.7460011839866638\n",
      "2025-07-23 13:28:48 | INFO | Step 61: local_loss: -0.9865124821662903 | global_loss: 1.5488839149475098 | total_loss: 0.5623714327812195\n",
      "2025-07-23 13:29:46 | INFO | Step 62: local_loss: -1.0084892511367798 | global_loss: 1.820635199546814 | total_loss: 0.8121459484100342\n",
      "2025-07-23 13:30:43 | INFO | Step 63: local_loss: -0.9747558236122131 | global_loss: 1.6614487171173096 | total_loss: 0.6866928935050964\n",
      "2025-07-23 13:31:41 | INFO | Step 64: local_loss: -0.9925017952919006 | global_loss: 1.582992434501648 | total_loss: 0.5904906392097473\n",
      "2025-07-23 13:32:39 | INFO | Step 65: local_loss: -1.0139447450637817 | global_loss: 1.7850902080535889 | total_loss: 0.7711454629898071\n",
      "2025-07-23 13:33:37 | INFO | Step 66: local_loss: -0.9734767079353333 | global_loss: 1.6670328378677368 | total_loss: 0.6935561299324036\n",
      "2025-07-23 13:34:35 | INFO | Step 67: local_loss: -1.0632165670394897 | global_loss: 1.9076424837112427 | total_loss: 0.8444259166717529\n",
      "2025-07-23 13:35:33 | INFO | Step 68: local_loss: -1.0318965911865234 | global_loss: 1.7559738159179688 | total_loss: 0.7240772247314453\n",
      "2025-07-23 13:36:33 | INFO | Step 69: local_loss: -1.0047073364257812 | global_loss: 1.668081521987915 | total_loss: 0.6633741855621338\n",
      "2025-07-23 13:37:17 | INFO | Step 70: local_loss: -0.9695153832435608 | global_loss: 1.6609487533569336 | total_loss: 0.6914333701133728\n",
      "2025-07-23 13:38:15 | INFO | Step 71: local_loss: -0.9383121132850647 | global_loss: 1.6788992881774902 | total_loss: 0.7405871748924255\n",
      "2025-07-23 13:39:14 | INFO | Step 72: local_loss: -1.0009585618972778 | global_loss: 1.78715181350708 | total_loss: 0.7861932516098022\n",
      "2025-07-23 13:40:12 | INFO | Step 73: local_loss: -0.9545159935951233 | global_loss: 1.8189020156860352 | total_loss: 0.8643860220909119\n",
      "2025-07-23 13:41:10 | INFO | Step 74: local_loss: -1.0106778144836426 | global_loss: 1.6714720726013184 | total_loss: 0.6607942581176758\n",
      "2025-07-23 13:42:07 | INFO | Step 75: local_loss: -1.0180861949920654 | global_loss: 1.6943745613098145 | total_loss: 0.676288366317749\n",
      "2025-07-23 13:42:52 | INFO | Step 76: local_loss: -0.870166003704071 | global_loss: 1.4915283918380737 | total_loss: 0.6213623881340027\n",
      "2025-07-23 13:43:36 | INFO | Step 77: local_loss: -1.035502552986145 | global_loss: 1.6674319505691528 | total_loss: 0.6319293975830078\n",
      "2025-07-23 13:44:34 | INFO | Step 78: local_loss: -0.9551221132278442 | global_loss: 1.7405586242675781 | total_loss: 0.7854365110397339\n",
      "2025-07-23 13:45:32 | INFO | Step 79: local_loss: -0.940723180770874 | global_loss: 1.79056715965271 | total_loss: 0.8498439788818359\n",
      "2025-07-23 13:46:31 | INFO | Step 80: local_loss: -0.9672203063964844 | global_loss: 1.4811675548553467 | total_loss: 0.5139472484588623\n",
      "2025-07-23 13:47:19 | INFO | Step 81: local_loss: -0.9918166995048523 | global_loss: 1.6688547134399414 | total_loss: 0.6770380139350891\n",
      "2025-07-23 13:48:04 | INFO | Step 82: local_loss: -1.0328339338302612 | global_loss: 1.765587568283081 | total_loss: 0.7327536344528198\n",
      "2025-07-23 13:49:02 | INFO | Step 83: local_loss: -1.062101125717163 | global_loss: 1.588990569114685 | total_loss: 0.526889443397522\n",
      "2025-07-23 13:50:00 | INFO | Step 84: local_loss: -0.9398515224456787 | global_loss: 1.7689520120620728 | total_loss: 0.829100489616394\n",
      "2025-07-23 13:51:00 | INFO | Step 85: local_loss: -1.0242631435394287 | global_loss: 1.5923535823822021 | total_loss: 0.5680904388427734\n",
      "2025-07-23 13:51:58 | INFO | Step 86: local_loss: -0.9891194105148315 | global_loss: 1.637526273727417 | total_loss: 0.6484068632125854\n",
      "2025-07-23 13:52:56 | INFO | Step 87: local_loss: -0.9430850148200989 | global_loss: 1.737756371498108 | total_loss: 0.794671356678009\n",
      "2025-07-23 13:53:55 | INFO | Step 88: local_loss: -0.979414165019989 | global_loss: 1.9377195835113525 | total_loss: 0.9583054184913635\n",
      "2025-07-23 13:54:54 | INFO | Step 89: local_loss: -0.9906561970710754 | global_loss: 1.5656423568725586 | total_loss: 0.5749861598014832\n",
      "2025-07-23 13:55:53 | INFO | Step 90: local_loss: -1.0002421140670776 | global_loss: 1.7942628860473633 | total_loss: 0.7940207719802856\n",
      "2025-07-23 13:56:37 | INFO | Step 91: local_loss: -0.9564573168754578 | global_loss: 1.5500293970108032 | total_loss: 0.5935720801353455\n",
      "2025-07-23 13:57:33 | INFO | Step 92: local_loss: -1.0107890367507935 | global_loss: 1.848002552986145 | total_loss: 0.8372135162353516\n",
      "2025-07-23 13:58:33 | INFO | Step 93: local_loss: -1.0225194692611694 | global_loss: 1.8610336780548096 | total_loss: 0.8385142087936401\n",
      "2025-07-23 13:59:30 | INFO | Step 94: local_loss: -1.0440654754638672 | global_loss: 1.7682850360870361 | total_loss: 0.724219560623169\n",
      "2025-07-23 14:00:15 | INFO | Step 95: local_loss: -1.0130785703659058 | global_loss: 1.8476260900497437 | total_loss: 0.8345475196838379\n",
      "2025-07-23 14:01:13 | INFO | Step 96: local_loss: -1.0109893083572388 | global_loss: 1.6797425746917725 | total_loss: 0.6687532663345337\n",
      "2025-07-23 14:02:13 | INFO | Step 97: local_loss: -0.9970226883888245 | global_loss: 1.6635825634002686 | total_loss: 0.6665598750114441\n",
      "2025-07-23 14:03:11 | INFO | Step 98: local_loss: -0.9639126658439636 | global_loss: 1.612862467765808 | total_loss: 0.6489498019218445\n",
      "2025-07-23 14:03:55 | INFO | Step 99: local_loss: -0.9852675199508667 | global_loss: 1.6274011135101318 | total_loss: 0.6421335935592651\n",
      "2025-07-23 14:03:55 | INFO | Center node 268 finished at step 100 with loss 0.6421\n",
      "2025-07-23 14:03:55 | INFO | \n",
      "=== Pretraining Epoch 1 (Center Node: 7572) ===\n",
      "2025-07-23 14:04:53 | INFO | Step 0: local_loss: -0.4058429002761841 | global_loss: 1.7558684349060059 | total_loss: 1.3500255346298218\n",
      "2025-07-23 14:05:54 | INFO | Step 1: local_loss: -0.40325894951820374 | global_loss: 1.6030616760253906 | total_loss: 1.1998027563095093\n",
      "2025-07-23 14:06:53 | INFO | Step 2: local_loss: -0.4345221221446991 | global_loss: 1.7754135131835938 | total_loss: 1.3408913612365723\n",
      "2025-07-23 14:07:53 | INFO | Step 3: local_loss: -0.37887075543403625 | global_loss: 1.6147183179855347 | total_loss: 1.2358475923538208\n",
      "2025-07-23 14:08:49 | INFO | Step 4: local_loss: -0.4128855764865875 | global_loss: 1.7837706804275513 | total_loss: 1.3708851337432861\n",
      "2025-07-23 14:09:46 | INFO | Step 5: local_loss: -0.419147253036499 | global_loss: 1.537752389907837 | total_loss: 1.118605136871338\n",
      "2025-07-23 14:10:44 | INFO | Step 6: local_loss: -0.426166296005249 | global_loss: 1.5834355354309082 | total_loss: 1.1572692394256592\n",
      "2025-07-23 14:11:43 | INFO | Step 7: local_loss: -0.4205579161643982 | global_loss: 1.5277113914489746 | total_loss: 1.1071534156799316\n",
      "2025-07-23 14:12:43 | INFO | Step 8: local_loss: -0.2994379997253418 | global_loss: 1.6949138641357422 | total_loss: 1.3954758644104004\n",
      "2025-07-23 14:13:43 | INFO | Step 9: local_loss: -0.39420896768569946 | global_loss: 1.5307269096374512 | total_loss: 1.1365180015563965\n",
      "2025-07-23 14:14:42 | INFO | Step 10: local_loss: -0.3348448872566223 | global_loss: 1.6168551445007324 | total_loss: 1.2820103168487549\n",
      "2025-07-23 14:15:42 | INFO | Step 11: local_loss: -0.32726576924324036 | global_loss: 1.499384880065918 | total_loss: 1.172119140625\n",
      "2025-07-23 14:16:41 | INFO | Step 12: local_loss: -0.4167892038822174 | global_loss: 1.5166767835617065 | total_loss: 1.0998876094818115\n",
      "2025-07-23 14:17:40 | INFO | Step 13: local_loss: -0.3788289427757263 | global_loss: 1.4352355003356934 | total_loss: 1.0564064979553223\n",
      "2025-07-23 14:18:38 | INFO | Step 14: local_loss: -0.346511572599411 | global_loss: 1.411325216293335 | total_loss: 1.0648136138916016\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2025-07-23 14:19:37 | INFO | Step 15: local_loss: -0.36744368076324463 | global_loss: 1.5110448598861694 | total_loss: 1.1436011791229248\n",
      "2025-07-23 14:20:35 | INFO | Step 16: local_loss: -0.40504199266433716 | global_loss: 1.6576842069625854 | total_loss: 1.2526421546936035\n",
      "2025-07-23 14:21:33 | INFO | Step 17: local_loss: -0.4176269769668579 | global_loss: 1.622543454170227 | total_loss: 1.2049164772033691\n",
      "2025-07-23 14:22:17 | INFO | Step 18: local_loss: -0.3429964482784271 | global_loss: 1.6146882772445679 | total_loss: 1.2716917991638184\n",
      "2025-07-23 14:23:14 | INFO | Step 19: local_loss: -0.36925208568573 | global_loss: 1.7155885696411133 | total_loss: 1.3463364839553833\n",
      "2025-07-23 14:24:13 | INFO | Step 20: local_loss: -0.35452476143836975 | global_loss: 1.3512542247772217 | total_loss: 0.9967294931411743\n",
      "2025-07-23 14:25:11 | INFO | Step 21: local_loss: -0.30019304156303406 | global_loss: 1.449941873550415 | total_loss: 1.1497488021850586\n",
      "2025-07-23 14:26:10 | INFO | Step 22: local_loss: -0.35918277502059937 | global_loss: 1.715255856513977 | total_loss: 1.3560731410980225\n",
      "2025-07-23 14:27:07 | INFO | Step 23: local_loss: -0.2485891878604889 | global_loss: 1.5934926271438599 | total_loss: 1.3449034690856934\n",
      "2025-07-23 14:28:06 | INFO | Step 24: local_loss: -0.27290526032447815 | global_loss: 1.4476253986358643 | total_loss: 1.1747201681137085\n",
      "2025-07-23 14:29:04 | INFO | Step 25: local_loss: -0.33542442321777344 | global_loss: 1.4619395732879639 | total_loss: 1.1265151500701904\n",
      "2025-07-23 14:30:03 | INFO | Step 26: local_loss: -0.39067763090133667 | global_loss: 1.47545325756073 | total_loss: 1.084775686264038\n",
      "2025-07-23 14:31:01 | INFO | Step 27: local_loss: -0.3180164694786072 | global_loss: 1.6742552518844604 | total_loss: 1.356238842010498\n",
      "2025-07-23 14:31:59 | INFO | Step 28: local_loss: -0.35014069080352783 | global_loss: 1.5645740032196045 | total_loss: 1.2144333124160767\n",
      "2025-07-23 14:32:58 | INFO | Step 29: local_loss: -0.29293695092201233 | global_loss: 1.567837119102478 | total_loss: 1.274900197982788\n",
      "2025-07-23 14:33:56 | INFO | Step 30: local_loss: -0.2870680093765259 | global_loss: 1.474613904953003 | total_loss: 1.187545895576477\n",
      "2025-07-23 14:34:55 | INFO | Step 31: local_loss: -0.32182949781417847 | global_loss: 1.4282357692718506 | total_loss: 1.1064062118530273\n",
      "2025-07-23 14:35:51 | INFO | Step 32: local_loss: -0.3214755058288574 | global_loss: 1.6356087923049927 | total_loss: 1.3141332864761353\n",
      "2025-07-23 14:36:50 | INFO | Step 33: local_loss: -0.3710607588291168 | global_loss: 1.444584846496582 | total_loss: 1.0735241174697876\n",
      "2025-07-23 14:37:49 | INFO | Step 34: local_loss: -0.3620481789112091 | global_loss: 1.5262060165405273 | total_loss: 1.1641578674316406\n",
      "2025-07-23 14:38:48 | INFO | Step 35: local_loss: -0.35628393292427063 | global_loss: 1.4408559799194336 | total_loss: 1.0845720767974854\n",
      "2025-07-23 14:39:47 | INFO | Step 36: local_loss: -0.2649427652359009 | global_loss: 1.5017368793487549 | total_loss: 1.236794114112854\n",
      "2025-07-23 14:40:45 | INFO | Step 37: local_loss: -0.28739240765571594 | global_loss: 1.1874303817749023 | total_loss: 0.9000380039215088\n",
      "2025-07-23 14:41:44 | INFO | Step 38: local_loss: -0.3790745735168457 | global_loss: 1.4805803298950195 | total_loss: 1.1015057563781738\n",
      "2025-07-23 14:42:28 | INFO | Step 39: local_loss: -0.2635973393917084 | global_loss: 1.269214391708374 | total_loss: 1.0056170225143433\n",
      "2025-07-23 14:43:28 | INFO | Step 40: local_loss: -0.3256365656852722 | global_loss: 1.5495216846466064 | total_loss: 1.2238850593566895\n",
      "2025-07-23 14:44:25 | INFO | Step 41: local_loss: -0.359374463558197 | global_loss: 1.5992263555526733 | total_loss: 1.239851951599121\n",
      "2025-07-23 14:45:24 | INFO | Step 42: local_loss: -0.32792067527770996 | global_loss: 1.4431962966918945 | total_loss: 1.1152756214141846\n",
      "2025-07-23 14:46:22 | INFO | Step 43: local_loss: -0.37369537353515625 | global_loss: 1.6423722505569458 | total_loss: 1.2686768770217896\n",
      "2025-07-23 14:47:22 | INFO | Step 44: local_loss: -0.33814817667007446 | global_loss: 1.3747512102127075 | total_loss: 1.0366029739379883\n",
      "2025-07-23 14:48:20 | INFO | Step 45: local_loss: -0.22757035493850708 | global_loss: 1.191862940788269 | total_loss: 0.964292585849762\n",
      "2025-07-23 14:49:19 | INFO | Step 46: local_loss: -0.29729053378105164 | global_loss: 1.4240994453430176 | total_loss: 1.1268088817596436\n",
      "2025-07-23 14:50:17 | INFO | Step 47: local_loss: -0.2525763511657715 | global_loss: 1.3369166851043701 | total_loss: 1.0843403339385986\n",
      "2025-07-23 14:51:16 | INFO | Step 48: local_loss: -0.3116440773010254 | global_loss: 1.28818678855896 | total_loss: 0.9765427112579346\n",
      "2025-07-23 14:52:15 | INFO | Step 49: local_loss: -0.2874007821083069 | global_loss: 1.1685559749603271 | total_loss: 0.8811551928520203\n",
      "2025-07-23 14:53:13 | INFO | Step 50: local_loss: -0.30338966846466064 | global_loss: 1.3591294288635254 | total_loss: 1.0557397603988647\n",
      "2025-07-23 14:54:11 | INFO | Step 51: local_loss: -0.26907777786254883 | global_loss: 1.4425299167633057 | total_loss: 1.1734521389007568\n",
      "2025-07-23 14:55:10 | INFO | Step 52: local_loss: -0.26620885729789734 | global_loss: 1.383865475654602 | total_loss: 1.1176565885543823\n",
      "2025-07-23 14:56:09 | INFO | Step 53: local_loss: -0.26796552538871765 | global_loss: 1.201613426208496 | total_loss: 0.933647871017456\n",
      "2025-07-23 14:57:06 | INFO | Step 54: local_loss: -0.37415096163749695 | global_loss: 1.3139601945877075 | total_loss: 0.9398092031478882\n",
      "2025-07-23 14:58:06 | INFO | Step 55: local_loss: -0.20640967786312103 | global_loss: 1.1914408206939697 | total_loss: 0.9850311279296875\n",
      "2025-07-23 14:59:04 | INFO | Step 56: local_loss: -0.2741996645927429 | global_loss: 1.3192957639694214 | total_loss: 1.0450961589813232\n",
      "2025-07-23 15:00:04 | INFO | Step 57: local_loss: -0.35987356305122375 | global_loss: 1.5361669063568115 | total_loss: 1.1762933731079102\n",
      "2025-07-23 15:01:02 | INFO | Step 58: local_loss: -0.3516319990158081 | global_loss: 1.447249412536621 | total_loss: 1.095617413520813\n",
      "2025-07-23 15:02:00 | INFO | Step 59: local_loss: -0.26126551628112793 | global_loss: 1.3127236366271973 | total_loss: 1.0514581203460693\n",
      "2025-07-23 15:02:59 | INFO | Step 60: local_loss: -0.4205729067325592 | global_loss: 1.3516238927841187 | total_loss: 0.9310510158538818\n",
      "2025-07-23 15:03:58 | INFO | Step 61: local_loss: -0.30987784266471863 | global_loss: 1.4291009902954102 | total_loss: 1.1192231178283691\n",
      "2025-07-23 15:04:43 | INFO | Step 62: local_loss: -0.36941394209861755 | global_loss: 1.2721993923187256 | total_loss: 0.9027854204177856\n",
      "2025-07-23 15:05:41 | INFO | Step 63: local_loss: -0.197242870926857 | global_loss: 1.3602478504180908 | total_loss: 1.163004994392395\n",
      "2025-07-23 15:06:40 | INFO | Step 64: local_loss: -0.31499120593070984 | global_loss: 1.2566735744476318 | total_loss: 0.9416823387145996\n",
      "2025-07-23 15:07:39 | INFO | Step 65: local_loss: -0.33306923508644104 | global_loss: 1.456932783126831 | total_loss: 1.1238635778427124\n",
      "2025-07-23 15:08:37 | INFO | Step 66: local_loss: -0.4120079278945923 | global_loss: 1.503354549407959 | total_loss: 1.0913466215133667\n",
      "2025-07-23 15:09:22 | INFO | Step 67: local_loss: -0.360014408826828 | global_loss: 1.5436246395111084 | total_loss: 1.183610200881958\n",
      "2025-07-23 15:10:18 | INFO | Step 68: local_loss: -0.28067755699157715 | global_loss: 1.3282020092010498 | total_loss: 1.0475244522094727\n",
      "2025-07-23 15:11:02 | INFO | Step 69: local_loss: -0.2898772656917572 | global_loss: 1.2996400594711304 | total_loss: 1.0097627639770508\n",
      "2025-07-23 15:11:59 | INFO | Step 70: local_loss: -0.3287203013896942 | global_loss: 1.306852102279663 | total_loss: 0.9781317710876465\n",
      "2025-07-23 15:12:58 | INFO | Step 71: local_loss: -0.327339768409729 | global_loss: 1.2251557111740112 | total_loss: 0.8978159427642822\n",
      "2025-07-23 15:13:56 | INFO | Step 72: local_loss: -0.24801772832870483 | global_loss: 0.9212933778762817 | total_loss: 0.6732756495475769\n",
      "2025-07-23 15:14:56 | INFO | Step 73: local_loss: -0.21135643124580383 | global_loss: 1.176271677017212 | total_loss: 0.9649152755737305\n",
      "2025-07-23 15:15:55 | INFO | Step 74: local_loss: -0.2490820437669754 | global_loss: 1.445286512374878 | total_loss: 1.196204423904419\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2025-07-23 15:16:54 | INFO | Step 75: local_loss: -0.3424496352672577 | global_loss: 1.3665566444396973 | total_loss: 1.0241069793701172\n",
      "2025-07-23 15:17:53 | INFO | Step 76: local_loss: -0.23215773701667786 | global_loss: 1.1024141311645508 | total_loss: 0.8702564239501953\n",
      "2025-07-23 15:18:51 | INFO | Step 77: local_loss: -0.118079774081707 | global_loss: 1.0057315826416016 | total_loss: 0.887651801109314\n",
      "2025-07-23 15:19:49 | INFO | Step 78: local_loss: -0.3327029049396515 | global_loss: 1.4129530191421509 | total_loss: 1.0802501440048218\n",
      "2025-07-23 15:20:48 | INFO | Step 79: local_loss: -0.12197347730398178 | global_loss: 1.108267903327942 | total_loss: 0.9862944483757019\n",
      "2025-07-23 15:21:47 | INFO | Step 80: local_loss: -0.3535514771938324 | global_loss: 1.361393928527832 | total_loss: 1.0078424215316772\n",
      "2025-07-23 15:22:31 | INFO | Step 81: local_loss: -0.2495347112417221 | global_loss: 1.257918357849121 | total_loss: 1.0083836317062378\n",
      "2025-07-23 15:23:30 | INFO | Step 82: local_loss: -0.2982451617717743 | global_loss: 1.2360215187072754 | total_loss: 0.9377763271331787\n",
      "2025-07-23 15:24:30 | INFO | Step 83: local_loss: -0.18739905953407288 | global_loss: 0.9713298082351685 | total_loss: 0.783930778503418\n",
      "2025-07-23 15:25:28 | INFO | Step 84: local_loss: -0.3936282992362976 | global_loss: 1.2416664361953735 | total_loss: 0.8480381369590759\n",
      "2025-07-23 15:26:14 | INFO | Step 85: local_loss: -0.29828494787216187 | global_loss: 1.2602474689483643 | total_loss: 0.9619625210762024\n",
      "2025-07-23 15:27:14 | INFO | Step 86: local_loss: -0.28745195269584656 | global_loss: 1.1740556955337524 | total_loss: 0.8866037130355835\n",
      "2025-07-23 15:28:13 | INFO | Step 87: local_loss: -0.2911079227924347 | global_loss: 1.1259040832519531 | total_loss: 0.8347961902618408\n",
      "2025-07-23 15:29:12 | INFO | Step 88: local_loss: -0.1785629242658615 | global_loss: 1.036388874053955 | total_loss: 0.8578259348869324\n",
      "2025-07-23 15:30:11 | INFO | Step 89: local_loss: -0.14439059793949127 | global_loss: 0.8716400265693665 | total_loss: 0.7272494435310364\n",
      "2025-07-23 15:31:11 | INFO | Step 90: local_loss: -0.17666397988796234 | global_loss: 1.0517780780792236 | total_loss: 0.8751140832901001\n",
      "2025-07-23 15:32:11 | INFO | Step 91: local_loss: -0.1928798258304596 | global_loss: 1.318280577659607 | total_loss: 1.1254007816314697\n",
      "2025-07-23 15:32:56 | INFO | Step 92: local_loss: -0.13270871341228485 | global_loss: 1.0058294534683228 | total_loss: 0.8731207251548767\n",
      "2025-07-23 15:33:55 | INFO | Step 93: local_loss: -0.15522336959838867 | global_loss: 0.9987976551055908 | total_loss: 0.8435742855072021\n",
      "2025-07-23 15:34:55 | INFO | Step 94: local_loss: -0.19734521210193634 | global_loss: 1.0771111249923706 | total_loss: 0.8797659277915955\n",
      "2025-07-23 15:35:39 | INFO | Step 95: local_loss: -0.3135007917881012 | global_loss: 1.0071059465408325 | total_loss: 0.6936051845550537\n",
      "2025-07-23 15:36:37 | INFO | Step 96: local_loss: -0.2869383692741394 | global_loss: 1.0401155948638916 | total_loss: 0.7531772255897522\n",
      "2025-07-23 15:37:36 | INFO | Step 97: local_loss: -0.31791672110557556 | global_loss: 1.1398603916168213 | total_loss: 0.8219436407089233\n",
      "2025-07-23 15:38:34 | INFO | Step 98: local_loss: -0.2495616227388382 | global_loss: 0.9471582770347595 | total_loss: 0.6975966691970825\n",
      "2025-07-23 15:39:33 | INFO | Step 99: local_loss: -0.2831840217113495 | global_loss: 1.0424473285675049 | total_loss: 0.759263277053833\n",
      "2025-07-23 15:39:33 | INFO | Center node 7572 finished at step 100 with loss 0.7593\n",
      "2025-07-23 15:39:33 | INFO | \n",
      "=== Pretraining Epoch 2 (Center Node: 7964) ===\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
      "\u001b[31mKeyboardInterrupt\u001b[39m                         Traceback (most recent call last)",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[7]\u001b[39m\u001b[32m, line 148\u001b[39m\n\u001b[32m    146\u001b[39m bp_node_features = []\n\u001b[32m    147\u001b[39m \u001b[38;5;28;01mfor\u001b[39;00m node_idx \u001b[38;5;129;01min\u001b[39;00m bp_nodes:\n\u001b[32m--> \u001b[39m\u001b[32m148\u001b[39m     node_subgraph = \u001b[43mcreate_node_subgraph\u001b[49m\u001b[43m(\u001b[49m\u001b[43mnode_idx\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfeat_data\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43medge_indexs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdevice\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m    149\u001b[39m     _, node_feature = bsne_model([node_subgraph])\n\u001b[32m    150\u001b[39m     bp_node_features.append(node_feature.squeeze(\u001b[32m0\u001b[39m))\n",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[5]\u001b[39m\u001b[32m, line 67\u001b[39m, in \u001b[36mcreate_node_subgraph\u001b[39m\u001b[34m(node_idx, feat_data, edge_indexs, device)\u001b[39m\n\u001b[32m     65\u001b[39m sub_edge_index = []\n\u001b[32m     66\u001b[39m \u001b[38;5;28;01mfor\u001b[39;00m rel_idx \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(\u001b[38;5;28mlen\u001b[39m(edge_indexs)):\n\u001b[32m---> \u001b[39m\u001b[32m67\u001b[39m     edge_index = \u001b[43medge_indexs\u001b[49m\u001b[43m[\u001b[49m\u001b[43mrel_idx\u001b[49m\u001b[43m]\u001b[49m\u001b[43m[\u001b[49m\u001b[32;43m0\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m.\u001b[49m\u001b[43mcpu\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m.numpy()\n\u001b[32m     68\u001b[39m     mask = np.isin(edge_index[\u001b[32m0\u001b[39m], sub_nodes) & np.isin(edge_index[\u001b[32m1\u001b[39m], sub_nodes)\n\u001b[32m     69\u001b[39m     local_edges = edge_index[:, mask]\n",
      "\u001b[31mKeyboardInterrupt\u001b[39m: "
     ]
    }
   ],
   "source": [
    "\n",
    "\n",
    "# 参数设置\n",
    "args = {\n",
    "    \"dataset\": \"amazon\",\n",
    "    #     \"dataset\": \"yelp\",\n",
    "    \"weight_decay\": 0.00005,\n",
    "    \"seed\": 76,\n",
    "    #     预训练参数\n",
    "    \"pretrain_epochs\": 10,\n",
    "    \"max_steps\": 100,\n",
    "    \"sample_size\": 100,\n",
    "    \"loss_threshold\": 0.1,\n",
    "    \"pretrain_lr\": 0.0015,  # 0.0005，\n",
    "    \"finetune_lr\": 0.0005,\n",
    "    #     分类训练参数\n",
    "    \"batch_size\": 8,\n",
    "    \"num_epochs\": 80,\n",
    "    \"patience\": 30,\n",
    "    \"weight\": 0.6,\n",
    "    \"test_size\": 0.3,\n",
    "    \"val_size\": 0.5,\n",
    "    # 模型结构参数\n",
    "    \"layers_tree\": 7,\n",
    "    \"num_heads\": 4,\n",
    "    \"num_layers\": 2,\n",
    "    \"drop_rate\": 0.5\n",
    "}\n",
    "\n",
    "# 创建 logger\n",
    "logger = setup_logger()\n",
    "logger.info('============  BSNE Training  ============')\n",
    "logger.info('Args:\\n' + json.dumps(args, indent=2, ensure_ascii=False))\n",
    "\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "logger.info(device)\n",
    "logger.info('loading data...')\n",
    "prefix = \"../../data/\"\n",
    "\n",
    "edge_indexs, feat_data, labels = load_data(args['dataset'], args['layers_tree'], prefix)\n",
    "\n",
    "np.random.seed(args['seed'])\n",
    "rd.seed(args['seed'])\n",
    "\n",
    "if args['dataset'] == 'yelp':\n",
    "    index = list(range(len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels, stratify=labels,\n",
    "                                                                    test_size=args['test_size'], random_state=2,\n",
    "                                                                    shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"YelpChi_shortest_distance.pkl\")\n",
    "elif args['dataset'] == 'amazon':\n",
    "    index = list(range(3305, len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels[3305:],\n",
    "                                                                    stratify=labels[3305:],\n",
    "                                                                    test_size=args['test_size'],\n",
    "                                                                    random_state=2, shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"Amazon_shortest_distance.pkl\")\n",
    "\n",
    "with open(dist_path, 'rb') as f:\n",
    "    dist_data = pickle.load(f)\n",
    "    dist_matrix = torch.tensor(dist_data['dist_matrix']).to(device)\n",
    "\n",
    "# 准备特征数据\n",
    "feat_data = torch.tensor(feat_data).float()\n",
    "# 特征归一化\n",
    "scaler = MinMaxScaler()\n",
    "feat_data = torch.tensor(scaler.fit_transform(feat_data)).float().to(device)\n",
    "\n",
    "# 初始化模型\n",
    "bsne_model = BSNE_Transformer(\n",
    "    in_feat=feat_data.shape[1],\n",
    "    out_feat=2,\n",
    "    relation_nums=len(edge_indexs),\n",
    "    d_model=64,\n",
    "    nhead=args['num_heads'],\n",
    "    num_layers=args['num_layers'],\n",
    "    dim_feedforward=256,\n",
    "    drop_rate=args['drop_rate']\n",
    ").to(device)\n",
    "\n",
    "# 将边索引转移到设备\n",
    "for edge_index in edge_indexs:\n",
    "    edge_index[0] = edge_index[0].to(device)\n",
    "    edge_index[1] = [tensor.to(device) for tensor in edge_index[1]]\n",
    "\n",
    "logger.info(\"\\n=== Starting Pretraining ===\")\n",
    "\n",
    "bsne_model.classifier.requires_grad_(False)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    filter(lambda p: p.requires_grad, bsne_model.parameters()),\n",
    "    lr=args['pretrain_lr'],\n",
    "    weight_decay=args[\"weight_decay\"]\n",
    ")\n",
    "pretrain_best_loss = float('inf')\n",
    "pretrain_no_improve = 0\n",
    "pretrain_early_stop = False\n",
    "\n",
    "temperature = 0.3  # 越小区分性越强\n",
    "\n",
    "loss_threshold = args['loss_threshold']\n",
    "sample_size = args['sample_size']\n",
    "max_steps = args['max_steps']\n",
    "max_epochs = args['pretrain_epochs']\n",
    "center_indices = list(range(feat_data.shape[0]))\n",
    "# 在每轮epoch前随机打乱中心点顺序\n",
    "rd.shuffle(center_indices)\n",
    "\n",
    "# 限制训练的中心点数量\n",
    "center_indices = center_indices[:max_epochs]\n",
    "for epoch, center_idx in enumerate(center_indices):\n",
    "    logger.info(f\"\\n=== Pretraining Epoch {epoch} (Center Node: {center_idx}) ===\")\n",
    "    step = 0\n",
    "\n",
    "    dist_row = dist_matrix[center_idx].cpu().numpy()\n",
    "    probs = np.exp(-dist_row / temperature)\n",
    "\n",
    "    probs[center_idx] = 0\n",
    "    probs = probs / (probs.sum() + 1e-10)\n",
    "\n",
    "    available_nodes = len(dist_row) - 1\n",
    "\n",
    "    # 构建Bp子图\n",
    "    actual_sample_size = min(sample_size, available_nodes)\n",
    "    if actual_sample_size > 0:\n",
    "        neighbors = np.random.choice(len(dist_row), size=actual_sample_size, p=probs, replace=False)\n",
    "        bp_nodes = neighbors.tolist()\n",
    "\n",
    "    # 构建Bu子图\n",
    "    if actual_sample_size > 0:\n",
    "        neighbors = np.random.choice(len(dist_row), size=actual_sample_size, replace=False)\n",
    "        bu_nodes = neighbors.tolist()\n",
    "\n",
    "    while True:\n",
    "        bsne_model.train()\n",
    "        optimizer.zero_grad()\n",
    "\n",
    "        total_loss = 0.0  # 每个step都要重置\n",
    "        eps = 1e-10\n",
    "\n",
    "        # 计算Bp子图中所有节点的特征\n",
    "        bp_node_features = []\n",
    "        for node_idx in bp_nodes:\n",
    "            node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "            _, node_feature = bsne_model([node_subgraph])\n",
    "            bp_node_features.append(node_feature.squeeze(0))\n",
    "        bp_features = torch.stack(bp_node_features)\n",
    "\n",
    "        center_node_subgraph = create_node_subgraph(center_idx, feat_data, edge_indexs, device)\n",
    "        _, center_feature = bsne_model([center_node_subgraph])\n",
    "        center_feature = center_feature.squeeze(0)\n",
    "\n",
    "\n",
    "\n",
    "        P = torch.tensor(probs[bp_nodes], device=device, dtype=torch.float32)\n",
    "        P = P/ P.sum()\n",
    "        # print(P)\n",
    "        # p_entropy = -torch.sum(P * torch.log(P)).item()\n",
    "        # print(f\"p_entropy: {p_entropy:.4f}\")\n",
    "\n",
    "        # 计算Q向量（欧式距离）\n",
    "        feat_dists_bp = torch.cdist(center_feature.unsqueeze(0), bp_features).squeeze(0)\n",
    "        Q = torch.softmax(-feat_dists_bp, dim=0)\n",
    "        log_ratio = (torch.log(P / Q)) ** 2\n",
    "        # log_ratio = torch.log(P / Q)\n",
    "        loss_local = log_ratio.mean()\n",
    "\n",
    "        # 全局loss计算\n",
    "        # 计算Bu子图中所有节点的特征\n",
    "        bu_node_features = []\n",
    "        for node_idx in bu_nodes:\n",
    "            node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "            _, node_feature = bsne_model([node_subgraph])\n",
    "            bu_node_features.append(node_feature.squeeze(0))\n",
    "        bu_features = torch.stack(bu_node_features)\n",
    "\n",
    "        feat_dists_bu = torch.cdist(center_feature.unsqueeze(0), bu_features).squeeze(0)\n",
    "        sum_e_bp = torch.exp(-feat_dists_bp).sum()\n",
    "        sum_e_bu = torch.exp(-feat_dists_bu).sum()\n",
    "\n",
    "        N = len(probs)\n",
    "        k_Bp = probs[bp_nodes].sum() * (N / len(bp_nodes))\n",
    "        loss_global = (torch.log(k_Bp*sum_e_bu/sum_e_bp))**2\n",
    "        #         #加上平方项\n",
    "        #         loss_global = (torch.log(global_ratio.clamp(min=eps, max=1e10)))**2\n",
    "        total_loss = loss_local + loss_global\n",
    "        # total_loss = loss_local\n",
    "        total_loss.backward()\n",
    "        optimizer.step()\n",
    "\n",
    "        logger.info(f\"Step {step}: local_loss: {loss_local.item()} | global_loss: {loss_global.item()} | total_loss: {total_loss.item()}\")\n",
    "        step += 1\n",
    "\n",
    "        if total_loss.item() < loss_threshold or step >= max_steps:\n",
    "            logger.info(f\"Center node {center_idx} finished at step {step} with loss {total_loss.item():.4f}\")\n",
    "            break\n",
    "\n",
    "\n",
    "logger.info(\"\\n=== Starting Fine-tuning ===\")\n",
    "bsne_model.classifier.requires_grad_(True)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    bsne_model.parameters(),\n",
    "    lr=args['finetune_lr'],\n",
    "    weight_decay=args[\"weight_decay\"]\n",
    ")\n",
    "scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n",
    "    optimizer, mode='max', factor=0.5, patience=10, verbose=True\n",
    ")\n",
    "\n",
    "best_val_auc = 0.0\n",
    "best_model_state = None\n",
    "train_pos, train_neg = pos_neg_split(idx_train, y_train)\n",
    "\n",
    "no_improve_epochs = 0\n",
    "early_stop = False\n",
    "\n",
    "for epoch in range(args['num_epochs']):\n",
    "    if early_stop:\n",
    "        break\n",
    "\n",
    "    bsne_model.train()\n",
    "    total_loss = 0.0\n",
    "\n",
    "    # 采样中心节点\n",
    "    batch_centers = rd.sample(train_pos + train_neg, args['batch_size'])\n",
    "    subgraph_data = []\n",
    "\n",
    "    for xi in batch_centers:\n",
    "        subgraph = create_node_subgraph(xi, feat_data, edge_indexs, device)\n",
    "        subgraph_data.append(subgraph)\n",
    "\n",
    "    optimizer.zero_grad()\n",
    "\n",
    "    center_logits, _ = bsne_model(subgraph_data)  # [B, 2]\n",
    "\n",
    "    # 提取中心节点标签\n",
    "    labels_center = torch.tensor([labels[xi] for xi in batch_centers]).to(device).long()\n",
    "\n",
    "    # 计算分类损失\n",
    "    cls_loss = F.nll_loss(center_logits, labels_center)\n",
    "\n",
    "    cls_loss.backward()\n",
    "    optimizer.step()\n",
    "\n",
    "    total_loss += cls_loss.item()\n",
    "\n",
    "    avg_loss = total_loss / args['batch_size']\n",
    "\n",
    "    if epoch % 5 == 0:\n",
    "        val_auc, val_ap, val_f1, val_g_mean = test(idx_val, y_val, bsne_model, feat_data, edge_indexs, device)\n",
    "\n",
    "        logger.info(f'Epoch: {epoch:03d} | Loss: {avg_loss:.4f} | Val AUC: {val_auc:.4f} | Val F1: {val_f1:.4f} | Val GMean: {val_g_mean:.4f}')\n",
    "\n",
    "        scheduler.step(val_auc)\n",
    "\n",
    "        if val_auc > best_val_auc:\n",
    "            best_val_auc = val_auc\n",
    "            no_improve_epochs = 0\n",
    "            best_model_state = copy.deepcopy(bsne_model.state_dict())\n",
    "        else:\n",
    "            no_improve_epochs += 1\n",
    "\n",
    "        if no_improve_epochs >= args['patience']:\n",
    "            logger.info(f\"Early stopping at epoch {epoch}\")\n",
    "            early_stop = True\n",
    "\n",
    "# 加载最佳模型\n",
    "bsne_model.load_state_dict(best_model_state)\n",
    "\n",
    "# 最终测试\n",
    "test_auc, test_ap, test_f1, test_g_mean = test(idx_test, y_test, bsne_model, feat_data, edge_indexs, device)\n",
    "logger.info(f'\\n=== Final Test Results ===')\n",
    "logger.info(f'Test AUC: {test_auc:.4f} | Test AP: {test_ap:.4f} | Test F1: {test_f1:.4f} | G-mean: {test_g_mean:.4f}')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2e209f25",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fcd0ceff",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0328eee6",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fdbc6310",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "tsne01",
   "language": "python",
   "name": "tsne01"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
