{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "97cdbbc1",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/data/home/sczc619/run/LML/anaconda3/envs/tsne/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n"
     ]
    }
   ],
   "source": [
    "import pickle\n",
    "import os\n",
    "import random as rd\n",
    "import numpy as np\n",
    "import copy\n",
    "import copy as cp\n",
    "import dgl\n",
    "from collections import defaultdict\n",
    "import time\n",
    "import scipy.sparse as sp\n",
    "from scipy.io import loadmat\n",
    "import torch\n",
    "import torch.nn.functional as F\n",
    "import torch.nn as nn\n",
    "from torch.nn import TransformerEncoder, TransformerEncoderLayer\n",
    "from torch_geometric.utils import to_dense_adj, subgraph\n",
    "from torch.utils.tensorboard import SummaryWriter\n",
    "from sklearn.preprocessing import MinMaxScaler\n",
    "from sklearn.manifold import TSNE\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.metrics import f1_score, accuracy_score, recall_score, roc_auc_score, average_precision_score, \\\n",
    "    confusion_matrix\n",
    "\n",
    "# Relation-graph adjacency lists: dataset/relation key -> pickle file name.\n",
    "filelist = {\n",
    "    'amz_upu': 'amz_upu_adjlists.pickle',\n",
    "    'amz_usu': 'amz_usu_adjlists.pickle',\n",
    "    'amz_uvu': 'amz_uvu_adjlists.pickle',\n",
    "    'yelp_rsr': 'yelp_rsr_adjlists.pickle',\n",
    "    'yelp_rtr': 'yelp_rtr_adjlists.pickle',\n",
    "    'yelp_rur': 'yelp_rur_adjlists.pickle'\n",
    "}\n",
    "\n",
    "# Decomposed-matrix files: dataset/relation key -> file-name prefix;\n",
    "# load_data appends '<level>.pkl' (level = 1..k) to each prefix.\n",
    "file_matrix_prefix = {\n",
    "    'amz_upu': 'amazon_upu_matrix_',\n",
    "    'amz_usu': 'amazon_usu_matrix_',\n",
    "    'amz_uvu': 'amazon_uvu_matrix_',\n",
    "    'yelp_rsr': 'yelpnet_rsr_matrix_decompision_',\n",
    "    'yelp_rtr': 'yelpnet_rtr_matrix_decompision_',\n",
    "    'yelp_rur': 'yelpnet_rur_matrix_decompision_'\n",
    "}\n",
    "\n",
    "\n",
    "def create_node_subgraph(node_idx, feat_data, edge_indexs, device):\n",
    "    \"\"\"\n",
    "    为单个节点创建一阶邻居子图（所有邻居）\n",
    "    \"\"\"\n",
    "    neighbors = set()\n",
    "    for rel_idx in range(len(edge_indexs)):\n",
    "        edge_index = edge_indexs[rel_idx][0].cpu().numpy()\n",
    "        # 找出以中心点为起点的边的终点\n",
    "        rel_neighbors = edge_index[1][edge_index[0] == node_idx].tolist()\n",
    "        neighbors.update(rel_neighbors)\n",
    "\n",
    "    # 移除中心节点自身\n",
    "    neighbors.discard(node_idx)\n",
    "    neighbors = list(neighbors)\n",
    "\n",
    "    # 如果邻居太多，进行随机采样截取\n",
    "    sample_size = 399\n",
    "    if len(neighbors) > sample_size:\n",
    "        neighbors = np.random.choice(neighbors, size=sample_size, replace=False).tolist()\n",
    "\n",
    "    # 构建子图节点列表，确保中心节点是第一个\n",
    "    sub_nodes = [node_idx] + [n for n in neighbors if n != node_idx]\n",
    "\n",
    "    # 构建子图边列表\n",
    "    sub_edge_index = []\n",
    "    for rel_idx in range(len(edge_indexs)):\n",
    "        edge_index = edge_indexs[rel_idx][0].cpu().numpy()\n",
    "        mask = np.isin(edge_index[0], sub_nodes) & np.isin(edge_index[1], sub_nodes)\n",
    "        local_edges = edge_index[:, mask]\n",
    "\n",
    "        # 创建节点映射\n",
    "        node_map = {n: i for i, n in enumerate(sub_nodes)}\n",
    "\n",
    "        # 将全局索引映射到局部索引\n",
    "        if len(local_edges) > 0 and local_edges.size > 0:\n",
    "            src_nodes = [node_map[src] for src in local_edges[0]]\n",
    "            dst_nodes = [node_map[dst] for dst in local_edges[1]]\n",
    "            edge_tensor = torch.tensor([src_nodes, dst_nodes], dtype=torch.long)\n",
    "        else:\n",
    "            # 添加自环确保图不为空\n",
    "            edge_tensor = torch.tensor([[0], [0]], dtype=torch.long)\n",
    "\n",
    "        sub_edge_index.append(edge_tensor.to(device))\n",
    "\n",
    "    # 创建子图数据\n",
    "    subgraph = {\n",
    "        'features': feat_data[sub_nodes].clone(),\n",
    "        'edges': sub_edge_index,\n",
    "        'global_idx': sub_nodes\n",
    "    }\n",
    "\n",
    "    return subgraph\n",
    "\n",
    "\n",
    "def dict_to_edge_index(edge_dict):\n",
    "    source_nodes = []\n",
    "    target_nodes = []\n",
    "    for src, targets in edge_dict.items():\n",
    "        for target in targets:\n",
    "            source_nodes.append(src)\n",
    "            target_nodes.append(target)\n",
    "    edge_index = [source_nodes, target_nodes]\n",
    "    return torch.LongTensor(edge_index)\n",
    "\n",
    "\n",
    "def numpy_array_to_edge_index(np_array):\n",
    "    assert np_array.ndim == 2 and np_array.shape[0] == np_array.shape[1], \"Input must be a square matrix.\"\n",
    "    rows, cols = np.nonzero(np_array)\n",
    "    edge_index = np.vstack((rows, cols))\n",
    "    edge_index_tensor = torch.from_numpy(edge_index).long()\n",
    "    return edge_index_tensor\n",
    "\n",
    "\n",
    "def _load_relation(pickle_path, matrix_pref, k):\n",
    "    \"\"\"\n",
    "    Load one relation graph: its adjacency-dict pickle plus k tree matrices.\n",
    "\n",
    "    :param pickle_path: path of the pickled adjacency dict\n",
    "    :param matrix_pref: file prefix; '<level>.pkl' (level = 1..k) is appended\n",
    "    :param k: number of decomposition levels to load\n",
    "    :return: [edge_index_tensor, list_of_tree_edge_index_tensors]\n",
    "    \"\"\"\n",
    "    # NOTE: pickle.load on untrusted files can execute arbitrary code; these\n",
    "    # are assumed to be locally generated dataset files.\n",
    "    with open(pickle_path, 'rb') as file:\n",
    "        relation = pickle.load(file)\n",
    "    edge_index = dict_to_edge_index(relation)\n",
    "    trees = []\n",
    "    for i in range(1, k + 1):\n",
    "        with open('{}{}.pkl'.format(matrix_pref, i), 'rb') as file:\n",
    "            tree = pickle.load(file)\n",
    "        trees.append(numpy_array_to_edge_index(tree))\n",
    "    return [edge_index, trees]\n",
    "\n",
    "\n",
    "def load_data(data, k=2, prefix=''):\n",
    "    \"\"\"\n",
    "    Load a fraud dataset and its three relation graphs.\n",
    "\n",
    "    :param data: dataset name, 'yelp' or 'amazon'\n",
    "    :param k: number of tree-decomposition levels per relation\n",
    "    :param prefix: directory containing the dataset files\n",
    "    :return: ([[rel_edge_index, rel_trees], ...], feat_data, labels), or None\n",
    "             for an unknown dataset name (matching the original's implicit None)\n",
    "    \"\"\"\n",
    "    pickle_file = {}\n",
    "    matrix_prefix = {}\n",
    "    for key in filelist:\n",
    "        pickle_file[key] = os.path.join(prefix, filelist[key])\n",
    "        matrix_prefix[key] = os.path.join(prefix, file_matrix_prefix[key])\n",
    "\n",
    "    # The original duplicated the load logic six times; it is now driven by\n",
    "    # a per-dataset table plus the _load_relation helper above.\n",
    "    datasets = {\n",
    "        'yelp': ('YelpChi.mat', ['yelp_rur', 'yelp_rtr', 'yelp_rsr']),\n",
    "        'amazon': ('Amazon.mat', ['amz_upu', 'amz_usu', 'amz_uvu'])\n",
    "    }\n",
    "    if data not in datasets:\n",
    "        return None\n",
    "\n",
    "    mat_name, rel_keys = datasets[data]\n",
    "    data_file = loadmat(os.path.join(prefix, mat_name))\n",
    "    labels = data_file['label'].flatten()\n",
    "    feat_data = data_file['features'].todense().A\n",
    "    relations = [_load_relation(pickle_file[key], matrix_prefix[key], k)\n",
    "                 for key in rel_keys]\n",
    "    return relations, feat_data, labels\n",
    "\n",
    "\n",
    "def normalize(mx):\n",
    "    rowsum = np.array(mx.sum(1)) + 0.01\n",
    "    r_inv = np.power(rowsum, -1).flatten()\n",
    "    r_inv[np.isinf(r_inv)] = 0.\n",
    "    r_mat_inv = sp.diags(r_inv)\n",
    "    mx = r_mat_inv.dot(mx)\n",
    "    return mx\n",
    "\n",
    "\n",
    "def pos_neg_split(nodes, labels):\n",
    "    # 正负样本分割\n",
    "    pos_nodes = []\n",
    "    neg_nodes = cp.deepcopy(nodes)\n",
    "    aux_nodes = cp.deepcopy(nodes)\n",
    "    for idx, label in enumerate(labels):\n",
    "        if label == 1:\n",
    "            pos_nodes.append(aux_nodes[idx])\n",
    "            neg_nodes.remove(aux_nodes[idx])\n",
    "\n",
    "    return pos_nodes, neg_nodes\n",
    "\n",
    "\n",
    "def undersample(pos_nodes, neg_nodes, scale=1):\n",
    "    # 对负样本进行下采样，平衡正负样本数量\n",
    "    aux_nodes = cp.deepcopy(neg_nodes)\n",
    "    aux_nodes = rd.sample(aux_nodes, k=int(len(pos_nodes) * scale))\n",
    "    batch_nodes = pos_nodes + aux_nodes\n",
    "\n",
    "    return batch_nodes\n",
    "\n",
    "\n",
    "def calculate_g_mean(y_true, y_pred):\n",
    "    \"\"\"Geometric mean of per-class recalls, computed from the confusion matrix.\"\"\"\n",
    "    cm = confusion_matrix(y_true, y_pred)\n",
    "    recalls = []\n",
    "    for true_cls in range(len(cm)):\n",
    "        correct = cm[true_cls, true_cls]\n",
    "        # Row sum = TP + FN for this class; recall is 0 for an absent class.\n",
    "        total = cm[true_cls, :].sum()\n",
    "        recalls.append(correct / total if total != 0 else 0)\n",
    "    return np.prod(recalls) ** (1 / len(recalls))\n",
    "\n",
    "\n",
    "def iterate_batches(indices, batch_size, shuffle=True):\n",
    "    \"\"\"\n",
    "        将索引列表划分为指定大小的批次\n",
    "        :param indices: 样本索引列表\n",
    "        :param batch_size: 每个批次的大小\n",
    "        :param shuffle: 是否打乱顺序\n",
    "        :return: 生成批次索引的迭代器\n",
    "    \"\"\"\n",
    "    if shuffle:\n",
    "        rd.shuffle(indices)\n",
    "    for i in range(0, len(indices), batch_size):\n",
    "        yield indices[i:i + batch_size]\n",
    "\n",
    "\n",
    "def test(idx_eval, y_eval, model, feat_data, edge_indexs, device, batch_size=64):\n",
    "    \"\"\"\n",
    "    Evaluate the model on idx_eval and return (AUC, AP, macro-F1, G-mean).\n",
    "\n",
    "    :param idx_eval: node indices to evaluate (processed in the given order)\n",
    "    :param y_eval: ground-truth labels aligned positionally with idx_eval\n",
    "    :param model: trained model; model(subgraphs) -> (logits [B, 2], features)\n",
    "    :param feat_data: full node-feature tensor\n",
    "    :param edge_indexs: per-relation edge-index structures\n",
    "    :param device: torch device used for subgraph construction\n",
    "    :param batch_size: evaluation batch size\n",
    "    :return: (auc_score, ap_score, macro_f1, g_mean)\n",
    "    \"\"\"\n",
    "    model.eval()\n",
    "    all_probs = []\n",
    "    all_labels = []\n",
    "\n",
    "    # shuffle=False keeps idx_eval order, so positions in all_probs and\n",
    "    # all_labels stay aligned with idx_eval / y_eval.\n",
    "    offset = 0\n",
    "    for batch_centers in iterate_batches(idx_eval, batch_size, shuffle=False):\n",
    "        # Build a 1-hop subgraph around every centre node in the batch.\n",
    "        subgraph_data = [create_node_subgraph(xi, feat_data, edge_indexs, device)\n",
    "                         for xi in batch_centers]\n",
    "\n",
    "        with torch.no_grad():\n",
    "            center_logits, _ = model(subgraph_data)  # [B, 2]\n",
    "            # NOTE(review): the model emits log-softmax scores, so this is a\n",
    "            # softmax over log-probabilities; rankings (AUC/AP) are unchanged,\n",
    "            # but the 0.5 threshold below is not a true probability cut-off.\n",
    "            probs = torch.softmax(center_logits, dim=-1)[:, 1]\n",
    "            all_probs.extend(probs.cpu().numpy())\n",
    "            # Fix: read labels from the y_eval argument. The original ignored\n",
    "            # y_eval and indexed a global `labels` defined in another cell.\n",
    "            all_labels.extend(y_eval[offset:offset + len(batch_centers)])\n",
    "        offset += len(batch_centers)\n",
    "\n",
    "    # Aggregate metrics over the whole evaluation set.\n",
    "    auc_score = roc_auc_score(all_labels, all_probs)\n",
    "    ap_score = average_precision_score(all_labels, all_probs)\n",
    "    pred_labels = (np.array(all_probs) >= 0.5).astype(int)\n",
    "    f1 = f1_score(all_labels, pred_labels, average='macro')\n",
    "    g_mean = calculate_g_mean(all_labels, pred_labels)\n",
    "\n",
    "    return auc_score, ap_score, f1, g_mean\n",
    "\n",
    "\n",
    "class BSNE_Transformer(nn.Module):\n",
    "    \"\"\"\n",
    "    Multi-relation transformer classifier over per-node subgraphs.\n",
    "\n",
    "    Each relation type gets its own TransformerEncoder whose attention is\n",
    "    restricted by that relation's adjacency matrix (plus self-loops). The\n",
    "    centre node's per-relation embeddings are concatenated and classified\n",
    "    by an MLP head.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, in_feat, out_feat, relation_nums=3, d_model=256,\n",
    "                 nhead=8, num_layers=3, dim_feedforward=256,\n",
    "                 drop_rate=0.5):\n",
    "        \"\"\"\n",
    "        :param in_feat: input node-feature dimension\n",
    "        :param out_feat: number of output classes\n",
    "        :param relation_nums: number of relation types (one encoder each)\n",
    "        :param d_model: transformer embedding size\n",
    "        :param nhead: attention heads per encoder layer\n",
    "        :param num_layers: encoder layers per relation encoder\n",
    "        :param dim_feedforward: feed-forward size inside each encoder layer\n",
    "        :param drop_rate: dropout rate for encoders and the classifier\n",
    "        \"\"\"\n",
    "        super().__init__()\n",
    "        self.relation_nums = relation_nums\n",
    "        self.d_model = d_model\n",
    "        self.nhead = nhead\n",
    "\n",
    "        # Project raw node features into the transformer embedding space.\n",
    "        self.feature_proj = nn.Sequential(\n",
    "            nn.Linear(in_feat, d_model),\n",
    "            nn.LayerNorm(d_model),\n",
    "            nn.ReLU()\n",
    "        )\n",
    "        self.norm = nn.LayerNorm(d_model)\n",
    "\n",
    "        # One independent transformer encoder per relation type.\n",
    "        self.relation_encoders = nn.ModuleList([\n",
    "            TransformerEncoder(\n",
    "                TransformerEncoderLayer(\n",
    "                    d_model=d_model,\n",
    "                    nhead=nhead,\n",
    "                    dim_feedforward=dim_feedforward,\n",
    "                    dropout=drop_rate,\n",
    "                    batch_first=True\n",
    "                ),\n",
    "                num_layers=num_layers\n",
    "            ) for _ in range(relation_nums)\n",
    "        ])\n",
    "\n",
    "        # MLP head over the concatenated per-relation centre embeddings.\n",
    "        self.classifier = nn.Sequential(\n",
    "            nn.Linear(relation_nums * d_model, 512),\n",
    "            nn.ReLU(),\n",
    "            nn.Dropout(drop_rate),\n",
    "            nn.Linear(512, out_feat)\n",
    "        )\n",
    "        # Xavier-initialise all weight matrices (1-D params left at defaults).\n",
    "        for p in self.parameters():\n",
    "            if p.dim() > 1:\n",
    "                nn.init.xavier_uniform_(p)\n",
    "\n",
    "    #         self.node_feature_extractor = nn.Sequential(\n",
    "    #             nn.Linear(relation_nums * d_model, d_model),\n",
    "    #             nn.ReLU(),\n",
    "    #             nn.LayerNorm(d_model)\n",
    "    #         )\n",
    "\n",
    "    def forward(self, subgraph_batch):\n",
    "        \"\"\"\n",
    "        :param subgraph_batch: list of subgraph dicts; each has 'features'\n",
    "            ([num_nodes, in_feat] tensor with the centre node first) and\n",
    "            'edges' (per-relation [2, E] local edge-index tensors)\n",
    "        :return: (center_logits [B, out_feat] log-probabilities via\n",
    "                  log_softmax, center_features [B, relation_nums * d_model])\n",
    "        \"\"\"\n",
    "        # Process every subgraph independently (node counts vary, so they\n",
    "        # are not batched into a single tensor).\n",
    "        center_logits_list = []\n",
    "        center_features_list = []\n",
    "\n",
    "        for sg in subgraph_batch:\n",
    "            # Process one subgraph.\n",
    "            features = self.feature_proj(sg['features'].unsqueeze(0))  # [1, num_nodes, d_model]\n",
    "            features = self.norm(features)\n",
    "\n",
    "            # Encode the subgraph once per relation type.\n",
    "            rel_outputs = []\n",
    "            num_nodes = features.size(1)\n",
    "\n",
    "            for rel_idx in range(self.relation_nums):\n",
    "                # Dense adjacency matrix of the current relation.\n",
    "                edge_index = sg['edges'][rel_idx]\n",
    "                adj = torch.zeros(num_nodes, num_nodes,\n",
    "                                  dtype=torch.float, device=features.device)\n",
    "\n",
    "                if edge_index.size(1) > 0:\n",
    "                    src, dst = edge_index\n",
    "                    adj[src, dst] = 1.0\n",
    "\n",
    "                # Add self-loops so every row attends to at least itself.\n",
    "                adj[range(num_nodes), range(num_nodes)] = 1.0\n",
    "\n",
    "                # Additive attention mask: 0 where an edge exists, -inf elsewhere.\n",
    "                adj_mask = adj.masked_fill(adj == 0.0, float('-inf'))\n",
    "                adj_mask = adj_mask.masked_fill(adj == 1.0, 0.0)\n",
    "\n",
    "                # Replicate the mask for every attention head.\n",
    "                adj_mask = adj_mask.unsqueeze(0).unsqueeze(0)  # [1, 1, num_nodes, num_nodes]\n",
    "                adj_mask = adj_mask.expand(1, self.nhead, num_nodes, num_nodes)\n",
    "                adj_mask = adj_mask.reshape(-1, num_nodes, num_nodes)\n",
    "\n",
    "                # Relation-specific transformer encoding.\n",
    "                encoder_output = self.relation_encoders[rel_idx](\n",
    "                    src=features,\n",
    "                    mask=adj_mask\n",
    "                )\n",
    "                rel_outputs.append(encoder_output)\n",
    "\n",
    "            # Concatenate the per-relation encodings.\n",
    "            combined = torch.cat(rel_outputs, dim=-1)  # [1, num_nodes, rel*d_model]\n",
    "\n",
    "            # Centre-node representation (the first node by construction).\n",
    "            center_features = combined[:, 0, :]  # [1, rel*d_model]\n",
    "            center_logits = self.classifier(center_features)  # [1, out_feat]\n",
    "            center_logits = F.log_softmax(center_logits, dim=-1)\n",
    "\n",
    "            center_logits_list.append(center_logits)\n",
    "            center_features_list.append(center_features)\n",
    "\n",
    "        # Stack per-subgraph results into batch tensors.\n",
    "        center_logits = torch.cat(center_logits_list, dim=0)\n",
    "        center_features = torch.cat(center_features_list, dim=0)\n",
    "\n",
    "        return center_logits, center_features\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "81c3027f",
   "metadata": {},
   "outputs": [],
   "source": [
    "   "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "50a6d575",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cuda\n",
      "loading data...\n",
      "\n",
      "=== Starting Pretraining ===\n",
      "\n",
      "=== Pretraining Epoch 0 (Center Node: 268) ===\n",
      "Step 0: local_loss: 22.743011474609375\n",
      "Step 1: local_loss: 21.630773544311523\n",
      "Step 2: local_loss: 23.551734924316406\n",
      "Step 3: local_loss: 19.604684829711914\n",
      "Step 4: local_loss: 23.051990509033203\n",
      "Step 5: local_loss: 21.404428482055664\n",
      "Step 6: local_loss: 21.70035171508789\n",
      "Step 7: local_loss: 21.635469436645508\n",
      "Step 8: local_loss: 22.81314468383789\n",
      "Step 9: local_loss: 22.013681411743164\n",
      "Step 10: local_loss: 21.29526138305664\n",
      "Step 11: local_loss: 19.80583953857422\n",
      "Step 12: local_loss: 19.959632873535156\n",
      "Step 13: local_loss: 23.96780776977539\n",
      "Step 14: local_loss: 20.592151641845703\n",
      "Step 15: local_loss: 21.828487396240234\n",
      "Step 16: local_loss: 22.068681716918945\n",
      "Step 17: local_loss: 20.3740291595459\n",
      "Step 18: local_loss: 19.441516876220703\n",
      "Step 19: local_loss: 22.19944953918457\n",
      "Step 20: local_loss: 22.587182998657227\n",
      "Step 21: local_loss: 23.23703384399414\n",
      "Step 22: local_loss: 21.114776611328125\n",
      "Step 23: local_loss: 19.731983184814453\n",
      "Step 24: local_loss: 22.02873992919922\n",
      "Step 25: local_loss: 21.340959548950195\n",
      "Step 26: local_loss: 18.537858963012695\n",
      "Step 27: local_loss: 24.979448318481445\n",
      "Step 28: local_loss: 21.652395248413086\n",
      "Step 29: local_loss: 18.879053115844727\n",
      "Step 30: local_loss: 19.138221740722656\n",
      "Step 31: local_loss: 23.06127166748047\n",
      "Step 32: local_loss: 21.555402755737305\n",
      "Step 33: local_loss: 22.472822189331055\n",
      "Step 34: local_loss: 20.723360061645508\n",
      "Step 35: local_loss: 19.990520477294922\n",
      "Step 36: local_loss: 22.19894790649414\n",
      "Step 37: local_loss: 22.06330108642578\n",
      "Step 38: local_loss: 20.7579402923584\n",
      "Step 39: local_loss: 21.15227699279785\n",
      "Step 40: local_loss: 19.97917938232422\n",
      "Step 41: local_loss: 22.796714782714844\n",
      "Step 42: local_loss: 23.542316436767578\n",
      "Step 43: local_loss: 24.002948760986328\n",
      "Step 44: local_loss: 22.147968292236328\n",
      "Step 45: local_loss: 22.4528751373291\n",
      "Step 46: local_loss: 20.896806716918945\n",
      "Step 47: local_loss: 18.671350479125977\n",
      "Step 48: local_loss: 23.447845458984375\n",
      "Step 49: local_loss: 18.874399185180664\n",
      "Step 50: local_loss: 23.78813362121582\n",
      "Step 51: local_loss: 20.725555419921875\n",
      "Step 52: local_loss: 21.943010330200195\n",
      "Step 53: local_loss: 21.346160888671875\n",
      "Step 54: local_loss: 20.348093032836914\n",
      "Step 55: local_loss: 22.006006240844727\n",
      "Step 56: local_loss: 21.25057029724121\n",
      "Step 57: local_loss: 20.925029754638672\n",
      "Step 58: local_loss: 21.903093338012695\n",
      "Step 59: local_loss: 20.740840911865234\n",
      "Step 60: local_loss: 22.239046096801758\n",
      "Step 61: local_loss: 22.153894424438477\n",
      "Step 62: local_loss: 20.766786575317383\n",
      "Step 63: local_loss: 22.167444229125977\n",
      "Step 64: local_loss: 20.97481918334961\n",
      "Step 65: local_loss: 21.67381477355957\n",
      "Step 66: local_loss: 21.165939331054688\n",
      "Step 67: local_loss: 18.648235321044922\n",
      "Step 68: local_loss: 17.40168571472168\n",
      "Step 69: local_loss: 20.98682975769043\n",
      "Step 70: local_loss: 24.171297073364258\n",
      "Step 71: local_loss: 20.25983238220215\n",
      "Step 72: local_loss: 19.916244506835938\n",
      "Step 73: local_loss: 23.1492862701416\n",
      "Step 74: local_loss: 17.787033081054688\n",
      "Step 75: local_loss: 19.32673454284668\n",
      "Step 76: local_loss: 23.180526733398438\n",
      "Step 77: local_loss: 19.774660110473633\n",
      "Step 78: local_loss: 19.61193084716797\n",
      "Step 79: local_loss: 21.71895408630371\n",
      "Step 80: local_loss: 22.019580841064453\n",
      "Step 81: local_loss: 21.542734146118164\n",
      "Step 82: local_loss: 19.73023796081543\n",
      "Step 83: local_loss: 20.61797332763672\n",
      "Step 84: local_loss: 20.621667861938477\n",
      "Step 85: local_loss: 22.5573787689209\n",
      "Step 86: local_loss: 22.616846084594727\n",
      "Step 87: local_loss: 20.660030364990234\n",
      "Step 88: local_loss: 22.315547943115234\n",
      "Step 89: local_loss: 20.704837799072266\n",
      "Step 90: local_loss: 22.35274887084961\n",
      "Step 91: local_loss: 19.654264450073242\n",
      "Step 92: local_loss: 18.290430068969727\n",
      "Step 93: local_loss: 21.472261428833008\n",
      "Step 94: local_loss: 21.242630004882812\n",
      "Step 95: local_loss: 21.120269775390625\n",
      "Step 96: local_loss: 21.301286697387695\n",
      "Step 97: local_loss: 20.862098693847656\n",
      "Step 98: local_loss: 21.057817459106445\n",
      "Step 99: local_loss: 18.34090232849121\n",
      "Center node 268 finished at step 100 with loss 18.3409\n",
      "\n",
      "=== Pretraining Epoch 1 (Center Node: 7572) ===\n",
      "Step 0: local_loss: 18.783838272094727\n",
      "Step 1: local_loss: 17.38287925720215\n",
      "Step 2: local_loss: 15.651813507080078\n",
      "Step 3: local_loss: 16.988128662109375\n",
      "Step 4: local_loss: 17.053646087646484\n",
      "Step 5: local_loss: 15.777667999267578\n",
      "Step 6: local_loss: 14.417561531066895\n",
      "Step 7: local_loss: 19.305932998657227\n",
      "Step 8: local_loss: 15.302616119384766\n",
      "Step 9: local_loss: 17.396900177001953\n",
      "Step 10: local_loss: 16.560495376586914\n",
      "Step 11: local_loss: 14.59826374053955\n",
      "Step 12: local_loss: 19.12807273864746\n",
      "Step 13: local_loss: 14.486401557922363\n",
      "Step 14: local_loss: 15.888385772705078\n",
      "Step 15: local_loss: 11.874855995178223\n",
      "Step 16: local_loss: 17.591655731201172\n",
      "Step 17: local_loss: 13.569513320922852\n",
      "Step 18: local_loss: 17.760526657104492\n",
      "Step 19: local_loss: 17.408096313476562\n",
      "Step 20: local_loss: 15.565116882324219\n",
      "Step 21: local_loss: 17.29791831970215\n",
      "Step 22: local_loss: 16.96759033203125\n",
      "Step 23: local_loss: 18.790050506591797\n",
      "Step 24: local_loss: 17.42145347595215\n",
      "Step 25: local_loss: 17.2171573638916\n",
      "Step 26: local_loss: 16.30634880065918\n",
      "Step 27: local_loss: 14.3755464553833\n",
      "Step 28: local_loss: 15.669811248779297\n",
      "Step 29: local_loss: 16.50704574584961\n",
      "Step 30: local_loss: 17.42561912536621\n",
      "Step 31: local_loss: 15.890236854553223\n",
      "Step 32: local_loss: 14.815683364868164\n",
      "Step 33: local_loss: 14.728540420532227\n",
      "Step 34: local_loss: 17.09799575805664\n",
      "Step 35: local_loss: 13.747146606445312\n",
      "Step 36: local_loss: 14.534600257873535\n",
      "Step 37: local_loss: 15.341940879821777\n",
      "Step 38: local_loss: 15.224967002868652\n",
      "Step 39: local_loss: 16.717512130737305\n",
      "Step 40: local_loss: 17.80699348449707\n",
      "Step 41: local_loss: 15.720580101013184\n",
      "Step 42: local_loss: 15.698163986206055\n",
      "Step 43: local_loss: 14.007818222045898\n",
      "Step 44: local_loss: 17.15212631225586\n",
      "Step 45: local_loss: 16.024169921875\n",
      "Step 46: local_loss: 13.006643295288086\n",
      "Step 47: local_loss: 14.673269271850586\n",
      "Step 48: local_loss: 14.802223205566406\n",
      "Step 49: local_loss: 13.82278060913086\n",
      "Step 50: local_loss: 14.447976112365723\n",
      "Step 51: local_loss: 14.954352378845215\n",
      "Step 52: local_loss: 16.84776496887207\n",
      "Step 53: local_loss: 15.692940711975098\n",
      "Step 54: local_loss: 14.54338264465332\n",
      "Step 55: local_loss: 15.302814483642578\n",
      "Step 56: local_loss: 16.90806007385254\n",
      "Step 57: local_loss: 16.233928680419922\n",
      "Step 58: local_loss: 14.193130493164062\n",
      "Step 59: local_loss: 19.850807189941406\n",
      "Step 60: local_loss: 16.576887130737305\n",
      "Step 61: local_loss: 14.92121696472168\n",
      "Step 62: local_loss: 14.474388122558594\n",
      "Step 63: local_loss: 14.658883094787598\n",
      "Step 64: local_loss: 16.05258560180664\n",
      "Step 65: local_loss: 18.18682861328125\n",
      "Step 66: local_loss: 14.644612312316895\n",
      "Step 67: local_loss: 15.549245834350586\n",
      "Step 68: local_loss: 14.628191947937012\n",
      "Step 69: local_loss: 14.104988098144531\n",
      "Step 70: local_loss: 11.447809219360352\n",
      "Step 71: local_loss: 12.336349487304688\n",
      "Step 72: local_loss: 16.22901153564453\n",
      "Step 73: local_loss: 15.728437423706055\n",
      "Step 74: local_loss: 14.745750427246094\n",
      "Step 75: local_loss: 14.871647834777832\n",
      "Step 76: local_loss: 16.94740867614746\n",
      "Step 77: local_loss: 12.502545356750488\n",
      "Step 78: local_loss: 12.654134750366211\n",
      "Step 79: local_loss: 10.915334701538086\n",
      "Step 80: local_loss: 10.433374404907227\n",
      "Step 81: local_loss: 12.669794082641602\n",
      "Step 82: local_loss: 12.7332124710083\n",
      "Step 83: local_loss: 13.702958106994629\n",
      "Step 84: local_loss: 12.76926326751709\n",
      "Step 85: local_loss: 13.80978012084961\n",
      "Step 86: local_loss: 10.769034385681152\n",
      "Step 87: local_loss: 10.576833724975586\n",
      "Step 88: local_loss: 12.083093643188477\n",
      "Step 89: local_loss: 12.690272331237793\n",
      "Step 90: local_loss: 11.823529243469238\n",
      "Step 91: local_loss: 12.103954315185547\n",
      "Step 92: local_loss: 11.167287826538086\n",
      "Step 93: local_loss: 10.07229232788086\n",
      "Step 94: local_loss: 13.502108573913574\n",
      "Step 95: local_loss: 11.109376907348633\n",
      "Step 96: local_loss: 11.026754379272461\n",
      "Step 97: local_loss: 12.730220794677734\n",
      "Step 98: local_loss: 12.739757537841797\n",
      "Step 99: local_loss: 11.209835052490234\n",
      "Center node 7572 finished at step 100 with loss 11.2098\n",
      "\n",
      "=== Pretraining Epoch 2 (Center Node: 7964) ===\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 0: local_loss: 16.754117965698242\n",
      "Step 1: local_loss: 16.91837501525879\n",
      "Step 2: local_loss: 16.779497146606445\n",
      "Step 3: local_loss: 17.90238380432129\n",
      "Step 4: local_loss: 16.656877517700195\n",
      "Step 5: local_loss: 18.15997314453125\n",
      "Step 6: local_loss: 15.658217430114746\n",
      "Step 7: local_loss: 17.476011276245117\n",
      "Step 8: local_loss: 16.293540954589844\n",
      "Step 9: local_loss: 14.329795837402344\n",
      "Step 10: local_loss: 19.222795486450195\n",
      "Step 11: local_loss: 17.959510803222656\n",
      "Step 12: local_loss: 14.839151382446289\n",
      "Step 13: local_loss: 17.099809646606445\n",
      "Step 14: local_loss: 18.296293258666992\n",
      "Step 15: local_loss: 14.653326988220215\n",
      "Step 16: local_loss: 14.855155944824219\n",
      "Step 17: local_loss: 17.5089168548584\n",
      "Step 18: local_loss: 14.042901039123535\n",
      "Step 19: local_loss: 11.065705299377441\n",
      "Step 20: local_loss: 17.374752044677734\n",
      "Step 21: local_loss: 15.143600463867188\n",
      "Step 22: local_loss: 14.003353118896484\n",
      "Step 23: local_loss: 14.699555397033691\n",
      "Step 24: local_loss: 15.07144832611084\n",
      "Step 25: local_loss: 16.583566665649414\n",
      "Step 26: local_loss: 16.230918884277344\n",
      "Step 27: local_loss: 12.601951599121094\n",
      "Step 28: local_loss: 15.560201644897461\n",
      "Step 29: local_loss: 13.861422538757324\n",
      "Step 30: local_loss: 17.53662109375\n",
      "Step 31: local_loss: 15.25755500793457\n",
      "Step 32: local_loss: 15.92314338684082\n",
      "Step 33: local_loss: 13.557485580444336\n",
      "Step 34: local_loss: 13.481889724731445\n",
      "Step 35: local_loss: 16.471670150756836\n",
      "Step 36: local_loss: 13.079158782958984\n",
      "Step 37: local_loss: 15.898930549621582\n",
      "Step 38: local_loss: 13.972370147705078\n",
      "Step 39: local_loss: 14.962238311767578\n",
      "Step 40: local_loss: 15.713293075561523\n",
      "Step 41: local_loss: 13.02350902557373\n",
      "Step 42: local_loss: 13.453707695007324\n",
      "Step 43: local_loss: 13.65449333190918\n",
      "Step 44: local_loss: 12.533037185668945\n",
      "Step 45: local_loss: 12.099661827087402\n",
      "Step 46: local_loss: 11.522624015808105\n",
      "Step 47: local_loss: 13.983675956726074\n",
      "Step 48: local_loss: 11.79383373260498\n",
      "Step 49: local_loss: 12.32363510131836\n",
      "Step 50: local_loss: 16.109418869018555\n",
      "Step 51: local_loss: 13.633210182189941\n",
      "Step 52: local_loss: 12.115607261657715\n",
      "Step 53: local_loss: 13.11174488067627\n",
      "Step 54: local_loss: 17.14703941345215\n",
      "Step 55: local_loss: 14.442611694335938\n",
      "Step 56: local_loss: 10.277677536010742\n",
      "Step 57: local_loss: 14.889280319213867\n",
      "Step 58: local_loss: 14.138233184814453\n",
      "Step 59: local_loss: 13.18144416809082\n",
      "Step 60: local_loss: 13.41242790222168\n",
      "Step 61: local_loss: 10.620720863342285\n",
      "Step 62: local_loss: 11.752236366271973\n",
      "Step 63: local_loss: 12.582361221313477\n",
      "Step 64: local_loss: 12.638511657714844\n",
      "Step 65: local_loss: 14.737371444702148\n",
      "Step 66: local_loss: 12.568758964538574\n",
      "Step 67: local_loss: 14.519932746887207\n",
      "Step 68: local_loss: 10.382108688354492\n",
      "Step 69: local_loss: 10.166996955871582\n",
      "Step 70: local_loss: 11.411060333251953\n",
      "Step 71: local_loss: 12.563882827758789\n",
      "Step 72: local_loss: 13.303482055664062\n",
      "Step 73: local_loss: 12.552863121032715\n",
      "Step 74: local_loss: 12.459322929382324\n",
      "Step 75: local_loss: 12.894918441772461\n",
      "Step 76: local_loss: 11.278231620788574\n",
      "Step 77: local_loss: 12.443333625793457\n",
      "Step 78: local_loss: 11.699679374694824\n",
      "Step 79: local_loss: 10.765985488891602\n",
      "Step 80: local_loss: 11.195624351501465\n",
      "Step 81: local_loss: 11.875199317932129\n",
      "Step 82: local_loss: 9.701939582824707\n",
      "Step 83: local_loss: 11.382908821105957\n",
      "Step 84: local_loss: 13.49209213256836\n",
      "Step 85: local_loss: 11.235051155090332\n",
      "Step 86: local_loss: 11.553586959838867\n",
      "Step 87: local_loss: 13.025548934936523\n",
      "Step 88: local_loss: 10.687942504882812\n",
      "Step 89: local_loss: 10.988456726074219\n",
      "Step 90: local_loss: 12.454767227172852\n",
      "Step 91: local_loss: 11.327982902526855\n",
      "Step 92: local_loss: 12.998970985412598\n",
      "Step 93: local_loss: 12.182092666625977\n",
      "Step 94: local_loss: 9.384035110473633\n",
      "Step 95: local_loss: 9.560519218444824\n",
      "Step 96: local_loss: 9.658470153808594\n",
      "Step 97: local_loss: 10.839054107666016\n",
      "Step 98: local_loss: 11.177996635437012\n",
      "Step 99: local_loss: 12.830595016479492\n",
      "Center node 7964 finished at step 100 with loss 12.8306\n",
      "\n",
      "=== Pretraining Epoch 3 (Center Node: 7312) ===\n",
      "Step 0: local_loss: 10.398693084716797\n",
      "Step 1: local_loss: 8.911346435546875\n",
      "Step 2: local_loss: 10.81093978881836\n",
      "Step 3: local_loss: 14.195866584777832\n",
      "Step 4: local_loss: 10.466085433959961\n",
      "Step 5: local_loss: 9.237317085266113\n",
      "Step 6: local_loss: 12.42180061340332\n",
      "Step 7: local_loss: 9.346487045288086\n",
      "Step 8: local_loss: 8.915030479431152\n",
      "Step 9: local_loss: 10.345846176147461\n",
      "Step 10: local_loss: 9.646509170532227\n",
      "Step 11: local_loss: 12.384398460388184\n",
      "Step 12: local_loss: 11.177815437316895\n",
      "Step 13: local_loss: 11.120838165283203\n",
      "Step 14: local_loss: 10.4934720993042\n",
      "Step 15: local_loss: 7.131105422973633\n",
      "Step 16: local_loss: 9.239835739135742\n",
      "Step 17: local_loss: 6.973435878753662\n",
      "Step 18: local_loss: 7.870360851287842\n",
      "Step 19: local_loss: 9.533658981323242\n",
      "Step 20: local_loss: 8.364072799682617\n",
      "Step 21: local_loss: 10.604110717773438\n",
      "Step 22: local_loss: 7.824256896972656\n",
      "Step 23: local_loss: 9.318231582641602\n",
      "Step 24: local_loss: 11.022616386413574\n",
      "Step 25: local_loss: 8.0265531539917\n",
      "Step 26: local_loss: 9.334905624389648\n",
      "Step 27: local_loss: 9.693342208862305\n",
      "Step 28: local_loss: 10.539758682250977\n",
      "Step 29: local_loss: 9.521905899047852\n",
      "Step 30: local_loss: 11.493864059448242\n",
      "Step 31: local_loss: 8.733633041381836\n",
      "Step 32: local_loss: 9.243410110473633\n",
      "Step 33: local_loss: 11.464179992675781\n",
      "Step 34: local_loss: 8.057110786437988\n",
      "Step 35: local_loss: 6.91462516784668\n",
      "Step 36: local_loss: 9.712748527526855\n",
      "Step 37: local_loss: 9.687972068786621\n",
      "Step 38: local_loss: 12.205376625061035\n",
      "Step 39: local_loss: 11.364213943481445\n",
      "Step 40: local_loss: 11.397501945495605\n",
      "Step 41: local_loss: 7.23866605758667\n",
      "Step 42: local_loss: 9.967370986938477\n",
      "Step 43: local_loss: 8.324380874633789\n",
      "Step 44: local_loss: 8.857231140136719\n",
      "Step 45: local_loss: 7.345305442810059\n",
      "Step 46: local_loss: 8.132174491882324\n",
      "Step 47: local_loss: 7.862095832824707\n",
      "Step 48: local_loss: 13.738770484924316\n",
      "Step 49: local_loss: 12.541729927062988\n",
      "Step 50: local_loss: 9.872453689575195\n",
      "Step 51: local_loss: 9.21006965637207\n",
      "Step 52: local_loss: 9.937644004821777\n",
      "Step 53: local_loss: 9.94309139251709\n",
      "Step 54: local_loss: 7.539446830749512\n",
      "Step 55: local_loss: 10.765592575073242\n",
      "Step 56: local_loss: 8.98691177368164\n",
      "Step 57: local_loss: 9.429049491882324\n",
      "Step 58: local_loss: 8.813859939575195\n",
      "Step 59: local_loss: 11.355937004089355\n",
      "Step 60: local_loss: 7.2154412269592285\n",
      "Step 61: local_loss: 11.627694129943848\n",
      "Step 62: local_loss: 9.595976829528809\n",
      "Step 63: local_loss: 7.086431980133057\n",
      "Step 64: local_loss: 7.811825752258301\n",
      "Step 65: local_loss: 5.707818031311035\n",
      "Step 66: local_loss: 8.409320831298828\n",
      "Step 67: local_loss: 9.96977424621582\n",
      "Step 68: local_loss: 7.674366474151611\n",
      "Step 69: local_loss: 8.191495895385742\n",
      "Step 70: local_loss: 7.313839912414551\n",
      "Step 71: local_loss: 7.482495307922363\n",
      "Step 72: local_loss: 8.359919548034668\n",
      "Step 73: local_loss: 10.146647453308105\n",
      "Step 74: local_loss: 8.897411346435547\n",
      "Step 75: local_loss: 7.730520725250244\n",
      "Step 76: local_loss: 8.860098838806152\n",
      "Step 77: local_loss: 7.373331546783447\n",
      "Step 78: local_loss: 7.425907135009766\n",
      "Step 79: local_loss: 9.582971572875977\n",
      "Step 80: local_loss: 10.907853126525879\n",
      "Step 81: local_loss: 6.328234672546387\n",
      "Step 82: local_loss: 10.454989433288574\n",
      "Step 83: local_loss: 8.86147689819336\n",
      "Step 84: local_loss: 9.823010444641113\n",
      "Step 85: local_loss: 6.923787593841553\n",
      "Step 86: local_loss: 7.456772327423096\n",
      "Step 87: local_loss: 8.131770133972168\n",
      "Step 88: local_loss: 9.630104064941406\n",
      "Step 89: local_loss: 8.649916648864746\n",
      "Step 90: local_loss: 8.05765151977539\n",
      "Step 91: local_loss: 8.814689636230469\n",
      "Step 92: local_loss: 10.001124382019043\n",
      "Step 93: local_loss: 9.09598445892334\n",
      "Step 94: local_loss: 9.080443382263184\n",
      "Step 95: local_loss: 8.944622993469238\n",
      "Step 96: local_loss: 9.23458480834961\n",
      "Step 97: local_loss: 8.99181079864502\n",
      "Step 98: local_loss: 9.076982498168945\n",
      "Step 99: local_loss: 9.421091079711914\n",
      "Center node 7312 finished at step 100 with loss 9.4211\n",
      "\n",
      "=== Pretraining Epoch 4 (Center Node: 6714) ===\n",
      "Step 0: local_loss: 10.225748062133789\n",
      "Step 1: local_loss: 9.491355895996094\n",
      "Step 2: local_loss: 10.91841983795166\n",
      "Step 3: local_loss: 9.092083930969238\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 4: local_loss: 10.53171157836914\n",
      "Step 5: local_loss: 12.329124450683594\n",
      "Step 6: local_loss: 9.784078598022461\n",
      "Step 7: local_loss: 10.862380981445312\n",
      "Step 8: local_loss: 11.451902389526367\n",
      "Step 9: local_loss: 11.50141429901123\n",
      "Step 10: local_loss: 12.306781768798828\n",
      "Step 11: local_loss: 7.876138687133789\n",
      "Step 12: local_loss: 8.954400062561035\n",
      "Step 13: local_loss: 10.548792839050293\n",
      "Step 14: local_loss: 11.244279861450195\n",
      "Step 15: local_loss: 10.740097045898438\n",
      "Step 16: local_loss: 8.520780563354492\n",
      "Step 17: local_loss: 7.187795162200928\n",
      "Step 18: local_loss: 9.56901741027832\n",
      "Step 19: local_loss: 11.68838119506836\n",
      "Step 20: local_loss: 8.311100959777832\n",
      "Step 21: local_loss: 8.962140083312988\n",
      "Step 22: local_loss: 6.8514509201049805\n",
      "Step 23: local_loss: 7.66294002532959\n",
      "Step 24: local_loss: 9.627362251281738\n",
      "Step 25: local_loss: 7.973191738128662\n",
      "Step 26: local_loss: 8.298808097839355\n",
      "Step 27: local_loss: 9.723210334777832\n",
      "Step 28: local_loss: 12.844444274902344\n",
      "Step 29: local_loss: 8.615443229675293\n",
      "Step 30: local_loss: 10.460441589355469\n",
      "Step 31: local_loss: 12.559935569763184\n",
      "Step 32: local_loss: 8.3851318359375\n",
      "Step 33: local_loss: 10.404035568237305\n",
      "Step 34: local_loss: 9.317116737365723\n",
      "Step 35: local_loss: 8.377985000610352\n",
      "Step 36: local_loss: 8.155181884765625\n",
      "Step 37: local_loss: 8.689825057983398\n",
      "Step 38: local_loss: 10.550247192382812\n",
      "Step 39: local_loss: 10.044807434082031\n",
      "Step 40: local_loss: 11.914514541625977\n",
      "Step 41: local_loss: 9.861894607543945\n",
      "Step 42: local_loss: 10.144719123840332\n",
      "Step 43: local_loss: 7.607274532318115\n",
      "Step 44: local_loss: 10.189752578735352\n",
      "Step 45: local_loss: 10.042669296264648\n",
      "Step 46: local_loss: 8.406848907470703\n",
      "Step 47: local_loss: 9.736811637878418\n",
      "Step 48: local_loss: 8.402435302734375\n",
      "Step 49: local_loss: 7.803723335266113\n",
      "Step 50: local_loss: 8.83526611328125\n",
      "Step 51: local_loss: 8.741397857666016\n",
      "Step 52: local_loss: 8.346949577331543\n",
      "Step 53: local_loss: 8.426973342895508\n",
      "Step 54: local_loss: 6.724251747131348\n",
      "Step 55: local_loss: 6.687627792358398\n",
      "Step 56: local_loss: 7.288273334503174\n",
      "Step 57: local_loss: 8.555282592773438\n",
      "Step 58: local_loss: 6.156269073486328\n",
      "Step 59: local_loss: 9.883903503417969\n",
      "Step 60: local_loss: 14.989938735961914\n",
      "Step 61: local_loss: 7.3550238609313965\n",
      "Step 62: local_loss: 13.474610328674316\n",
      "Step 63: local_loss: 7.905755996704102\n",
      "Step 64: local_loss: 6.088587760925293\n",
      "Step 65: local_loss: 16.695343017578125\n",
      "Step 66: local_loss: 15.323702812194824\n",
      "Step 67: local_loss: 7.00501012802124\n",
      "Step 68: local_loss: 16.101343154907227\n",
      "Step 69: local_loss: 15.205201148986816\n",
      "Step 70: local_loss: 10.910310745239258\n",
      "Step 71: local_loss: 8.993077278137207\n",
      "Step 72: local_loss: 8.015755653381348\n",
      "Step 73: local_loss: 6.7484846115112305\n",
      "Step 74: local_loss: 8.221559524536133\n",
      "Step 75: local_loss: 7.665933132171631\n",
      "Step 76: local_loss: 9.591204643249512\n",
      "Step 77: local_loss: 9.814814567565918\n",
      "Step 78: local_loss: 11.529511451721191\n",
      "Step 79: local_loss: 9.255658149719238\n",
      "Step 80: local_loss: 9.3305082321167\n",
      "Step 81: local_loss: 11.58002758026123\n",
      "Step 82: local_loss: 9.70950698852539\n",
      "Step 83: local_loss: 8.652060508728027\n",
      "Step 84: local_loss: 8.781816482543945\n",
      "Step 85: local_loss: 9.603877067565918\n",
      "Step 86: local_loss: 9.21610164642334\n",
      "Step 87: local_loss: 7.5999016761779785\n",
      "Step 88: local_loss: 9.915277481079102\n",
      "Step 89: local_loss: 8.905925750732422\n",
      "Step 90: local_loss: 8.838955879211426\n",
      "Step 91: local_loss: 8.155563354492188\n",
      "Step 92: local_loss: 7.626068115234375\n",
      "Step 93: local_loss: 8.007561683654785\n",
      "Step 94: local_loss: 8.296546936035156\n",
      "Step 95: local_loss: 9.501653671264648\n",
      "Step 96: local_loss: 7.979157447814941\n",
      "Step 97: local_loss: 8.553762435913086\n",
      "Step 98: local_loss: 9.404732704162598\n",
      "Step 99: local_loss: 11.584166526794434\n",
      "Center node 6714 finished at step 100 with loss 11.5842\n",
      "\n",
      "=== Pretraining Epoch 5 (Center Node: 7617) ===\n",
      "Step 0: local_loss: 13.037437438964844\n",
      "Step 1: local_loss: 17.19024658203125\n",
      "Step 2: local_loss: 17.803199768066406\n",
      "Step 3: local_loss: 18.831344604492188\n",
      "Step 4: local_loss: 14.432868957519531\n",
      "Step 5: local_loss: 18.385299682617188\n",
      "Step 6: local_loss: 20.228866577148438\n",
      "Step 7: local_loss: 18.52772331237793\n",
      "Step 8: local_loss: 18.189990997314453\n",
      "Step 9: local_loss: 19.884140014648438\n",
      "Step 10: local_loss: 15.02936840057373\n",
      "Step 11: local_loss: 16.447650909423828\n",
      "Step 12: local_loss: 17.011451721191406\n",
      "Step 13: local_loss: 14.731790542602539\n",
      "Step 14: local_loss: 17.210693359375\n",
      "Step 15: local_loss: 16.2391300201416\n",
      "Step 16: local_loss: 16.3844051361084\n",
      "Step 17: local_loss: 17.083251953125\n",
      "Step 18: local_loss: 16.616178512573242\n",
      "Step 19: local_loss: 14.424715995788574\n",
      "Step 20: local_loss: 16.815216064453125\n",
      "Step 21: local_loss: 18.93185806274414\n",
      "Step 22: local_loss: 13.662430763244629\n",
      "Step 23: local_loss: 15.981929779052734\n",
      "Step 24: local_loss: 14.926813125610352\n",
      "Step 25: local_loss: 18.399015426635742\n",
      "Step 26: local_loss: 17.44193458557129\n",
      "Step 27: local_loss: 15.255837440490723\n",
      "Step 28: local_loss: 15.404958724975586\n",
      "Step 29: local_loss: 13.088643074035645\n",
      "Step 30: local_loss: 15.268126487731934\n",
      "Step 31: local_loss: 13.382098197937012\n",
      "Step 32: local_loss: 17.430953979492188\n",
      "Step 33: local_loss: 16.248674392700195\n",
      "Step 34: local_loss: 12.485954284667969\n",
      "Step 35: local_loss: 17.102148056030273\n",
      "Step 36: local_loss: 17.07611846923828\n",
      "Step 37: local_loss: 15.027693748474121\n",
      "Step 38: local_loss: 16.107816696166992\n",
      "Step 39: local_loss: 15.612380027770996\n",
      "Step 40: local_loss: 14.580780982971191\n",
      "Step 41: local_loss: 13.581321716308594\n",
      "Step 42: local_loss: 14.772405624389648\n",
      "Step 43: local_loss: 14.212759017944336\n",
      "Step 44: local_loss: 17.8087100982666\n",
      "Step 45: local_loss: 10.641522407531738\n",
      "Step 46: local_loss: 14.118867874145508\n",
      "Step 47: local_loss: 13.266080856323242\n",
      "Step 48: local_loss: 12.972128868103027\n",
      "Step 49: local_loss: 12.454014778137207\n",
      "Step 50: local_loss: 15.32199478149414\n",
      "Step 51: local_loss: 11.786645889282227\n",
      "Step 52: local_loss: 11.280749320983887\n",
      "Step 53: local_loss: 14.585531234741211\n",
      "Step 54: local_loss: 14.800816535949707\n",
      "Step 55: local_loss: 12.053383827209473\n",
      "Step 56: local_loss: 13.068581581115723\n",
      "Step 57: local_loss: 13.084967613220215\n",
      "Step 58: local_loss: 12.221298217773438\n",
      "Step 59: local_loss: 14.35141372680664\n",
      "Step 60: local_loss: 10.173028945922852\n",
      "Step 61: local_loss: 15.84659481048584\n",
      "Step 62: local_loss: 13.488619804382324\n",
      "Step 63: local_loss: 12.069604873657227\n",
      "Step 64: local_loss: 14.331154823303223\n",
      "Step 65: local_loss: 13.780364990234375\n",
      "Step 66: local_loss: 12.152162551879883\n",
      "Step 67: local_loss: 13.472814559936523\n",
      "Step 68: local_loss: 11.692625999450684\n",
      "Step 69: local_loss: 13.193940162658691\n",
      "Step 70: local_loss: 14.188100814819336\n",
      "Step 71: local_loss: 14.493524551391602\n",
      "Step 72: local_loss: 15.185263633728027\n",
      "Step 73: local_loss: 13.965338706970215\n",
      "Step 74: local_loss: 12.36180305480957\n",
      "Step 75: local_loss: 9.960836410522461\n",
      "Step 76: local_loss: 12.877182006835938\n",
      "Step 77: local_loss: 13.42888355255127\n",
      "Step 78: local_loss: 13.450308799743652\n",
      "Step 79: local_loss: 16.11095428466797\n",
      "Step 80: local_loss: 13.014283180236816\n",
      "Step 81: local_loss: 18.86994743347168\n",
      "Step 82: local_loss: 13.467360496520996\n",
      "Step 83: local_loss: 13.14906120300293\n",
      "Step 84: local_loss: 11.51782512664795\n",
      "Step 85: local_loss: 12.14631175994873\n",
      "Step 86: local_loss: 11.009239196777344\n",
      "Step 87: local_loss: 13.135232925415039\n",
      "Step 88: local_loss: 10.60478401184082\n",
      "Step 89: local_loss: 14.483327865600586\n",
      "Step 90: local_loss: 12.189980506896973\n",
      "Step 91: local_loss: 14.823780059814453\n",
      "Step 92: local_loss: 16.224327087402344\n",
      "Step 93: local_loss: 13.673521995544434\n",
      "Step 94: local_loss: 12.102941513061523\n",
      "Step 95: local_loss: 13.624246597290039\n",
      "Step 96: local_loss: 15.156556129455566\n",
      "Step 97: local_loss: 12.341919898986816\n",
      "Step 98: local_loss: 12.517904281616211\n",
      "Step 99: local_loss: 14.46068286895752\n",
      "Center node 7617 finished at step 100 with loss 14.4607\n",
      "\n",
      "=== Pretraining Epoch 6 (Center Node: 297) ===\n",
      "Step 0: local_loss: 12.90583324432373\n",
      "Step 1: local_loss: 14.905290603637695\n",
      "Step 2: local_loss: 13.701567649841309\n",
      "Step 3: local_loss: 12.412507057189941\n",
      "Step 4: local_loss: 15.223517417907715\n",
      "Step 5: local_loss: 13.950122833251953\n",
      "Step 6: local_loss: 14.584826469421387\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 7: local_loss: 11.347424507141113\n",
      "Step 8: local_loss: 12.992358207702637\n",
      "Step 9: local_loss: 13.366048812866211\n",
      "Step 10: local_loss: 13.291609764099121\n",
      "Step 11: local_loss: 12.184608459472656\n",
      "Step 12: local_loss: 13.658440589904785\n",
      "Step 13: local_loss: 14.245928764343262\n",
      "Step 14: local_loss: 12.04153060913086\n",
      "Step 15: local_loss: 13.308830261230469\n",
      "Step 16: local_loss: 12.096667289733887\n",
      "Step 17: local_loss: 14.785066604614258\n",
      "Step 18: local_loss: 14.680047988891602\n",
      "Step 19: local_loss: 15.725667953491211\n",
      "Step 20: local_loss: 13.231932640075684\n",
      "Step 21: local_loss: 13.465392112731934\n",
      "Step 22: local_loss: 10.258857727050781\n",
      "Step 23: local_loss: 14.74690055847168\n",
      "Step 24: local_loss: 14.519699096679688\n",
      "Step 25: local_loss: 13.266250610351562\n",
      "Step 26: local_loss: 12.414070129394531\n",
      "Step 27: local_loss: 11.98022747039795\n",
      "Step 28: local_loss: 15.013325691223145\n",
      "Step 29: local_loss: 14.337992668151855\n",
      "Step 30: local_loss: 12.811017990112305\n",
      "Step 31: local_loss: 13.43238639831543\n",
      "Step 32: local_loss: 12.780192375183105\n",
      "Step 33: local_loss: 12.868799209594727\n",
      "Step 34: local_loss: 12.253166198730469\n",
      "Step 35: local_loss: 13.971607208251953\n",
      "Step 36: local_loss: 12.533782005310059\n",
      "Step 37: local_loss: 12.696002006530762\n",
      "Step 38: local_loss: 10.198037147521973\n",
      "Step 39: local_loss: 13.072569847106934\n",
      "Step 40: local_loss: 11.843391418457031\n",
      "Step 41: local_loss: 11.305428504943848\n",
      "Step 42: local_loss: 11.514513969421387\n",
      "Step 43: local_loss: 8.687065124511719\n",
      "Step 44: local_loss: 13.110005378723145\n",
      "Step 45: local_loss: 13.552085876464844\n",
      "Step 46: local_loss: 14.181018829345703\n",
      "Step 47: local_loss: 12.357112884521484\n",
      "Step 48: local_loss: 10.491992950439453\n",
      "Step 49: local_loss: 9.58849048614502\n",
      "Step 50: local_loss: 11.202964782714844\n",
      "Step 51: local_loss: 11.407807350158691\n",
      "Step 52: local_loss: 13.412734985351562\n",
      "Step 53: local_loss: 10.671000480651855\n",
      "Step 54: local_loss: 9.178324699401855\n",
      "Step 55: local_loss: 11.436535835266113\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
      "\u001b[31mKeyboardInterrupt\u001b[39m                         Traceback (most recent call last)",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[2]\u001b[39m\u001b[32m, line 149\u001b[39m\n\u001b[32m    147\u001b[39m bp_node_features = []\n\u001b[32m    148\u001b[39m \u001b[38;5;28;01mfor\u001b[39;00m node_idx \u001b[38;5;129;01min\u001b[39;00m bp_nodes:\n\u001b[32m--> \u001b[39m\u001b[32m149\u001b[39m     node_subgraph = \u001b[43mcreate_node_subgraph\u001b[49m\u001b[43m(\u001b[49m\u001b[43mnode_idx\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfeat_data\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43medge_indexs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdevice\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m    150\u001b[39m     _, node_feature = bsne_model([node_subgraph])\n\u001b[32m    151\u001b[39m     bp_node_features.append(node_feature.squeeze(\u001b[32m0\u001b[39m))\n",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[1]\u001b[39m\u001b[32m, line 49\u001b[39m, in \u001b[36mcreate_node_subgraph\u001b[39m\u001b[34m(node_idx, feat_data, edge_indexs, device)\u001b[39m\n\u001b[32m     47\u001b[39m neighbors = \u001b[38;5;28mset\u001b[39m()\n\u001b[32m     48\u001b[39m \u001b[38;5;28;01mfor\u001b[39;00m rel_idx \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(\u001b[38;5;28mlen\u001b[39m(edge_indexs)):\n\u001b[32m---> \u001b[39m\u001b[32m49\u001b[39m     edge_index = \u001b[43medge_indexs\u001b[49m\u001b[43m[\u001b[49m\u001b[43mrel_idx\u001b[49m\u001b[43m]\u001b[49m\u001b[43m[\u001b[49m\u001b[32;43m0\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m.\u001b[49m\u001b[43mcpu\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m.numpy()\n\u001b[32m     50\u001b[39m     \u001b[38;5;66;03m# 找出以中心点为起点的边的终点\u001b[39;00m\n\u001b[32m     51\u001b[39m     rel_neighbors = edge_index[\u001b[32m1\u001b[39m][edge_index[\u001b[32m0\u001b[39m] == node_idx].tolist()\n",
      "\u001b[31mKeyboardInterrupt\u001b[39m: "
     ]
    }
   ],
   "source": [
    "# 0717 test1\n",
    "# Parameter settings\n",
    "args = {\n",
    "    \"dataset\": \"amazon\",\n",
    "    #     \"dataset\": \"yelp\",\n",
    "    \"batch_size\": 8,\n",
    "    \"sample_size\": 50,\n",
    "    \"weight_decay\": 0.00005,\n",
    "    \"emb_size\": 32,\n",
    "    \"pretrain_epochs\": 50,\n",
    "    \"pretrain_lr\": 0.0002,  # alternative: 0.0005\n",
    "    \"finetune_lr\": 0.0005,\n",
    "    \"num_epochs\": 200,\n",
    "    \"pretrain_patience\": 20,\n",
    "    \"patience\": 30,\n",
    "    \"tsne_weight\": 0.3,\n",
    "    \"weight\": 0.6,\n",
    "    \"layers\": 7,\n",
    "    \"test_size\": 0.6,\n",
    "    \"val_size\": 0.5,\n",
    "    \"layers_tree\": 7,\n",
    "    \"seed\": 76,\n",
    "    \"num_heads\": 4,\n",
    "    \"num_layers\": 2,\n",
    "    \"drop_rate\": 0.5\n",
    "\n",
    "}\n",
    "\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "print(device)\n",
    "\n",
    "timestamp = time.strftime(\"%Y%m%d-%H%M%S\")\n",
    "writer = SummaryWriter(f'runs/{args[\"dataset\"]}_{timestamp}')\n",
    "\n",
    "print('loading data...')\n",
    "prefix = \"../../data/\"\n",
    "\n",
    "edge_indexs, feat_data, labels = load_data(args['dataset'], args['layers_tree'], prefix)\n",
    "\n",
    "np.random.seed(args['seed'])\n",
    "rd.seed(args['seed'])\n",
    "\n",
    "if args['dataset'] == 'yelp':\n",
    "    index = list(range(len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels, stratify=labels,\n",
    "                                                                    test_size=args['test_size'], random_state=2,\n",
    "                                                                    shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"YelpChi_shortest_distance.pkl\")\n",
    "elif args['dataset'] == 'amazon':\n",
    "    index = list(range(3305, len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels[3305:],\n",
    "                                                                    stratify=labels[3305:],\n",
    "                                                                    test_size=args['test_size'],\n",
    "                                                                    random_state=2, shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"Amazon_shortest_distance.pkl\")\n",
    "\n",
    "with open(dist_path, 'rb') as f:\n",
    "    dist_data = pickle.load(f)\n",
    "    dist_matrix = torch.tensor(dist_data['dist_matrix']).to(device)\n",
    "\n",
    "# Prepare feature data\n",
    "feat_data = torch.tensor(feat_data).float()\n",
    "# Normalize features\n",
    "scaler = MinMaxScaler()\n",
    "feat_data = torch.tensor(scaler.fit_transform(feat_data)).float().to(device)\n",
    "\n",
    "# Initialize the model\n",
    "bsne_model = BSNE_Transformer(\n",
    "    in_feat=feat_data.shape[1],\n",
    "    out_feat=2,\n",
    "    relation_nums=len(edge_indexs),\n",
    "    d_model=64,\n",
    "    nhead=args['num_heads'],\n",
    "    num_layers=args['num_layers'],\n",
    "    dim_feedforward=256,\n",
    "    drop_rate=args['drop_rate']\n",
    ").to(device)\n",
    "\n",
    "# Move edge indices to the device\n",
    "for edge_index in edge_indexs:\n",
    "    edge_index[0] = edge_index[0].to(device)\n",
    "    edge_index[1] = [tensor.to(device) for tensor in edge_index[1]]\n",
    "\n",
    "all_local_losses = []  # per-epoch list of batch local losses\n",
    "all_global_losses = []  # per-epoch list of batch global losses\n",
    "all_bsne_losses = []  # total BSNE loss per epoch\n",
    "epoch_avg_local = []  # average local loss per epoch\n",
    "epoch_avg_global = []  # average global loss per epoch\n",
    "\n",
    "print(\"\\n=== Starting Pretraining ===\")\n",
    "\n",
    "bsne_model.classifier.requires_grad_(False)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    filter(lambda p: p.requires_grad, bsne_model.parameters()),\n",
    "    lr=args['pretrain_lr'],\n",
    "    weight_decay=5e-5\n",
    ")\n",
    "pretrain_best_loss = float('inf')\n",
    "pretrain_no_improve = 0\n",
    "pretrain_early_stop = False\n",
    "\n",
    "temperature = 0.3  # smaller temperature => sharper, more discriminative sampling\n",
    "loss_threshold = 0.5\n",
    "sample_size = 60\n",
    "max_steps_per_center = 100  # guard against infinite loops\n",
    "max_epochs = 10\n",
    "center_indices = list(range(feat_data.shape[0]))\n",
    "# Shuffle the center-node order before each round of epochs\n",
    "rd.shuffle(center_indices)\n",
    "\n",
    "# Limit the number of center nodes used for training\n",
    "center_indices = center_indices[:max_epochs]\n",
    "\n",
    "for epoch, center_idx in enumerate(center_indices):\n",
    "    print(f\"\\n=== Pretraining Epoch {epoch} (Center Node: {center_idx}) ===\")\n",
    "    step = 0\n",
    "\n",
    "    # temperature = temp_scheduler.get_temperature()\n",
    "    # print(f\"Current temperature: {temperature:.4f}\")\n",
    "\n",
    "    dist_row = dist_matrix[center_idx].cpu().numpy()\n",
    "    probs = np.exp(-dist_row / temperature)\n",
    "\n",
    "    probs[center_idx] = 0\n",
    "    probs = probs / (probs.sum() + 1e-10)\n",
    "\n",
    "    available_nodes = len(dist_row) - 1\n",
    "    while True:\n",
    "        bsne_model.train()\n",
    "        optimizer.zero_grad()\n",
    "\n",
    "        total_loss = 0.0  # must be reset at every step\n",
    "        eps = 1e-10\n",
    "\n",
    "        # Build the Bp subgraph\n",
    "        actual_sample_size = min(sample_size, available_nodes)\n",
    "        if actual_sample_size > 0:\n",
    "            neighbors = np.random.choice(len(dist_row), size=actual_sample_size, p=probs, replace=False)\n",
    "            bp_nodes = neighbors.tolist()\n",
    "\n",
    "        # Compute features for all nodes in the Bp subgraph\n",
    "        bp_node_features = []\n",
    "        for node_idx in bp_nodes:\n",
    "            node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "            _, node_feature = bsne_model([node_subgraph])\n",
    "            bp_node_features.append(node_feature.squeeze(0))\n",
    "        bp_features = torch.stack(bp_node_features)\n",
    "\n",
    "        center_node_subgraph = create_node_subgraph(center_idx, feat_data, edge_indexs, device)\n",
    "        _, center_feature = bsne_model([center_node_subgraph])\n",
    "        center_feature = center_feature.squeeze(0)\n",
    "\n",
    "        #         # Build the Bu subgraph\n",
    "        #         if actual_sample_size > 0:\n",
    "        #             neighbors = np.random.choice(len(dist_row), size=actual_sample_size, replace=False)\n",
    "        #             bu_nodes = neighbors.tolist()\n",
    "\n",
    "        #         # Compute features for all nodes in the Bu subgraph\n",
    "        #         bu_node_features = []\n",
    "        #         for node_idx in bu_nodes:\n",
    "        #             node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "        #             _, node_feature = bsne_model([node_subgraph])\n",
    "        #             bu_node_features.append(node_feature.squeeze(0))\n",
    "        #         bu_features = torch.stack(bu_node_features)\n",
    "\n",
    "        # Compute the loss\n",
    "        #         center_feature = bp_features[0]  # 中心节点是第一个\n",
    "\n",
    "        # Only take the other nodes in the Bp subgraph (excluding the center node)\n",
    "        #         other_bp_indices = bp_nodes\n",
    "        #         other_bp_features = bp_features\n",
    "\n",
    "        #         P = probs[bp_nodes]\n",
    "        P = torch.tensor(probs[bp_nodes], device=device, dtype=torch.float32)\n",
    "#         print(P)\n",
    "#         p_entropy = -torch.sum(P * torch.log(P)).item()\n",
    "#         print(f\"p_entropy: {p_entropy:.4f}\")\n",
    "\n",
    "\n",
    "        # Compute the Q vector (Euclidean distance)\n",
    "        feat_dists_bp = torch.cdist(center_feature.unsqueeze(0), bp_features).squeeze(0)\n",
    "        Q = torch.softmax(-feat_dists_bp, dim=0)\n",
    "\n",
    "        #         log_ratio = torch.log(P + eps) - torch.log(Q + eps)\n",
    "        # Original local-loss formula\n",
    "        #         log_ratio = torch.log(((P + eps)/(Q + eps)))\n",
    "        #         loss_local = log_ratio.mean()\n",
    "\n",
    "        # Compute the Q vector (dot product)\n",
    "        #         center_norm = F.normalize(center_feature, dim=0)\n",
    "        #         other_norm = F.normalize(other_bp_features, dim=1)\n",
    "        #         feat_dists_bp = center_norm @ other_norm.t()\n",
    "        # #         Q = torch.softmax(feat_dists_bp.squeeze(0), dim=0)\n",
    "        #         Q = feat_dists_bp/feat_dists_bp.sum()\n",
    "\n",
    "        #         M = 0.5 * (P + Q)\n",
    "        #         loss_local = 0.5 * (P * torch.log(P/M) + Q * torch.log(Q/M)).sum()\n",
    "        #         log_ratio = torch.abs(torch.log(P / (Q + eps)))\n",
    "        log_ratio = (torch.log(P / Q)) ** 2\n",
    "        loss_local = log_ratio.mean()\n",
    "\n",
    "        # Fetch global distances and compute probabilities\n",
    "        #         all_dists = dist_matrix[center_idx].cpu().numpy()\n",
    "        # #         unnorm_probs_global = np.exp(-all_dists)\n",
    "        #         unnorm_probs_global = np.power(10.0, -all_dists)\n",
    "        #         unnorm_probs_global[center_idx] = 0  # 排除自身\n",
    "\n",
    "        #         # 计算全局归一化常数\n",
    "        #         Z_global = unnorm_probs_global.sum() + eps\n",
    "\n",
    "        #         # 计算B_p节点的全局概率和\n",
    "        #         bp_global_prob_sum = unnorm_probs_global[bp_nodes].sum() / Z_global\n",
    "\n",
    "        #         # 计算k_Bp\n",
    "        #         N = dist_matrix.shape[0]\n",
    "        #         k_Bp = bp_global_prob_sum * (N / len(bp_nodes))\n",
    "\n",
    "        #         bu_features_ = bu_features[1:]  # 排除中心节点\n",
    "        #         feat_dists_bu = torch.cdist(center_feature.unsqueeze(0), bu_features).squeeze(0)\n",
    "        #         sum_e_bu = torch.exp(-feat_dists_bu).sum()\n",
    "        #         sum_e_bp = torch.exp(-feat_dists_bp).sum()\n",
    "\n",
    "        #         global_ratio = (sum_e_bu / (sum_e_bp + eps))\n",
    "\n",
    "        #         loss_global = torch.abs(torch.log(global_ratio))\n",
    "        #         #加上平方项\n",
    "        #         loss_global = (torch.log(global_ratio.clamp(min=eps, max=1e10)))**2\n",
    "\n",
    "        #         total_loss += loss_local + loss_global\n",
    "        #         total_loss.backward()\n",
    "        total_loss = loss_local\n",
    "        total_loss.backward()\n",
    "        optimizer.step()\n",
    "\n",
    "        #         for name, parms in bsne_model.named_parameters():\n",
    "        #             grad_value = parms.grad\n",
    "        #             if grad_value is not None:\n",
    "        #                 grad_mean = torch.mean(grad_value)\n",
    "        #             else:\n",
    "        #                 grad_mean = \"No gradient\"\n",
    "\n",
    "        #             print(f'-->name: {name} '\n",
    "        #                   f'--weight: {torch.mean(parms.data).item():.6f} '\n",
    "        #                   f'-->grad_value: {grad_mean}')\n",
    "\n",
    "        #         print(\"P:\",P)\n",
    "        #         print(\"Q:\",Q)\n",
    "        #         print(f\"Step {step}: BSNE_Loss={total_loss.item():.4f},local_loss: {loss_local.item()},global_loss: {loss_global.item()}\")\n",
    "        print(f\"Step {step}: local_loss: {loss_local.item()}\")\n",
    "        step += 1\n",
    "\n",
    "        if total_loss.item() < loss_threshold or step >= max_steps_per_center:\n",
    "            print(f\"Center node {center_idx} finished at step {step} with loss {total_loss.item():.4f}\")\n",
    "            break\n",
    "\n",
    "print(\"\\n=== Starting Fine-tuning ===\")\n",
    "# Unfreeze the classifier head and rebuild the optimizer/scheduler for fine-tuning.\n",
    "bsne_model.classifier.requires_grad_(True)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    bsne_model.parameters(),\n",
    "    lr=args['finetune_lr'],\n",
    "    weight_decay=5e-5\n",
    ")\n",
    "scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n",
    "    optimizer, mode='max', factor=0.5, patience=10, verbose=True\n",
    ")\n",
    "\n",
    "best_val_auc = 0.0\n",
    "best_model_state = None\n",
    "train_pos, train_neg = pos_neg_split(idx_train, y_train)\n",
    "\n",
    "no_improve_epochs = 0\n",
    "early_stop = False\n",
    "\n",
    "for epoch in range(args['num_epochs']):\n",
    "    if early_stop:\n",
    "        break\n",
    "\n",
    "    bsne_model.train()\n",
    "    total_loss = 0.0\n",
    "\n",
    "    # Sample a batch of center nodes drawn from both classes\n",
    "    batch_centers = rd.sample(train_pos + train_neg, args['batch_size'])\n",
    "    subgraph_data = [\n",
    "        create_node_subgraph(xi, feat_data, edge_indexs, device)\n",
    "        for xi in batch_centers\n",
    "    ]\n",
    "\n",
    "    optimizer.zero_grad()\n",
    "\n",
    "    center_logits, _ = bsne_model(subgraph_data)  # [B, 2]\n",
    "\n",
    "    # Labels of the sampled center nodes\n",
    "    labels_center = torch.tensor([labels[xi] for xi in batch_centers]).to(device).long()\n",
    "\n",
    "    # Classification loss; nll_loss's default reduction ('mean') already averages over the batch\n",
    "    cls_loss = F.nll_loss(center_logits, labels_center)\n",
    "\n",
    "    cls_loss.backward()\n",
    "    optimizer.step()\n",
    "\n",
    "    total_loss += cls_loss.item()\n",
    "\n",
    "    # BUGFIX: cls_loss is already a per-sample mean, so the previous\n",
    "    # `total_loss / args['batch_size']` divided by the batch size twice\n",
    "    # and under-reported the training loss in TensorBoard and the log line.\n",
    "    avg_loss = total_loss\n",
    "    writer.add_scalar('FineTune/Train_Loss', avg_loss, epoch)\n",
    "\n",
    "    # Validate every 5 epochs\n",
    "    if epoch % 5 == 0:\n",
    "        val_auc, val_ap, val_f1, val_g_mean = test(idx_val, y_val, bsne_model, feat_data, edge_indexs, device)\n",
    "\n",
    "        writer.add_scalar('Validation/AUC', val_auc, epoch)\n",
    "        writer.add_scalar('Validation/F1', val_f1, epoch)\n",
    "        writer.add_scalar('Validation/GMean', val_g_mean, epoch)\n",
    "\n",
    "        print(f'Epoch: {epoch:03d} | Loss: {avg_loss:.4f} | Val AUC: {val_auc:.4f} | Val F1: {val_f1:.4f}')\n",
    "\n",
    "        scheduler.step(val_auc)\n",
    "\n",
    "        # Track the best checkpoint by validation AUC\n",
    "        if val_auc > best_val_auc:\n",
    "            best_val_auc = val_auc\n",
    "            no_improve_epochs = 0\n",
    "            best_model_state = copy.deepcopy(bsne_model.state_dict())\n",
    "        else:\n",
    "            no_improve_epochs += 1\n",
    "\n",
    "        if no_improve_epochs >= args['patience']:\n",
    "            print(f\"Early stopping at epoch {epoch}\")\n",
    "            early_stop = True\n",
    "\n",
    "# Restore the best checkpoint. BUGFIX: guard against best_model_state being\n",
    "# None (e.g. num_epochs == 0, or no validation round ever improved), which\n",
    "# previously crashed load_state_dict with a TypeError.\n",
    "if best_model_state is not None:\n",
    "    bsne_model.load_state_dict(best_model_state)\n",
    "\n",
    "# Final evaluation on the held-out test split\n",
    "test_auc, test_ap, test_f1, test_g_mean = test(idx_test, y_test, bsne_model, feat_data, edge_indexs, device)\n",
    "print(f'\\n=== Final Test Results ===')\n",
    "print(f'Test AUC: {test_auc:.4f} | Test AP: {test_ap:.4f} | Test F1: {test_f1:.4f} | G-mean: {test_g_mean:.4f}')\n",
    "writer.close()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "ac5746d9",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cuda\n",
      "loading data...\n",
      "\n",
      "=== Starting Pretraining ===\n",
      "\n",
      "=== Pretraining Epoch 0 (Center Node: 268) ===\n",
      "Step 0: local_loss: 3.6358532905578613\n",
      "Step 1: local_loss: 3.9259538650512695\n",
      "Step 2: local_loss: 3.3336267471313477\n",
      "Step 3: local_loss: 4.007715702056885\n",
      "Step 4: local_loss: 3.3279731273651123\n",
      "Step 5: local_loss: 3.944762706756592\n",
      "Step 6: local_loss: 3.8306522369384766\n",
      "Step 7: local_loss: 4.242014408111572\n",
      "Step 8: local_loss: 4.174435615539551\n",
      "Step 9: local_loss: 3.716594696044922\n",
      "Step 10: local_loss: 3.798845052719116\n",
      "Step 11: local_loss: 3.363314151763916\n",
      "Step 12: local_loss: 3.471343994140625\n",
      "Step 13: local_loss: 2.667170763015747\n",
      "Step 14: local_loss: 4.008368492126465\n",
      "Step 15: local_loss: 3.7275636196136475\n",
      "Step 16: local_loss: 3.311511993408203\n",
      "Step 17: local_loss: 3.708942174911499\n",
      "Step 18: local_loss: 3.617483377456665\n",
      "Step 19: local_loss: 3.8019559383392334\n",
      "Step 20: local_loss: 3.760085344314575\n",
      "Step 21: local_loss: 3.213890790939331\n",
      "Step 22: local_loss: 4.001358985900879\n",
      "Step 23: local_loss: 3.9192159175872803\n",
      "Step 24: local_loss: 3.25270414352417\n",
      "Step 25: local_loss: 3.5941200256347656\n",
      "Step 26: local_loss: 3.9971768856048584\n",
      "Step 27: local_loss: 2.7168967723846436\n",
      "Step 28: local_loss: 4.150425910949707\n",
      "Step 29: local_loss: 3.9925193786621094\n",
      "Step 30: local_loss: 4.543891906738281\n",
      "Step 31: local_loss: 3.7189242839813232\n",
      "Step 32: local_loss: 3.879967451095581\n",
      "Step 33: local_loss: 3.4329185485839844\n",
      "Step 34: local_loss: 4.052698135375977\n",
      "Step 35: local_loss: 3.6404614448547363\n",
      "Step 36: local_loss: 3.525373697280884\n",
      "Step 37: local_loss: 3.650801181793213\n",
      "Step 38: local_loss: 3.202509641647339\n",
      "Step 39: local_loss: 3.4951069355010986\n",
      "Step 40: local_loss: 3.503079891204834\n",
      "Step 41: local_loss: 3.3860011100769043\n",
      "Step 42: local_loss: 3.159174680709839\n",
      "Step 43: local_loss: 3.6251187324523926\n",
      "Step 44: local_loss: 3.6749606132507324\n",
      "Step 45: local_loss: 3.7143378257751465\n",
      "Step 46: local_loss: 3.881572723388672\n",
      "Step 47: local_loss: 3.944488286972046\n",
      "Step 48: local_loss: 3.765169382095337\n",
      "Step 49: local_loss: 3.712892770767212\n",
      "Step 50: local_loss: 3.239304304122925\n",
      "Step 51: local_loss: 3.3968451023101807\n",
      "Step 52: local_loss: 3.7830426692962646\n",
      "Step 53: local_loss: 4.120019912719727\n",
      "Step 54: local_loss: 3.064897060394287\n",
      "Step 55: local_loss: 3.350266695022583\n",
      "Step 56: local_loss: 3.4180049896240234\n",
      "Step 57: local_loss: 3.4477756023406982\n",
      "Step 58: local_loss: 3.5138957500457764\n",
      "Step 59: local_loss: 3.9448041915893555\n",
      "Step 60: local_loss: 2.7455124855041504\n",
      "Step 61: local_loss: 3.987278938293457\n",
      "Step 62: local_loss: 3.265981912612915\n",
      "Step 63: local_loss: 3.257005453109741\n",
      "Step 64: local_loss: 3.5671463012695312\n",
      "Step 65: local_loss: 3.300159454345703\n",
      "Step 66: local_loss: 3.259227752685547\n",
      "Step 67: local_loss: 3.228060007095337\n",
      "Step 68: local_loss: 2.3243963718414307\n",
      "Step 69: local_loss: 2.5771679878234863\n",
      "Step 70: local_loss: 2.5986719131469727\n",
      "Step 71: local_loss: 3.7177577018737793\n",
      "Step 72: local_loss: 3.668240547180176\n",
      "Step 73: local_loss: 3.0700018405914307\n",
      "Step 74: local_loss: 3.082536458969116\n",
      "Step 75: local_loss: 3.513000965118408\n",
      "Step 76: local_loss: 3.199320077896118\n",
      "Step 77: local_loss: 3.125905990600586\n",
      "Step 78: local_loss: 3.3595213890075684\n",
      "Step 79: local_loss: 2.727952718734741\n",
      "Step 80: local_loss: 3.281339168548584\n",
      "Step 81: local_loss: 3.27081298828125\n",
      "Step 82: local_loss: 2.374666213989258\n",
      "Step 83: local_loss: 2.8287813663482666\n",
      "Step 84: local_loss: 2.9344730377197266\n",
      "Step 85: local_loss: 2.121411085128784\n",
      "Step 86: local_loss: 2.924738645553589\n",
      "Step 87: local_loss: 2.1924054622650146\n",
      "Step 88: local_loss: 2.8511831760406494\n",
      "Step 89: local_loss: 2.512213706970215\n",
      "Step 90: local_loss: 3.2348623275756836\n",
      "Step 91: local_loss: 3.6051459312438965\n",
      "Step 92: local_loss: 3.263524055480957\n",
      "Step 93: local_loss: 3.5000131130218506\n",
      "Step 94: local_loss: 3.445237874984741\n",
      "Step 95: local_loss: 3.2047266960144043\n",
      "Step 96: local_loss: 3.0534796714782715\n",
      "Step 97: local_loss: 3.562206268310547\n",
      "Step 98: local_loss: 3.381232976913452\n",
      "Step 99: local_loss: 3.8866801261901855\n",
      "Step 100: local_loss: 3.753464937210083\n",
      "Step 101: local_loss: 2.9979848861694336\n",
      "Step 102: local_loss: 2.435837507247925\n",
      "Step 103: local_loss: 2.8099308013916016\n",
      "Step 104: local_loss: 2.5636651515960693\n",
      "Step 105: local_loss: 3.27022385597229\n",
      "Step 106: local_loss: 3.550318479537964\n",
      "Step 113: local_loss: 3.2708890438079834\n",
      "Step 114: local_loss: 2.812655210494995\n",
      "Step 115: local_loss: 4.391737937927246\n",
      "Step 116: local_loss: 2.9335808753967285\n",
      "Step 117: local_loss: 3.439535617828369\n",
      "Step 118: local_loss: 3.9558072090148926\n",
      "Step 119: local_loss: 3.5725557804107666\n",
      "Step 120: local_loss: 3.4109139442443848\n",
      "Step 121: local_loss: 3.2524142265319824\n",
      "Step 122: local_loss: 3.0546720027923584\n",
      "Step 123: local_loss: 3.3409929275512695\n",
      "Step 124: local_loss: 2.6041460037231445\n",
      "Step 125: local_loss: 2.6220412254333496\n",
      "Step 126: local_loss: 3.1562283039093018\n",
      "Step 127: local_loss: 2.522498369216919\n",
      "Step 128: local_loss: 3.3850584030151367\n",
      "Step 129: local_loss: 2.701045513153076\n",
      "Step 130: local_loss: 2.892899513244629\n",
      "Step 131: local_loss: 2.2107467651367188\n",
      "Step 132: local_loss: 3.147146224975586\n",
      "Step 133: local_loss: 2.8498361110687256\n",
      "Step 134: local_loss: 3.224491596221924\n",
      "Step 135: local_loss: 3.4704885482788086\n",
      "Step 136: local_loss: 3.1733028888702393\n",
      "Step 137: local_loss: 3.1854631900787354\n",
      "Step 138: local_loss: 2.7800827026367188\n",
      "Step 139: local_loss: 3.071293830871582\n",
      "Step 140: local_loss: 3.241697072982788\n",
      "Step 141: local_loss: 3.152360200881958\n",
      "Step 142: local_loss: 2.332000970840454\n",
      "Step 143: local_loss: 3.276430606842041\n",
      "Step 144: local_loss: 3.758575677871704\n",
      "Step 145: local_loss: 2.805514335632324\n",
      "Step 146: local_loss: 2.932640314102173\n",
      "Step 147: local_loss: 2.999295234680176\n",
      "Step 148: local_loss: 3.325329065322876\n",
      "Step 149: local_loss: 2.7233340740203857\n",
      "Step 150: local_loss: 2.6271896362304688\n",
      "Step 151: local_loss: 2.11836314201355\n",
      "Step 152: local_loss: 2.9142746925354004\n",
      "Step 153: local_loss: 2.8182568550109863\n",
      "Step 154: local_loss: 2.3983583450317383\n",
      "Step 155: local_loss: 2.972187042236328\n",
      "Step 156: local_loss: 2.570019483566284\n",
      "Step 157: local_loss: 3.098923444747925\n",
      "Step 158: local_loss: 2.5058443546295166\n",
      "Step 159: local_loss: 3.5702874660491943\n",
      "Step 160: local_loss: 2.9794774055480957\n",
      "Step 161: local_loss: 2.2178986072540283\n",
      "Step 162: local_loss: 2.9319560527801514\n",
      "Step 163: local_loss: 3.1850175857543945\n",
      "Step 164: local_loss: 3.4608142375946045\n",
      "Step 165: local_loss: 2.6433987617492676\n",
      "Step 166: local_loss: 2.5748887062072754\n",
      "Step 167: local_loss: 2.782083511352539\n",
      "Step 168: local_loss: 2.5819270610809326\n",
      "Step 169: local_loss: 2.235259532928467\n",
      "Step 170: local_loss: 3.5022025108337402\n",
      "Step 171: local_loss: 3.618096351623535\n",
      "Step 172: local_loss: 3.0524704456329346\n",
      "Step 173: local_loss: 3.072310447692871\n",
      "Step 174: local_loss: 2.5254065990448\n",
      "Step 175: local_loss: 2.768432855606079\n",
      "Step 176: local_loss: 2.6608150005340576\n",
      "Step 177: local_loss: 2.654970407485962\n",
      "Step 178: local_loss: 2.7651500701904297\n",
      "Step 179: local_loss: 3.164799213409424\n",
      "Step 180: local_loss: 1.6915562152862549\n",
      "Step 181: local_loss: 2.568950891494751\n",
      "Step 182: local_loss: 2.845311164855957\n",
      "Step 183: local_loss: 2.797424793243408\n",
      "Step 184: local_loss: 4.283539772033691\n",
      "Step 185: local_loss: 3.703305244445801\n",
      "Step 186: local_loss: 3.2600297927856445\n",
      "Step 187: local_loss: 3.934044122695923\n",
      "Step 188: local_loss: 2.9862265586853027\n",
      "Step 189: local_loss: 2.493246078491211\n",
      "Step 190: local_loss: 3.421703338623047\n",
      "Step 191: local_loss: 2.3560121059417725\n",
      "Step 192: local_loss: 2.958249092102051\n",
      "Step 193: local_loss: 2.5545032024383545\n",
      "Step 194: local_loss: 2.925790548324585\n",
      "Step 195: local_loss: 3.929527521133423\n",
      "Step 196: local_loss: 2.6562349796295166\n",
      "Step 197: local_loss: 2.645648956298828\n",
      "Step 198: local_loss: 2.6394238471984863\n",
      "Step 199: local_loss: 3.988185405731201\n",
      "Center node 268 finished at step 200 with loss 3.9882\n",
      "\n",
      "=== Pretraining Epoch 1 (Center Node: 7572) ===\n",
      "Step 0: local_loss: 2.8340694904327393\n",
      "Step 1: local_loss: 3.107457160949707\n",
      "Step 2: local_loss: 1.9568324089050293\n",
      "Step 3: local_loss: 1.8665854930877686\n",
      "Step 4: local_loss: 2.2265162467956543\n",
      "Step 5: local_loss: 1.9196935892105103\n",
      "Step 6: local_loss: 2.3430330753326416\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 7: local_loss: 2.493530750274658\n",
      "Step 8: local_loss: 2.682053804397583\n",
      "Step 9: local_loss: 2.442410945892334\n",
      "Step 10: local_loss: 1.8335678577423096\n",
      "Step 11: local_loss: 2.5073904991149902\n",
      "Step 12: local_loss: 2.9418137073516846\n",
      "Step 13: local_loss: 2.6712582111358643\n",
      "Step 14: local_loss: 2.7378990650177\n",
      "Step 15: local_loss: 2.5777699947357178\n",
      "Step 16: local_loss: 2.562715768814087\n",
      "Step 17: local_loss: 2.4377408027648926\n",
      "Step 18: local_loss: 2.8497865200042725\n",
      "Step 19: local_loss: 2.5784592628479004\n",
      "Step 20: local_loss: 1.8392614126205444\n",
      "Step 21: local_loss: 2.3642079830169678\n",
      "Step 22: local_loss: 1.8266488313674927\n",
      "Step 23: local_loss: 1.6025688648223877\n",
      "Step 24: local_loss: 1.5577853918075562\n",
      "Step 25: local_loss: 2.5424511432647705\n",
      "Step 26: local_loss: 2.4118306636810303\n",
      "Step 27: local_loss: 2.1327149868011475\n",
      "Step 28: local_loss: 2.2890050411224365\n",
      "Step 29: local_loss: 3.056028366088867\n",
      "Step 30: local_loss: 2.469560146331787\n",
      "Step 31: local_loss: 2.211561679840088\n",
      "Step 32: local_loss: 2.0063834190368652\n",
      "Step 33: local_loss: 2.1717236042022705\n",
      "Step 34: local_loss: 2.320767879486084\n",
      "Step 35: local_loss: 2.3153908252716064\n",
      "Step 36: local_loss: 1.5393190383911133\n",
      "Step 37: local_loss: 2.2723865509033203\n",
      "Step 38: local_loss: 1.894918441772461\n",
      "Step 39: local_loss: 1.8784984350204468\n",
      "Step 40: local_loss: 2.8726823329925537\n",
      "Step 41: local_loss: 2.3054144382476807\n",
      "Step 42: local_loss: 2.5478975772857666\n",
      "Step 43: local_loss: 1.355515718460083\n",
      "Step 44: local_loss: 1.6435133218765259\n",
      "Step 45: local_loss: 2.4655988216400146\n",
      "Step 46: local_loss: 2.480783700942993\n",
      "Step 47: local_loss: 2.138761043548584\n",
      "Step 48: local_loss: 1.9735263586044312\n",
      "Step 49: local_loss: 2.451979160308838\n",
      "Step 50: local_loss: 2.811124801635742\n",
      "Step 51: local_loss: 2.6463160514831543\n",
      "Step 52: local_loss: 2.5168559551239014\n",
      "Step 53: local_loss: 1.8878164291381836\n",
      "Step 54: local_loss: 1.858498215675354\n",
      "Step 55: local_loss: 2.4286978244781494\n",
      "Step 56: local_loss: 2.521207809448242\n",
      "Step 57: local_loss: 2.56892728805542\n",
      "Step 58: local_loss: 3.031299352645874\n",
      "Step 59: local_loss: 2.55564022064209\n",
      "Step 60: local_loss: 2.163074016571045\n",
      "Step 61: local_loss: 2.245835781097412\n",
      "Step 62: local_loss: 1.9577428102493286\n",
      "Step 63: local_loss: 1.9041475057601929\n",
      "Step 64: local_loss: 2.488572835922241\n",
      "Step 65: local_loss: 1.9822627305984497\n",
      "Step 66: local_loss: 1.9764810800552368\n",
      "Step 67: local_loss: 2.523460626602173\n",
      "Step 68: local_loss: 1.5446693897247314\n",
      "Step 69: local_loss: 1.825788974761963\n",
      "Step 70: local_loss: 2.1584839820861816\n",
      "Step 71: local_loss: 2.012274742126465\n",
      "Step 72: local_loss: 2.838414192199707\n",
      "Step 73: local_loss: 1.058384895324707\n",
      "Step 74: local_loss: 2.5976052284240723\n",
      "Step 75: local_loss: 2.2764639854431152\n",
      "Step 76: local_loss: 1.948708415031433\n",
      "Step 77: local_loss: 1.9936256408691406\n",
      "Step 78: local_loss: 1.9417542219161987\n",
      "Step 79: local_loss: 2.1507246494293213\n",
      "Step 80: local_loss: 2.974843740463257\n",
      "Step 81: local_loss: 2.4950201511383057\n",
      "Step 82: local_loss: 2.108743190765381\n",
      "Step 83: local_loss: 1.9161568880081177\n",
      "Step 84: local_loss: 1.8023136854171753\n",
      "Step 85: local_loss: 2.5526347160339355\n",
      "Step 86: local_loss: 2.1076676845550537\n",
      "Step 87: local_loss: 1.540798306465149\n",
      "Step 88: local_loss: 2.004889488220215\n",
      "Step 89: local_loss: 2.4562201499938965\n",
      "Step 90: local_loss: 2.3129334449768066\n",
      "Step 91: local_loss: 2.1927497386932373\n",
      "Step 92: local_loss: 2.4393861293792725\n",
      "Step 93: local_loss: 2.0488040447235107\n",
      "Step 94: local_loss: 2.7505900859832764\n",
      "Step 95: local_loss: 2.1404762268066406\n",
      "Step 96: local_loss: 1.5747476816177368\n",
      "Step 97: local_loss: 2.8977959156036377\n",
      "Step 98: local_loss: 1.7028166055679321\n",
      "Step 99: local_loss: 2.318636894226074\n",
      "Step 100: local_loss: 1.7464847564697266\n",
      "Step 101: local_loss: 2.902926445007324\n",
      "Step 102: local_loss: 1.5820058584213257\n",
      "Step 103: local_loss: 2.732570171356201\n",
      "Step 104: local_loss: 2.753523349761963\n",
      "Step 105: local_loss: 2.1563198566436768\n",
      "Step 106: local_loss: 2.364187240600586\n",
      "Step 107: local_loss: 2.0935702323913574\n",
      "Step 108: local_loss: 1.8963407278060913\n",
      "Step 109: local_loss: 2.8953239917755127\n",
      "Step 110: local_loss: 2.282871961593628\n",
      "Step 111: local_loss: 2.68896484375\n",
      "Step 112: local_loss: 1.6858662366867065\n",
      "Step 113: local_loss: 2.468134641647339\n",
      "Step 114: local_loss: 3.3102970123291016\n",
      "Step 115: local_loss: 2.0765860080718994\n",
      "Step 116: local_loss: 1.9437150955200195\n",
      "Step 117: local_loss: 3.42303729057312\n",
      "Step 118: local_loss: 1.6130847930908203\n",
      "Step 119: local_loss: 1.9056929349899292\n",
      "Step 120: local_loss: 2.5175600051879883\n",
      "Step 121: local_loss: 2.2610607147216797\n",
      "Step 122: local_loss: 2.4470083713531494\n",
      "Step 123: local_loss: 2.4143388271331787\n",
      "Step 124: local_loss: 2.206880807876587\n",
      "Step 125: local_loss: 2.1336004734039307\n",
      "Step 126: local_loss: 2.0716075897216797\n",
      "Step 127: local_loss: 2.5598080158233643\n",
      "Step 128: local_loss: 1.7907551527023315\n",
      "Step 129: local_loss: 1.8623286485671997\n",
      "Step 130: local_loss: 1.6889605522155762\n",
      "Step 131: local_loss: 2.491103172302246\n",
      "Step 132: local_loss: 1.6394513845443726\n",
      "Step 133: local_loss: 2.6452620029449463\n",
      "Step 134: local_loss: 2.2431094646453857\n",
      "Step 135: local_loss: 2.0535097122192383\n",
      "Step 136: local_loss: 2.5806822776794434\n",
      "Step 137: local_loss: 1.9114601612091064\n",
      "Step 138: local_loss: 2.015838623046875\n",
      "Step 139: local_loss: 2.4990296363830566\n",
      "Step 140: local_loss: 2.2458572387695312\n",
      "Step 141: local_loss: 2.5371248722076416\n",
      "Step 142: local_loss: 1.6310532093048096\n",
      "Step 143: local_loss: 2.603895425796509\n",
      "Step 144: local_loss: 1.7817782163619995\n",
      "Step 145: local_loss: 2.5019659996032715\n",
      "Step 146: local_loss: 2.135833263397217\n",
      "Step 147: local_loss: 1.7592697143554688\n",
      "Step 148: local_loss: 2.0098161697387695\n",
      "Step 149: local_loss: 2.2302398681640625\n",
      "Step 150: local_loss: 2.5967934131622314\n",
      "Step 151: local_loss: 2.451564311981201\n",
      "Step 152: local_loss: 1.6907185316085815\n",
      "Step 153: local_loss: 3.0379831790924072\n",
      "Step 154: local_loss: 2.307652473449707\n",
      "Step 155: local_loss: 2.053908586502075\n",
      "Step 156: local_loss: 2.5901360511779785\n",
      "Step 157: local_loss: 1.5641710758209229\n",
      "Step 158: local_loss: 2.241009473800659\n",
      "Step 159: local_loss: 2.3170206546783447\n",
      "Step 160: local_loss: 2.1391022205352783\n",
      "Step 161: local_loss: 2.2984986305236816\n",
      "Step 162: local_loss: 1.8296343088150024\n",
      "Step 163: local_loss: 2.2865712642669678\n",
      "Step 164: local_loss: 2.356426477432251\n",
      "Step 165: local_loss: 1.8639318943023682\n",
      "Step 166: local_loss: 2.3076510429382324\n",
      "Step 167: local_loss: 2.4970383644104004\n",
      "Step 168: local_loss: 2.080305576324463\n",
      "Step 169: local_loss: 2.6433799266815186\n",
      "Step 170: local_loss: 3.3271071910858154\n",
      "Step 171: local_loss: 2.2986900806427\n",
      "Step 172: local_loss: 2.082693576812744\n",
      "Step 173: local_loss: 1.8958616256713867\n",
      "Step 174: local_loss: 2.329411029815674\n",
      "Step 175: local_loss: 2.9964215755462646\n",
      "Step 176: local_loss: 2.337406635284424\n",
      "Step 177: local_loss: 2.421424388885498\n",
      "Step 178: local_loss: 2.614440441131592\n",
      "Step 179: local_loss: 1.9058579206466675\n",
      "Step 180: local_loss: 2.367480516433716\n",
      "Step 181: local_loss: 2.1025631427764893\n",
      "Step 182: local_loss: 2.2180657386779785\n",
      "Step 183: local_loss: 2.9149329662323\n",
      "Step 184: local_loss: 2.0739738941192627\n",
      "Step 185: local_loss: 2.408322811126709\n",
      "Step 186: local_loss: 1.8854033946990967\n",
      "Step 187: local_loss: 2.399334669113159\n",
      "Step 188: local_loss: 2.118286609649658\n",
      "Step 189: local_loss: 2.481227159500122\n",
      "Step 190: local_loss: 2.0284035205841064\n",
      "Step 191: local_loss: 2.1426684856414795\n",
      "Step 192: local_loss: 2.5059916973114014\n",
      "Step 193: local_loss: 2.4572620391845703\n",
      "Step 194: local_loss: 2.4503397941589355\n",
      "Step 195: local_loss: 1.8717416524887085\n",
      "Step 196: local_loss: 2.0207180976867676\n",
      "Step 197: local_loss: 1.430837869644165\n",
      "Step 198: local_loss: 1.6132607460021973\n",
      "Step 199: local_loss: 2.3331472873687744\n",
      "Center node 7572 finished at step 200 with loss 2.3331\n",
      "\n",
      "=== Pretraining Epoch 2 (Center Node: 7964) ===\n",
      "Step 0: local_loss: 3.233855962753296\n",
      "Step 1: local_loss: 2.6165289878845215\n",
      "Step 2: local_loss: 1.757665753364563\n",
      "Step 3: local_loss: 2.0915679931640625\n",
      "Step 4: local_loss: 2.315162181854248\n",
      "Step 5: local_loss: 2.273066282272339\n",
      "Step 6: local_loss: 2.712345600128174\n",
      "Step 7: local_loss: 2.9812371730804443\n",
      "Step 8: local_loss: 2.293410301208496\n",
      "Step 9: local_loss: 2.563920021057129\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Step 10: local_loss: 2.2937495708465576\n",
      "Step 11: local_loss: 2.16292667388916\n",
      "Step 12: local_loss: 1.9241399765014648\n",
      "Step 13: local_loss: 2.046186923980713\n",
      "Step 14: local_loss: 2.39870285987854\n",
      "Step 15: local_loss: 2.757202386856079\n",
      "Step 16: local_loss: 2.920109510421753\n",
      "Step 17: local_loss: 3.0496482849121094\n",
      "Step 18: local_loss: 1.7907227277755737\n",
      "Step 19: local_loss: 2.3083250522613525\n",
      "Step 20: local_loss: 1.7964447736740112\n",
      "Step 21: local_loss: 1.7266284227371216\n",
      "Step 22: local_loss: 2.6616013050079346\n",
      "Step 23: local_loss: 2.1291236877441406\n",
      "Step 24: local_loss: 2.0858471393585205\n",
      "Step 25: local_loss: 2.1099119186401367\n",
      "Step 26: local_loss: 2.943612813949585\n",
      "Step 27: local_loss: 2.6774559020996094\n",
      "Step 28: local_loss: 2.661754608154297\n",
      "Step 29: local_loss: 3.1836888790130615\n",
      "Step 30: local_loss: 2.4735653400421143\n",
      "Step 31: local_loss: 2.1410624980926514\n",
      "Step 32: local_loss: 1.6225301027297974\n",
      "Step 33: local_loss: 3.0702741146087646\n",
      "Step 34: local_loss: 2.7727737426757812\n",
      "Step 35: local_loss: 3.237659454345703\n",
      "Step 36: local_loss: 1.4258207082748413\n",
      "Step 37: local_loss: 2.2012734413146973\n",
      "Step 38: local_loss: 2.794013500213623\n",
      "Step 39: local_loss: 1.7673873901367188\n",
      "Step 40: local_loss: 3.036872625350952\n",
      "Step 41: local_loss: 2.7582552433013916\n",
      "Step 42: local_loss: 2.9723761081695557\n",
      "Step 43: local_loss: 2.0457000732421875\n",
      "Step 44: local_loss: 1.8911216259002686\n",
      "Step 45: local_loss: 1.9401975870132446\n",
      "Step 46: local_loss: 2.3524980545043945\n",
      "Step 47: local_loss: 2.739957332611084\n",
      "Step 48: local_loss: 2.4098093509674072\n",
      "Step 49: local_loss: 2.3294262886047363\n",
      "Step 50: local_loss: 1.0923690795898438\n",
      "Step 51: local_loss: 2.1940019130706787\n",
      "Step 52: local_loss: 2.5987260341644287\n",
      "Step 53: local_loss: 2.4150993824005127\n",
      "Step 54: local_loss: 1.9518245458602905\n",
      "Step 55: local_loss: 2.357144594192505\n",
      "Step 56: local_loss: 2.5341997146606445\n",
      "Step 57: local_loss: 2.641939401626587\n",
      "Step 58: local_loss: 1.3288697004318237\n",
      "Step 59: local_loss: 1.857050895690918\n",
      "Step 60: local_loss: 2.619901180267334\n",
      "Step 61: local_loss: 2.730189800262451\n",
      "Step 62: local_loss: 3.0324997901916504\n",
      "Step 63: local_loss: 3.058912754058838\n",
      "Step 64: local_loss: 1.7133532762527466\n",
      "Step 65: local_loss: 2.8483355045318604\n",
      "Step 66: local_loss: 2.2747464179992676\n",
      "Step 67: local_loss: 2.612441062927246\n",
      "Step 68: local_loss: 1.7758525609970093\n",
      "Step 69: local_loss: 2.1142444610595703\n",
      "Step 70: local_loss: 2.138166904449463\n",
      "Step 71: local_loss: 1.8346832990646362\n",
      "Step 72: local_loss: 2.0781562328338623\n",
      "Step 73: local_loss: 2.1082875728607178\n",
      "Step 74: local_loss: 2.634302854537964\n",
      "Step 75: local_loss: 3.341951370239258\n",
      "Step 76: local_loss: 2.505103588104248\n",
      "Step 77: local_loss: 2.8253772258758545\n",
      "Step 78: local_loss: 2.7361762523651123\n",
      "Step 79: local_loss: 2.366201639175415\n",
      "Step 80: local_loss: 2.3630809783935547\n",
      "Step 81: local_loss: 1.8945521116256714\n",
      "Step 82: local_loss: 2.3258111476898193\n",
      "Step 83: local_loss: 1.944190263748169\n",
      "Step 84: local_loss: 1.8045315742492676\n",
      "Step 85: local_loss: 2.2271950244903564\n",
      "Step 86: local_loss: 2.7802114486694336\n",
      "Step 87: local_loss: 2.7339673042297363\n",
      "Step 88: local_loss: 3.040093183517456\n",
      "Step 89: local_loss: 2.4629342555999756\n",
      "Step 90: local_loss: 2.451380729675293\n",
      "Step 91: local_loss: 1.545966625213623\n",
      "Step 92: local_loss: 2.130373001098633\n",
      "Step 93: local_loss: 2.872776985168457\n",
      "Step 94: local_loss: 2.050196409225464\n",
      "Step 95: local_loss: 2.880800485610962\n",
      "Step 96: local_loss: 2.6532680988311768\n",
      "Step 97: local_loss: 2.4261629581451416\n",
      "Step 98: local_loss: 2.2678048610687256\n",
      "Step 99: local_loss: 3.0102577209472656\n",
      "Step 100: local_loss: 1.1424247026443481\n",
      "Step 101: local_loss: 2.6472718715667725\n",
      "Step 102: local_loss: 1.9172402620315552\n",
      "Step 103: local_loss: 2.3187973499298096\n",
      "Step 104: local_loss: 1.8793233633041382\n",
      "Step 105: local_loss: 2.9073736667633057\n",
      "Step 106: local_loss: 2.4781718254089355\n",
      "Step 107: local_loss: 2.5848214626312256\n",
      "Step 108: local_loss: 3.197488307952881\n",
      "Step 109: local_loss: 2.1786534786224365\n",
      "Step 110: local_loss: 1.5134590864181519\n",
      "Step 111: local_loss: 2.1036057472229004\n",
      "Step 112: local_loss: 2.750636339187622\n",
      "Step 113: local_loss: 2.36033034324646\n",
      "Step 114: local_loss: 2.5148723125457764\n",
      "Step 115: local_loss: 3.1847047805786133\n",
      "Step 116: local_loss: 1.323874831199646\n",
      "Step 117: local_loss: 2.163198471069336\n",
      "Step 118: local_loss: 2.106982707977295\n",
      "Step 119: local_loss: 2.2260146141052246\n",
      "Step 120: local_loss: 2.0855555534362793\n",
      "Step 121: local_loss: 2.0047736167907715\n",
      "Step 122: local_loss: 1.9087663888931274\n",
      "Step 123: local_loss: 2.0982773303985596\n",
      "Step 124: local_loss: 1.7929140329360962\n",
      "Step 125: local_loss: 2.2907350063323975\n",
      "Step 126: local_loss: 1.4770026206970215\n",
      "Step 127: local_loss: 2.762395143508911\n",
      "Step 128: local_loss: 2.6154470443725586\n",
      "Step 129: local_loss: 1.7850782871246338\n",
      "Step 130: local_loss: 2.1032097339630127\n",
      "Step 131: local_loss: 2.6369996070861816\n",
      "Step 132: local_loss: 1.9451384544372559\n",
      "Step 133: local_loss: 1.8307712078094482\n",
      "Step 134: local_loss: 2.144026756286621\n",
      "Step 135: local_loss: 1.8530912399291992\n",
      "Step 136: local_loss: 1.8912023305892944\n",
      "Step 137: local_loss: 1.7049567699432373\n",
      "Step 138: local_loss: 2.449650526046753\n",
      "Step 139: local_loss: 2.0188660621643066\n",
      "Step 140: local_loss: 2.6648316383361816\n",
      "Step 141: local_loss: 2.466991662979126\n",
      "Step 142: local_loss: 1.835349678993225\n",
      "Step 143: local_loss: 2.110426664352417\n",
      "Step 144: local_loss: 2.952763557434082\n",
      "Step 145: local_loss: 2.19954776763916\n",
      "Step 146: local_loss: 2.072096109390259\n",
      "Step 147: local_loss: 1.5183943510055542\n",
      "Step 148: local_loss: 2.6634397506713867\n",
      "Step 149: local_loss: 2.5927131175994873\n",
      "Step 150: local_loss: 2.2464351654052734\n",
      "Step 151: local_loss: 2.3684184551239014\n",
      "Step 152: local_loss: 2.181699752807617\n",
      "Step 153: local_loss: 2.4425644874572754\n",
      "Step 154: local_loss: 2.2738583087921143\n",
      "Step 155: local_loss: 2.1601531505584717\n",
      "Step 156: local_loss: 2.794926404953003\n",
      "Step 157: local_loss: 1.4834580421447754\n",
      "Step 158: local_loss: 2.020303726196289\n",
      "Step 159: local_loss: 2.0853354930877686\n",
      "Step 160: local_loss: 2.567122220993042\n",
      "Step 161: local_loss: 2.161465644836426\n",
      "Step 162: local_loss: 2.423646926879883\n",
      "Step 163: local_loss: 1.6116641759872437\n",
      "Step 164: local_loss: 2.5719363689422607\n",
      "Step 165: local_loss: 2.165623188018799\n",
      "Step 166: local_loss: 2.777599334716797\n",
      "Step 167: local_loss: 1.873062252998352\n",
      "Step 168: local_loss: 2.161634922027588\n",
      "Step 169: local_loss: 1.520270586013794\n",
      "Step 170: local_loss: 1.9497746229171753\n",
      "Step 171: local_loss: 2.148625612258911\n",
      "Step 172: local_loss: 1.631911277770996\n",
      "Step 173: local_loss: 1.794289469718933\n",
      "Step 174: local_loss: 2.623676300048828\n",
      "Step 175: local_loss: 2.921018123626709\n",
      "Step 176: local_loss: 2.0750553607940674\n",
      "Step 177: local_loss: 1.7721341848373413\n",
      "Step 178: local_loss: 1.8858996629714966\n",
      "Step 179: local_loss: 1.87046217918396\n",
      "Step 180: local_loss: 1.9195318222045898\n",
      "Step 181: local_loss: 3.103024959564209\n",
      "Step 182: local_loss: 2.416499614715576\n",
      "Step 183: local_loss: 1.4525235891342163\n",
      "Step 184: local_loss: 2.737492084503174\n",
      "Step 185: local_loss: 2.177424430847168\n",
      "Step 186: local_loss: 2.7832698822021484\n",
      "Step 187: local_loss: 1.9713019132614136\n",
      "Step 188: local_loss: 1.8483554124832153\n",
      "Step 189: local_loss: 1.5283509492874146\n",
      "Step 190: local_loss: 2.281158208847046\n",
      "Step 191: local_loss: 2.1607320308685303\n",
      "Step 192: local_loss: 2.006211042404175\n",
      "Step 193: local_loss: 2.5166170597076416\n",
      "Step 194: local_loss: 2.5848677158355713\n",
      "Step 195: local_loss: 1.6370548009872437\n",
      "Step 196: local_loss: 2.353562831878662\n",
      "Step 197: local_loss: 2.7286484241485596\n",
      "Step 198: local_loss: 2.4100654125213623\n",
      "Step 199: local_loss: 2.1221272945404053\n",
      "Center node 7964 finished at step 200 with loss 2.1221\n",
      "\n",
      "=== Pretraining Epoch 3 (Center Node: 7312) ===\n",
      "Step 0: local_loss: 2.0144309997558594\n",
      "Step 1: local_loss: 2.8446717262268066\n",
      "Step 2: local_loss: 3.1784985065460205\n",
      "Step 3: local_loss: 1.5448025465011597\n",
      "Step 4: local_loss: 2.0138328075408936\n",
      "Step 5: local_loss: 2.474931478500366\n",
      "Step 6: local_loss: 1.986982822418213\n",
      "Step 7: local_loss: 3.66076922416687\n",
      "Step 8: local_loss: 2.3202850818634033\n",
      "Step 9: local_loss: 1.6663469076156616\n",
      "Step 10: local_loss: 2.259392738342285\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
      "\u001b[31mKeyboardInterrupt\u001b[39m                         Traceback (most recent call last)",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[23]\u001b[39m\u001b[32m, line 150\u001b[39m\n\u001b[32m    148\u001b[39m bp_node_features = []\n\u001b[32m    149\u001b[39m \u001b[38;5;28;01mfor\u001b[39;00m node_idx \u001b[38;5;129;01min\u001b[39;00m bp_nodes:\n\u001b[32m--> \u001b[39m\u001b[32m150\u001b[39m     node_subgraph = \u001b[43mcreate_node_subgraph\u001b[49m\u001b[43m(\u001b[49m\u001b[43mnode_idx\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfeat_data\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43medge_indexs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdevice\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m    151\u001b[39m     _, node_feature = bsne_model([node_subgraph])\n\u001b[32m    152\u001b[39m     bp_node_features.append(node_feature.squeeze(\u001b[32m0\u001b[39m))\n",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[1]\u001b[39m\u001b[32m, line 49\u001b[39m, in \u001b[36mcreate_node_subgraph\u001b[39m\u001b[34m(node_idx, feat_data, edge_indexs, device)\u001b[39m\n\u001b[32m     47\u001b[39m neighbors = \u001b[38;5;28mset\u001b[39m()\n\u001b[32m     48\u001b[39m \u001b[38;5;28;01mfor\u001b[39;00m rel_idx \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(\u001b[38;5;28mlen\u001b[39m(edge_indexs)):\n\u001b[32m---> \u001b[39m\u001b[32m49\u001b[39m     edge_index = \u001b[43medge_indexs\u001b[49m\u001b[43m[\u001b[49m\u001b[43mrel_idx\u001b[49m\u001b[43m]\u001b[49m\u001b[43m[\u001b[49m\u001b[32;43m0\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m.\u001b[49m\u001b[43mcpu\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m.numpy()\n\u001b[32m     50\u001b[39m     \u001b[38;5;66;03m# 找出以中心点为起点的边的终点\u001b[39;00m\n\u001b[32m     51\u001b[39m     rel_neighbors = edge_index[\u001b[32m1\u001b[39m][edge_index[\u001b[32m0\u001b[39m] == node_idx].tolist()\n",
      "\u001b[31mKeyboardInterrupt\u001b[39m: "
     ]
    }
   ],
   "source": [
    "# 0717 test2\n",
    "# 参数设置\n",
    "args = {\n",
    "    \"dataset\": \"amazon\",\n",
    "    #     \"dataset\": \"yelp\",\n",
    "    \"batch_size\": 8,\n",
    "    \"sample_size\": 50,\n",
    "    \"weight_decay\": 0.00005,\n",
    "    \"emb_size\": 32,\n",
    "    \"pretrain_epochs\": 50,\n",
    "    \"pretrain_lr\": 0.001,  # 0.0005，\n",
    "    \"finetune_lr\": 0.0005,\n",
    "    \"num_epochs\": 200,\n",
    "    \"pretrain_patience\": 20,\n",
    "    \"patience\": 30,\n",
    "    \"tsne_weight\": 0.3,\n",
    "    \"weight\": 0.6,\n",
    "    \"layers\": 7,\n",
    "    \"test_size\": 0.6,\n",
    "    \"val_size\": 0.5,\n",
    "    \"layers_tree\": 7,\n",
    "    \"seed\": 76,\n",
    "    \"num_heads\": 4,\n",
    "    \"num_layers\": 2,\n",
    "    \"drop_rate\": 0.5\n",
    "\n",
    "}\n",
    "\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "print(device)\n",
    "\n",
    "timestamp = time.strftime(\"%Y%m%d-%H%M%S\")\n",
    "writer = SummaryWriter(f'runs/{args[\"dataset\"]}_{timestamp}')\n",
    "\n",
    "print('loading data...')\n",
    "prefix = \"../../data/\"\n",
    "\n",
    "edge_indexs, feat_data, labels = load_data(args['dataset'], args['layers_tree'], prefix)\n",
    "\n",
    "np.random.seed(args['seed'])\n",
    "rd.seed(args['seed'])\n",
    "\n",
    "if args['dataset'] == 'yelp':\n",
    "    index = list(range(len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels, stratify=labels,\n",
    "                                                                    test_size=args['test_size'], random_state=2,\n",
    "                                                                    shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"YelpChi_shortest_distance.pkl\")\n",
    "elif args['dataset'] == 'amazon':\n",
    "    # Amazon: the first 3305 nodes are excluded from train/val/test splits\n",
    "    index = list(range(3305, len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels[3305:],\n",
    "                                                                    stratify=labels[3305:],\n",
    "                                                                    test_size=args['test_size'],\n",
    "                                                                    random_state=2, shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"Amazon_shortest_distance.pkl\")\n",
    "else:\n",
    "    # Fail fast with a clear message instead of a NameError on dist_path below\n",
    "    raise ValueError(f\"Unknown dataset: {args['dataset']} (expected 'yelp' or 'amazon')\")\n",
    "\n",
    "# Load the precomputed all-pairs shortest-distance matrix for this dataset\n",
    "with open(dist_path, 'rb') as f:\n",
    "    dist_data = pickle.load(f)\n",
    "    dist_matrix = torch.tensor(dist_data['dist_matrix']).to(device)\n",
    "\n",
    "# 准备特征数据\n",
    "feat_data = torch.tensor(feat_data).float()\n",
    "# 特征归一化\n",
    "scaler = MinMaxScaler()\n",
    "feat_data = torch.tensor(scaler.fit_transform(feat_data)).float().to(device)\n",
    "\n",
    "# 初始化模型\n",
    "bsne_model = BSNE_Transformer(\n",
    "    in_feat=feat_data.shape[1],\n",
    "    out_feat=2,\n",
    "    relation_nums=len(edge_indexs),\n",
    "    d_model=64,\n",
    "    nhead=args['num_heads'],\n",
    "    num_layers=args['num_layers'],\n",
    "    dim_feedforward=256,\n",
    "    drop_rate=args['drop_rate']\n",
    ").to(device)\n",
    "\n",
    "# 将边索引转移到设备\n",
    "for edge_index in edge_indexs:\n",
    "    edge_index[0] = edge_index[0].to(device)\n",
    "    edge_index[1] = [tensor.to(device) for tensor in edge_index[1]]\n",
    "\n",
    "all_local_losses = []  # 每个epoch的batch本地损失列表\n",
    "all_global_losses = []  # 每个epoch的batch全局损失列表\n",
    "all_bsne_losses = []  # 每个epoch的总BSNE损失\n",
    "epoch_avg_local = []  # 每个epoch的平均本地损失\n",
    "epoch_avg_global = []  # 每个epoch的平均全局损失\n",
    "\n",
    "print(\"\\n=== Starting Pretraining ===\")\n",
    "\n",
    "bsne_model.classifier.requires_grad_(False)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    filter(lambda p: p.requires_grad, bsne_model.parameters()),\n",
    "    lr=args['pretrain_lr'],\n",
    "    weight_decay=5e-5\n",
    ")\n",
    "pretrain_best_loss = float('inf')\n",
    "pretrain_no_improve = 0\n",
    "pretrain_early_stop = False\n",
    "\n",
    "temperature = 0.3# 越小区分性越强\n",
    "loss_threshold = 0.5\n",
    "sample_size = 60\n",
    "max_steps_per_center = 200  # 防止死循环\n",
    "max_epochs = 6\n",
    "center_indices = list(range(feat_data.shape[0]))\n",
    "# 在每轮epoch前随机打乱中心点顺序\n",
    "rd.shuffle(center_indices)\n",
    "\n",
    "# 限制训练的中心点数量\n",
    "center_indices = center_indices[:max_epochs]\n",
    "\n",
    "for epoch, center_idx in enumerate(center_indices):\n",
    "    print(f\"\\n=== Pretraining Epoch {epoch} (Center Node: {center_idx}) ===\")\n",
    "    step = 0\n",
    "\n",
    "    # temperature = temp_scheduler.get_temperature()\n",
    "    # print(f\"Current temperature: {temperature:.4f}\")\n",
    "\n",
    "    dist_row = dist_matrix[center_idx].cpu().numpy()\n",
    "    probs = np.exp(-dist_row / temperature)\n",
    "\n",
    "    probs[center_idx] = 0\n",
    "    probs = probs / (probs.sum() + 1e-10)\n",
    "\n",
    "    available_nodes = len(dist_row) - 1\n",
    "    while True:\n",
    "        bsne_model.train()\n",
    "        optimizer.zero_grad()\n",
    "\n",
    "        total_loss = 0.0  # 每个step都要重置\n",
    "        eps = 1e-10\n",
    "\n",
    "        # Build the Bp subgraph: sample neighbors weighted by the\n",
    "        # distance-based probabilities computed for this center node\n",
    "        actual_sample_size = min(sample_size, available_nodes)\n",
    "        if actual_sample_size <= 0:\n",
    "            # Isolated center node: bp_nodes would be undefined below and\n",
    "            # torch.stack would fail on an empty list, so skip this center.\n",
    "            print(f\"Center node {center_idx} has no neighbors; skipping\")\n",
    "            break\n",
    "        # NOTE(review): np.random.choice with replace=False raises if fewer\n",
    "        # than actual_sample_size entries have nonzero probability — assumed\n",
    "        # not to happen with these distance-derived probs; confirm for sparse graphs.\n",
    "        neighbors = np.random.choice(len(dist_row), size=actual_sample_size, p=probs, replace=False)\n",
    "        bp_nodes = neighbors.tolist()\n",
    "\n",
    "        # Embed every node in the Bp subgraph with the current model\n",
    "        bp_node_features = []\n",
    "        for node_idx in bp_nodes:\n",
    "            node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "            _, node_feature = bsne_model([node_subgraph])\n",
    "            bp_node_features.append(node_feature.squeeze(0))\n",
    "        bp_features = torch.stack(bp_node_features)\n",
    "\n",
    "        center_node_subgraph = create_node_subgraph(center_idx, feat_data, edge_indexs, device)\n",
    "        _, center_feature = bsne_model([center_node_subgraph])\n",
    "        center_feature = center_feature.squeeze(0)\n",
    "\n",
    "        #         # 构建Bu子图\n",
    "        #         if actual_sample_size > 0:\n",
    "        #             neighbors = np.random.choice(len(dist_row), size=actual_sample_size, replace=False)\n",
    "        #             bu_nodes = neighbors.tolist()\n",
    "\n",
    "        #         # 计算Bu子图中所有节点的特征\n",
    "        #         bu_node_features = []\n",
    "        #         for node_idx in bu_nodes:\n",
    "        #             node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "        #             _, node_feature = bsne_model([node_subgraph])\n",
    "        #             bu_node_features.append(node_feature.squeeze(0))\n",
    "        #         bu_features = torch.stack(bu_node_features)\n",
    "\n",
    "        # 计算loss\n",
    "        #         center_feature = bp_features[0]  # 中心节点是第一个\n",
    "\n",
    "        # 仅获取Bp子图中的其他节点（排除中心节点）\n",
    "        #         other_bp_indices = bp_nodes\n",
    "        #         other_bp_features = bp_features\n",
    "\n",
    "        #         P = probs[bp_nodes]\n",
    "        P = torch.tensor(probs[bp_nodes], device=device, dtype=torch.float32)\n",
    "        P = P/P.sum()\n",
    "#         print(P)\n",
    "#         p_entropy = -torch.sum(P * torch.log(P)).item()\n",
    "#         print(f\"p_entropy: {p_entropy:.4f}\")\n",
    "\n",
    "\n",
    "        # 计算Q向量（欧式距离）\n",
    "        feat_dists_bp = torch.cdist(center_feature.unsqueeze(0), bp_features).squeeze(0)\n",
    "        Q = torch.softmax(-feat_dists_bp, dim=0)\n",
    "\n",
    "        #         log_ratio = torch.log(P + eps) - torch.log(Q + eps)\n",
    "        # 原局部loss计算公式\n",
    "        #         log_ratio = torch.log(((P + eps)/(Q + eps)))\n",
    "        #         loss_local = log_ratio.mean()\n",
    "\n",
    "        # 计算Q向量（点积）\n",
    "        #         center_norm = F.normalize(center_feature, dim=0)\n",
    "        #         other_norm = F.normalize(other_bp_features, dim=1)\n",
    "        #         feat_dists_bp = center_norm @ other_norm.t()\n",
    "        # #         Q = torch.softmax(feat_dists_bp.squeeze(0), dim=0)\n",
    "        #         Q = feat_dists_bp/feat_dists_bp.sum()\n",
    "\n",
    "        #         M = 0.5 * (P + Q)\n",
    "        #         loss_local = 0.5 * (P * torch.log(P/M) + Q * torch.log(Q/M)).sum()\n",
    "        #         log_ratio = torch.abs(torch.log(P / (Q + eps)))\n",
    "        \n",
    "        \n",
    "        log_ratio = (torch.log(P / Q)) ** 2\n",
    "#         log_ratio = -torch.log(Q)\n",
    "#         log_ratio = (torch.log(P / Q))\n",
    "        loss_local = log_ratio.mean()\n",
    "\n",
    "        # 获取全局距离并计算概率\n",
    "        #         all_dists = dist_matrix[center_idx].cpu().numpy()\n",
    "        # #         unnorm_probs_global = np.exp(-all_dists)\n",
    "        #         unnorm_probs_global = np.power(10.0, -all_dists)\n",
    "        #         unnorm_probs_global[center_idx] = 0  # 排除自身\n",
    "\n",
    "        #         # 计算全局归一化常数\n",
    "        #         Z_global = unnorm_probs_global.sum() + eps\n",
    "\n",
    "        #         # 计算B_p节点的全局概率和\n",
    "        #         bp_global_prob_sum = unnorm_probs_global[bp_nodes].sum() / Z_global\n",
    "\n",
    "        #         # 计算k_Bp\n",
    "        #         N = dist_matrix.shape[0]\n",
    "        #         k_Bp = bp_global_prob_sum * (N / len(bp_nodes))\n",
    "\n",
    "        #         bu_features_ = bu_features[1:]  # 排除中心节点\n",
    "        #         feat_dists_bu = torch.cdist(center_feature.unsqueeze(0), bu_features).squeeze(0)\n",
    "        #         sum_e_bu = torch.exp(-feat_dists_bu).sum()\n",
    "        #         sum_e_bp = torch.exp(-feat_dists_bp).sum()\n",
    "\n",
    "        #         global_ratio = (sum_e_bu / (sum_e_bp + eps))\n",
    "\n",
    "        #         loss_global = torch.abs(torch.log(global_ratio))\n",
    "        #         #加上平方项\n",
    "        #         loss_global = (torch.log(global_ratio.clamp(min=eps, max=1e10)))**2\n",
    "\n",
    "        #         total_loss += loss_local + loss_global\n",
    "        #         total_loss.backward()\n",
    "        total_loss = loss_local\n",
    "        total_loss.backward()\n",
    "        optimizer.step()\n",
    "\n",
    "        #         for name, parms in bsne_model.named_parameters():\n",
    "        #             grad_value = parms.grad\n",
    "        #             if grad_value is not None:\n",
    "        #                 grad_mean = torch.mean(grad_value)\n",
    "        #             else:\n",
    "        #                 grad_mean = \"No gradient\"\n",
    "\n",
    "        #             print(f'-->name: {name} '\n",
    "        #                   f'--weight: {torch.mean(parms.data).item():.6f} '\n",
    "        #                   f'-->grad_value: {grad_mean}')\n",
    "\n",
    "        #         print(\"P:\",P)\n",
    "        #         print(\"Q:\",Q)\n",
    "        #         print(f\"Step {step}: BSNE_Loss={total_loss.item():.4f},local_loss: {loss_local.item()},global_loss: {loss_global.item()}\")\n",
    "        print(f\"Step {step}: local_loss: {loss_local.item()}\")\n",
    "        step += 1\n",
    "\n",
    "        if total_loss.item() < loss_threshold or step >= max_steps_per_center:\n",
    "            print(f\"Center node {center_idx} finished at step {step} with loss {total_loss.item():.4f}\")\n",
    "            break\n",
    "\n",
    "print(\"\\n=== Starting Fine-tuning ===\")\n",
    "bsne_model.classifier.requires_grad_(True)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    bsne_model.parameters(),\n",
    "    lr=args['finetune_lr'],\n",
    "    weight_decay=5e-5\n",
    ")\n",
    "scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n",
    "    optimizer, mode='max', factor=0.5, patience=10, verbose=True\n",
    ")\n",
    "\n",
    "best_val_auc = 0.0\n",
    "best_model_state = None\n",
    "train_pos, train_neg = pos_neg_split(idx_train, y_train)\n",
    "\n",
    "no_improve_epochs = 0\n",
    "early_stop = False\n",
    "\n",
    "for epoch in range(args['num_epochs']):\n",
    "    if early_stop:\n",
    "        break\n",
    "\n",
    "    bsne_model.train()\n",
    "    total_loss = 0.0\n",
    "\n",
    "    # 采样中心节点\n",
    "    batch_centers = rd.sample(train_pos + train_neg, args['batch_size'])\n",
    "    subgraph_data = []\n",
    "\n",
    "    for xi in batch_centers:\n",
    "        subgraph = create_node_subgraph(xi, feat_data, edge_indexs, device)\n",
    "        #         print(len(subgraph['features']))\n",
    "        subgraph_data.append(subgraph)\n",
    "\n",
    "    optimizer.zero_grad()\n",
    "\n",
    "    center_logits, _ = bsne_model(subgraph_data)  # [B, 2]\n",
    "\n",
    "    # 提取中心节点标签\n",
    "    labels_center = torch.tensor([labels[xi] for xi in batch_centers]).to(device).long()\n",
    "\n",
    "    # 计算分类损失\n",
    "    cls_loss = F.nll_loss(center_logits, labels_center)\n",
    "\n",
    "    cls_loss.backward()\n",
    "    optimizer.step()\n",
    "\n",
    "    total_loss += cls_loss.item()\n",
    "\n",
    "    avg_loss = total_loss / args['batch_size']\n",
    "    writer.add_scalar('FineTune/Train_Loss', avg_loss, epoch)\n",
    "\n",
    "    if epoch % 5 == 0:\n",
    "        val_auc, val_ap, val_f1, val_g_mean = test(idx_val, y_val, bsne_model, feat_data, edge_indexs, device)\n",
    "\n",
    "        writer.add_scalar('Validation/AUC', val_auc, epoch)\n",
    "        writer.add_scalar('Validation/F1', val_f1, epoch)\n",
    "        writer.add_scalar('Validation/GMean', val_g_mean, epoch)\n",
    "\n",
    "        print(f'Epoch: {epoch:03d} | Loss: {avg_loss:.4f} | Val AUC: {val_auc:.4f} | Val F1: {val_f1:.4f}')\n",
    "\n",
    "        scheduler.step(val_auc)\n",
    "\n",
    "        if val_auc > best_val_auc:\n",
    "            best_val_auc = val_auc\n",
    "            no_improve_epochs = 0\n",
    "            best_model_state = copy.deepcopy(bsne_model.state_dict())\n",
    "        else:\n",
    "            no_improve_epochs += 1\n",
    "\n",
    "        if no_improve_epochs >= args['patience']:\n",
    "            print(f\"Early stopping at epoch {epoch}\")\n",
    "            early_stop = True\n",
    "\n",
    "# Restore the best validation checkpoint; best_model_state is None when no\n",
    "# eval epoch ever improved (e.g. training interrupted before the first eval),\n",
    "# and load_state_dict(None) would raise.\n",
    "if best_model_state is not None:\n",
    "    bsne_model.load_state_dict(best_model_state)\n",
    "else:\n",
    "    print(\"Warning: no best checkpoint recorded; testing with the current weights\")\n",
    "\n",
    "# 最终测试\n",
    "test_auc, test_ap, test_f1, test_g_mean = test(idx_test, y_test, bsne_model, feat_data, edge_indexs, device)\n",
    "print(f'\\n=== Final Test Results ===')\n",
    "print(f'Test AUC: {test_auc:.4f} | Test AP: {test_ap:.4f} | Test F1: {test_f1:.4f} | G-mean: {test_g_mean:.4f}')\n",
    "writer.close()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "541c560d",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([0.0602, 0.0021, 0.0021, 0.0021, 0.0021, 0.0021, 0.0021, 0.0021, 0.0021,\n",
       "        0.0021, 0.0021, 0.0021, 0.0021, 0.0021, 0.0602, 0.0602, 0.0021, 0.0021,\n",
       "        0.0602, 0.0021, 0.0021, 0.0021, 0.0021, 0.0021, 0.0602, 0.0021, 0.0021,\n",
       "        0.0021, 0.0021, 0.0602, 0.0021, 0.0021, 0.0021, 0.0602, 0.0021, 0.0021,\n",
       "        0.0021, 0.0021, 0.0021, 0.0021, 0.0021, 0.0602, 0.0602, 0.0021, 0.0602,\n",
       "        0.0021, 0.0021, 0.0602, 0.0021, 0.0021, 0.0602, 0.0021, 0.0602, 0.0021,\n",
       "        0.0021, 0.0602, 0.0021, 0.0021, 0.0602, 0.0021], device='cuda:0')"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "P"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "0f1eaf5d",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([0.0046, 0.0027, 0.0094, 0.0113, 0.0058, 0.0094, 0.0152, 0.0040, 0.0041,\n",
       "        0.0055, 0.0657, 0.0063, 0.0137, 0.0030, 0.0193, 0.0295, 0.0018, 0.0068,\n",
       "        0.0417, 0.0199, 0.0064, 0.0018, 0.0326, 0.0024, 0.0849, 0.0153, 0.0185,\n",
       "        0.0144, 0.0101, 0.0302, 0.0082, 0.0050, 0.0108, 0.0140, 0.0126, 0.0093,\n",
       "        0.0186, 0.0104, 0.0253, 0.0364, 0.0308, 0.0056, 0.0215, 0.0149, 0.0085,\n",
       "        0.0065, 0.0211, 0.0375, 0.0083, 0.0156, 0.0333, 0.0061, 0.0336, 0.0121,\n",
       "        0.0161, 0.0347, 0.0011, 0.0101, 0.0037, 0.0319], device='cuda:0',\n",
       "       grad_fn=<SoftmaxBackward0>)"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "Q"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "bc89909a",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(1., device='cuda:0', grad_fn=<SumBackward0>)"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "Q.sum()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "6916cf21",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([ 2.5694, -0.2311, -1.4755, -1.6628, -0.9876, -1.4793, -1.9551, -0.6233,\n",
       "        -0.6478, -0.9479, -3.4210, -1.0786, -1.8557, -0.3272,  1.1390,  0.7152,\n",
       "         0.1907, -1.1539,  0.3671, -2.2245, -1.0840,  0.1650, -2.7206, -0.1135,\n",
       "        -0.3429, -1.9642, -2.1558, -1.9046, -1.5505,  0.6889, -1.3428, -0.8366,\n",
       "        -1.6189,  1.4565, -1.7708, -1.4613, -2.1576, -1.5819, -2.4665, -2.8296,\n",
       "        -2.6619,  2.3813,  1.0300, -1.9363,  1.9582, -1.0999, -2.2822,  0.4740,\n",
       "        -1.3551, -1.9811,  0.5929, -1.0368,  0.5842, -1.7319, -2.0168,  0.5518,\n",
       "         0.6389, -1.5462,  2.7946, -2.6981], device='cuda:0',\n",
       "       grad_fn=<LogBackward0>)"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.log(P / Q)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "d8f1e637",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([18.3773, 18.9101, 17.6657, 17.4784, 18.1536, 17.6619, 17.1860, 18.5178,\n",
       "        18.4934, 18.1933, 15.7202, 18.0625, 17.2854, 18.8139, 16.9468, 16.5230,\n",
       "        19.3319, 17.9872, 16.1749, 16.9167, 18.0571, 19.3062, 16.4205, 19.0276,\n",
       "        15.4649, 17.1769, 16.9854, 17.2365, 17.5906, 16.4967, 17.7983, 18.3045,\n",
       "        17.5223, 17.2644, 17.3703, 17.6799, 16.9836, 17.5593, 16.6747, 16.3116,\n",
       "        16.4792, 18.1891, 16.8379, 17.2048, 17.7660, 18.0413, 16.8589, 16.2818,\n",
       "        17.7860, 17.1600, 16.4007, 18.1043, 16.3920, 17.4093, 17.1244, 16.3596,\n",
       "        19.7801, 17.5949, 18.6025, 16.4430], device='cuda:0',\n",
       "       grad_fn=<SqueezeBackward1>)"
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "feat_dists_bp"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2cd2e329",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "06cfc737",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "07714ddc",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "be821ebc",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cuda\n",
      "loading data...\n",
      "\n",
      "=== Starting Fine-tuning ===\n",
      "Epoch: 000 | Loss: 0.1295 | Val AUC: 0.1935 | Val F1: 0.1842\n",
      "Epoch: 005 | Loss: 0.0202 | Val AUC: 0.7797 | Val F1: 0.4751\n",
      "Epoch: 010 | Loss: 0.0042 | Val AUC: 0.8488 | Val F1: 0.4751\n",
      "Epoch: 015 | Loss: 0.0172 | Val AUC: 0.8509 | Val F1: 0.4751\n",
      "Epoch: 020 | Loss: 0.0062 | Val AUC: 0.8458 | Val F1: 0.4751\n",
      "Epoch: 025 | Loss: 0.0047 | Val AUC: 0.8428 | Val F1: 0.4751\n",
      "Epoch: 030 | Loss: 0.0087 | Val AUC: 0.8475 | Val F1: 0.4751\n",
      "Epoch: 035 | Loss: 0.0106 | Val AUC: 0.8578 | Val F1: 0.4751\n",
      "Epoch: 040 | Loss: 0.0083 | Val AUC: 0.8616 | Val F1: 0.4751\n",
      "Epoch: 045 | Loss: 0.0029 | Val AUC: 0.8661 | Val F1: 0.5018\n",
      "Epoch: 050 | Loss: 0.0173 | Val AUC: 0.8736 | Val F1: 0.7153\n",
      "Epoch: 055 | Loss: 0.0272 | Val AUC: 0.8817 | Val F1: 0.6775\n",
      "Epoch: 060 | Loss: 0.0357 | Val AUC: 0.8862 | Val F1: 0.5406\n",
      "Epoch: 065 | Loss: 0.0112 | Val AUC: 0.8912 | Val F1: 0.5224\n",
      "Epoch: 070 | Loss: 0.0414 | Val AUC: 0.8875 | Val F1: 0.6576\n",
      "Epoch: 075 | Loss: 0.0149 | Val AUC: 0.8890 | Val F1: 0.7307\n",
      "Epoch: 080 | Loss: 0.0578 | Val AUC: 0.8890 | Val F1: 0.7255\n",
      "Epoch: 085 | Loss: 0.0539 | Val AUC: 0.8912 | Val F1: 0.7007\n",
      "Epoch: 090 | Loss: 0.0036 | Val AUC: 0.8918 | Val F1: 0.7372\n",
      "Epoch: 095 | Loss: 0.0074 | Val AUC: 0.8922 | Val F1: 0.7716\n",
      "Epoch: 100 | Loss: 0.0365 | Val AUC: 0.8932 | Val F1: 0.7609\n",
      "Epoch: 105 | Loss: 0.0117 | Val AUC: 0.8915 | Val F1: 0.7632\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
      "\u001b[31mKeyboardInterrupt\u001b[39m                         Traceback (most recent call last)",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[9]\u001b[39m\u001b[32m, line 145\u001b[39m\n\u001b[32m    142\u001b[39m writer.add_scalar(\u001b[33m'\u001b[39m\u001b[33mFineTune/Train_Loss\u001b[39m\u001b[33m'\u001b[39m, avg_loss, epoch)\n\u001b[32m    144\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m epoch % \u001b[32m5\u001b[39m == \u001b[32m0\u001b[39m:\n\u001b[32m--> \u001b[39m\u001b[32m145\u001b[39m     val_auc, val_ap, val_f1, val_g_mean = \u001b[43mtest\u001b[49m\u001b[43m(\u001b[49m\u001b[43midx_val\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43my_val\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mbsne_model\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfeat_data\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43medge_indexs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdevice\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m    147\u001b[39m     writer.add_scalar(\u001b[33m'\u001b[39m\u001b[33mValidation/AUC\u001b[39m\u001b[33m'\u001b[39m, val_auc, epoch)\n\u001b[32m    148\u001b[39m     writer.add_scalar(\u001b[33m'\u001b[39m\u001b[33mValidation/F1\u001b[39m\u001b[33m'\u001b[39m, val_f1, epoch)\n",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[1]\u001b[39m\u001b[32m, line 272\u001b[39m, in \u001b[36mtest\u001b[39m\u001b[34m(idx_eval, y_eval, model, feat_data, edge_indexs, device, batch_size)\u001b[39m\n\u001b[32m    270\u001b[39m \u001b[38;5;66;03m# 为每个中心节点构建子图\u001b[39;00m\n\u001b[32m    271\u001b[39m \u001b[38;5;28;01mfor\u001b[39;00m xi \u001b[38;5;129;01min\u001b[39;00m batch_centers:\n\u001b[32m--> \u001b[39m\u001b[32m272\u001b[39m     subgraph = \u001b[43mcreate_node_subgraph\u001b[49m\u001b[43m(\u001b[49m\u001b[43mxi\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mfeat_data\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43medge_indexs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdevice\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m    273\u001b[39m     subgraph_data.append(subgraph)\n\u001b[32m    275\u001b[39m \u001b[38;5;66;03m# 获取中心节点预测\u001b[39;00m\n",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[1]\u001b[39m\u001b[32m, line 69\u001b[39m, in \u001b[36mcreate_node_subgraph\u001b[39m\u001b[34m(node_idx, feat_data, edge_indexs, device)\u001b[39m\n\u001b[32m     67\u001b[39m sub_edge_index = []\n\u001b[32m     68\u001b[39m \u001b[38;5;28;01mfor\u001b[39;00m rel_idx \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(\u001b[38;5;28mlen\u001b[39m(edge_indexs)):\n\u001b[32m---> \u001b[39m\u001b[32m69\u001b[39m     edge_index = \u001b[43medge_indexs\u001b[49m\u001b[43m[\u001b[49m\u001b[43mrel_idx\u001b[49m\u001b[43m]\u001b[49m\u001b[43m[\u001b[49m\u001b[32;43m0\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m.\u001b[49m\u001b[43mcpu\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m.numpy()\n\u001b[32m     70\u001b[39m     mask = np.isin(edge_index[\u001b[32m0\u001b[39m], sub_nodes) & np.isin(edge_index[\u001b[32m1\u001b[39m], sub_nodes)\n\u001b[32m     71\u001b[39m     local_edges = edge_index[:, mask]\n",
      "\u001b[31mKeyboardInterrupt\u001b[39m: "
     ]
    }
   ],
   "source": [
    "# Fine-tuning / classification test (0708)\n",
    "\n",
    "\n",
    "# Hyperparameter settings\n",
    "args = {\n",
    "    \"dataset\": \"amazon\",\n",
    "#     \"dataset\": \"yelp\",\n",
    "    \"batch_size\": 8,\n",
    "    \"sample_size\": 50,\n",
    "    \"weight_decay\": 0.00005,\n",
    "    \"emb_size\": 32,\n",
    "    \"pretrain_epochs\": 50,\n",
    "    \"pretrain_lr\": 0.0005,\n",
    "    \"finetune_lr\": 0.0005,\n",
    "    \"num_epochs\": 200,\n",
    "    \"pretrain_patience\": 20,\n",
    "    \"patience\": 30,\n",
    "    \"tsne_weight\": 0.3,\n",
    "    \"weight\": 0.6,\n",
    "    \"layers\": 7,\n",
    "    \"test_size\": 0.6,\n",
    "    \"val_size\": 0.5,\n",
    "    \"layers_tree\": 7,\n",
    "    \"seed\": 76,\n",
    "    \"num_heads\": 4,\n",
    "    \"num_layers\": 2,\n",
    "    \"drop_rate\": 0.5\n",
    "}\n",
    "\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "print(device)\n",
    "\n",
    "timestamp = time.strftime(\"%Y%m%d-%H%M%S\")\n",
    "writer = SummaryWriter(f'runs/{args[\"dataset\"]}_{timestamp}')\n",
    "\n",
    "print('loading data...')\n",
    "prefix = \"../../data/\"\n",
    "\n",
    "edge_indexs, feat_data, labels = load_data(args['dataset'], args['layers_tree'], prefix)\n",
    "\n",
    "# Seed every RNG in use (numpy, python `random`, and torch) for reproducibility;\n",
    "# the previous version seeded numpy/random but left torch unseeded, so model\n",
    "# initialization and dropout were not reproducible.\n",
    "np.random.seed(args['seed'])\n",
    "rd.seed(args['seed'])\n",
    "torch.manual_seed(args['seed'])\n",
    "\n",
    "if args['dataset'] == 'yelp':\n",
    "    index = list(range(len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels, stratify=labels,\n",
    "                                                                    test_size=args['test_size'], random_state=2,\n",
    "                                                                    shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"YelpChi_shortest_distance.pkl\")\n",
    "elif args['dataset'] == 'amazon':\n",
    "    # Nodes 0..3304 are excluded for Amazon (matches the labels[3305:] slicing below).\n",
    "    index = list(range(3305, len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels[3305:],\n",
    "                                                                    stratify=labels[3305:],\n",
    "                                                                    test_size=args['test_size'],\n",
    "                                                                    random_state=2, shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"Amazon_shortest_distance.pkl\")\n",
    "else:\n",
    "    # Fail fast instead of hitting a NameError on dist_path below.\n",
    "    raise ValueError(f\"unsupported dataset: {args['dataset']}\")\n",
    "\n",
    "with open(dist_path, 'rb') as f:\n",
    "    dist_data = pickle.load(f)\n",
    "    dist_matrix = torch.tensor(dist_data['dist_matrix']).to(device)\n",
    "\n",
    "# Prepare feature data\n",
    "feat_data = torch.tensor(feat_data).float()\n",
    "# Min-max normalize the features\n",
    "scaler = MinMaxScaler()\n",
    "feat_data = torch.tensor(scaler.fit_transform(feat_data)).float().to(device)\n",
    "\n",
    "# Initialize the model\n",
    "bsne_model = BSNE_Transformer(\n",
    "    in_feat=feat_data.shape[1],\n",
    "    out_feat=2,\n",
    "    relation_nums=len(edge_indexs),\n",
    "    d_model=64,\n",
    "    nhead=args['num_heads'],\n",
    "    num_layers=args['num_layers'],\n",
    "    dim_feedforward=256,\n",
    "    drop_rate=args['drop_rate']\n",
    ").to(device)\n",
    "\n",
    "# Move the edge indices onto the device\n",
    "for edge_index in edge_indexs:\n",
    "    edge_index[0] = edge_index[0].to(device)\n",
    "    edge_index[1] = [tensor.to(device) for tensor in edge_index[1]]\n",
    "\n",
    "\n",
    "print(\"\\n=== Starting Fine-tuning ===\")\n",
    "bsne_model.classifier.requires_grad_(True)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    bsne_model.parameters(),\n",
    "    lr=args['finetune_lr'],\n",
    "    weight_decay=args['weight_decay']  # same value as the previous 5e-5 literal, now config-driven\n",
    ")\n",
    "scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n",
    "    optimizer, mode='max', factor=0.5, patience=10, verbose=True\n",
    ")\n",
    "\n",
    "best_val_auc = 0.0\n",
    "best_model_state = None\n",
    "train_pos, train_neg = pos_neg_split(idx_train, y_train)\n",
    "\n",
    "no_improve_epochs = 0\n",
    "early_stop = False\n",
    "\n",
    "for epoch in range(args['num_epochs']):\n",
    "    if early_stop:\n",
    "        break\n",
    "\n",
    "    bsne_model.train()\n",
    "    total_loss = 0.0\n",
    "\n",
    "    # Sample center nodes and build one subgraph per center\n",
    "    batch_centers = rd.sample(train_pos + train_neg, args['batch_size'])\n",
    "    subgraph_data = [create_node_subgraph(xi, feat_data, edge_indexs, device) for xi in batch_centers]\n",
    "\n",
    "    optimizer.zero_grad()\n",
    "\n",
    "    center_logits, _ = bsne_model(subgraph_data)  # [B, 2]\n",
    "\n",
    "    # Labels of the center nodes\n",
    "    labels_center = torch.tensor([labels[xi] for xi in batch_centers]).to(device).long()\n",
    "\n",
    "    # Classification loss (nll_loss expects log-probabilities)\n",
    "    cls_loss = F.nll_loss(center_logits, labels_center)\n",
    "\n",
    "    cls_loss.backward()\n",
    "    optimizer.step()\n",
    "\n",
    "    total_loss += cls_loss.item()\n",
    "\n",
    "    avg_loss = total_loss / args['batch_size']\n",
    "    writer.add_scalar('FineTune/Train_Loss', avg_loss, epoch)\n",
    "\n",
    "    # Validate every 5 epochs\n",
    "    if epoch % 5 == 0:\n",
    "        val_auc, val_ap, val_f1, val_g_mean = test(idx_val, y_val, bsne_model, feat_data, edge_indexs, device)\n",
    "\n",
    "        writer.add_scalar('Validation/AUC', val_auc, epoch)\n",
    "        writer.add_scalar('Validation/F1', val_f1, epoch)\n",
    "        writer.add_scalar('Validation/GMean', val_g_mean, epoch)\n",
    "\n",
    "        print(f'Epoch: {epoch:03d} | Loss: {avg_loss:.4f} | Val AUC: {val_auc:.4f} | Val F1: {val_f1:.4f}')\n",
    "\n",
    "        scheduler.step(val_auc)\n",
    "\n",
    "        if val_auc > best_val_auc:\n",
    "            best_val_auc = val_auc\n",
    "            no_improve_epochs = 0\n",
    "            best_model_state = copy.deepcopy(bsne_model.state_dict())\n",
    "        else:\n",
    "            no_improve_epochs += 1\n",
    "\n",
    "        # NOTE: counted once per validation (every 5 epochs), so patience=30 spans 150 epochs\n",
    "        if no_improve_epochs >= args['patience']:\n",
    "            print(f\"Early stopping at epoch {epoch}\")\n",
    "            early_stop = True\n",
    "\n",
    "# Restore the best checkpoint; guard against None (e.g. the run was interrupted\n",
    "# before any validation improved on the initial best_val_auc).\n",
    "if best_model_state is not None:\n",
    "    bsne_model.load_state_dict(best_model_state)\n",
    "\n",
    "# Final evaluation on the held-out test split\n",
    "test_auc, test_ap, test_f1, test_g_mean = test(idx_test, y_test, bsne_model, feat_data, edge_indexs, device)\n",
    "print(f'\\n=== Final Test Results ===')\n",
    "print(f'Test AUC: {test_auc:.4f} | Test AP: {test_ap:.4f} | Test F1: {test_f1:.4f} | G-mean: {test_g_mean:.4f}')\n",
    "writer.close()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "965b185f",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "509fd972",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "15bfb7b4",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "bp_features: tensor([[ 1.5379e-01, -2.5115e-01, -1.4835e+00,  ..., -3.8957e+00,\n",
      "         -3.1473e-01,  1.0758e-01],\n",
      "        [ 5.9252e-01, -1.2292e-02, -3.0347e-01,  ..., -2.4255e+00,\n",
      "         -6.3382e-04, -2.3526e-01],\n",
      "        [-1.4551e-01,  4.6990e-02,  6.0272e-03,  ..., -2.2072e+00,\n",
      "         -4.1145e-01,  3.6693e-01],\n",
      "        ...,\n",
      "        [-4.5456e-01, -1.9187e-01, -6.8510e-02,  ..., -1.5710e+00,\n",
      "         -6.7994e-01,  8.8107e-01],\n",
      "        [-6.9214e-02, -9.3351e-01, -1.5648e-01,  ..., -3.2156e+00,\n",
      "          3.9368e-01,  1.2101e+00],\n",
      "        [ 4.1762e-02, -3.7311e-01, -1.0503e-01,  ...,  1.5461e-01,\n",
      "         -4.5418e-01,  4.0897e-02]], device='cuda:0', grad_fn=<StackBackward0>)\n",
      "bu_features: tensor([[-0.0184, -0.5900, -0.7437,  ..., -1.0337,  1.4888, -1.1678],\n",
      "        [ 0.3477,  0.5557, -0.7436,  ...,  0.1818, -0.5751, -0.3329],\n",
      "        [ 0.0310,  0.8537, -1.4270,  ..., -0.1282, -0.2549,  0.1970],\n",
      "        ...,\n",
      "        [ 1.7768, -0.5098,  0.2406,  ..., -0.1706, -0.4174,  0.7792],\n",
      "        [ 0.1729,  0.1236, -0.6441,  ..., -3.1630, -0.0760, -0.6291],\n",
      "        [-0.6186,  1.7482, -0.7534,  ..., -1.9393,  0.1020,  0.0648]],\n",
      "       device='cuda:0', grad_fn=<StackBackward0>)\n"
     ]
    },
    {
     "ename": "NameError",
     "evalue": "name 'orig_dists_bp' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
      "\u001b[31mNameError\u001b[39m                                 Traceback (most recent call last)",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[4]\u001b[39m\u001b[32m, line 3\u001b[39m\n\u001b[32m      1\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33m\"\u001b[39m\u001b[33mbp_features:\u001b[39m\u001b[33m\"\u001b[39m,bp_features)\n\u001b[32m      2\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33m\"\u001b[39m\u001b[33mbu_features:\u001b[39m\u001b[33m\"\u001b[39m,bu_features)\n\u001b[32m----> \u001b[39m\u001b[32m3\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33m\"\u001b[39m\u001b[33morig_dists_bp:\u001b[39m\u001b[33m\"\u001b[39m,\u001b[43morig_dists_bp\u001b[49m)\n\u001b[32m      4\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33m\"\u001b[39m\u001b[33mP:\u001b[39m\u001b[33m\"\u001b[39m,P)\n\u001b[32m      5\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33m\"\u001b[39m\u001b[33mQ:\u001b[39m\u001b[33m\"\u001b[39m,Q)\n",
      "\u001b[31mNameError\u001b[39m: name 'orig_dists_bp' is not defined"
     ]
    }
   ],
   "source": [
    "# Debug cell: dump intermediate tensors left in the kernel namespace by an\n",
    "# earlier run (raises NameError on a fresh kernel — presumably these globals\n",
    "# come from a training/debug pass; confirm before relying on this cell).\n",
    "print(\"probs:\",probs)\n",
    "print(\"center_feature:\",center_feature)\n",
    "print(\"bp_features:\",bp_features)\n",
    "print(\"bu_features:\",bu_features)\n",
    "\n",
    "print(\"P:\",P)\n",
    "print(\"Q:\",Q)\n",
    "\n",
    "print(\"feat_dists_bp:\",feat_dists_bp)\n",
    "print(\"feat_dists_bu:\",feat_dists_bu)\n",
    "print(\"sum_e_bp:\",sum_e_bp)\n",
    "print(\"sum_e_bu:\",sum_e_bu)\n",
    "print(\"global_ratio:\",global_ratio)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2e87fc62",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "fb44953e",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(1.0000)"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Sanity check: probs should sum to 1 (the stored output shows tensor(1.0000)).\n",
    "probs.sum()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "069c1034",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "-->name: feature_proj.0.weight -->grad_requirs: True --weight: 0.008665 -->grad_value: No gradient\n",
      "-->name: feature_proj.0.bias -->grad_requirs: True --weight: 0.010582 -->grad_value: No gradient\n",
      "-->name: feature_proj.1.weight -->grad_requirs: True --weight: 0.994209 -->grad_value: No gradient\n",
      "-->name: feature_proj.1.bias -->grad_requirs: True --weight: -0.012147 -->grad_value: No gradient\n",
      "-->name: norm.weight -->grad_requirs: True --weight: 0.996044 -->grad_value: No gradient\n",
      "-->name: norm.bias -->grad_requirs: True --weight: -0.003284 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.0.self_attn.in_proj_weight -->grad_requirs: True --weight: 0.001154 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.0.self_attn.in_proj_bias -->grad_requirs: True --weight: 0.000770 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.0.self_attn.out_proj.weight -->grad_requirs: True --weight: 0.000251 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.0.self_attn.out_proj.bias -->grad_requirs: True --weight: 0.002110 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.0.linear1.weight -->grad_requirs: True --weight: 0.000633 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.0.linear1.bias -->grad_requirs: True --weight: -0.007763 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.0.linear2.weight -->grad_requirs: True --weight: 0.000221 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.0.linear2.bias -->grad_requirs: True --weight: 0.005467 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.0.norm1.weight -->grad_requirs: True --weight: 1.003334 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.0.norm1.bias -->grad_requirs: True --weight: -0.006549 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.0.norm2.weight -->grad_requirs: True --weight: 1.000152 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.0.norm2.bias -->grad_requirs: True --weight: -0.001157 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.1.self_attn.in_proj_weight -->grad_requirs: True --weight: -0.001489 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.1.self_attn.in_proj_bias -->grad_requirs: True --weight: -0.003456 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.1.self_attn.out_proj.weight -->grad_requirs: True --weight: -0.000381 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.1.self_attn.out_proj.bias -->grad_requirs: True --weight: -0.005391 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.1.linear1.weight -->grad_requirs: True --weight: -0.000333 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.1.linear1.bias -->grad_requirs: True --weight: -0.014522 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.1.linear2.weight -->grad_requirs: True --weight: 0.000884 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.1.linear2.bias -->grad_requirs: True --weight: 0.008839 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.1.norm1.weight -->grad_requirs: True --weight: 1.000222 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.1.norm1.bias -->grad_requirs: True --weight: -0.004946 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.1.norm2.weight -->grad_requirs: True --weight: 0.988574 -->grad_value: No gradient\n",
      "-->name: relation_encoders.0.layers.1.norm2.bias -->grad_requirs: True --weight: 0.001514 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.0.self_attn.in_proj_weight -->grad_requirs: True --weight: 0.001059 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.0.self_attn.in_proj_bias -->grad_requirs: True --weight: -0.002641 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.0.self_attn.out_proj.weight -->grad_requirs: True --weight: -0.001959 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.0.self_attn.out_proj.bias -->grad_requirs: True --weight: 0.004741 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.0.linear1.weight -->grad_requirs: True --weight: 0.000244 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.0.linear1.bias -->grad_requirs: True --weight: -0.014904 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.0.linear2.weight -->grad_requirs: True --weight: -0.003943 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.0.linear2.bias -->grad_requirs: True --weight: -0.000373 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.0.norm1.weight -->grad_requirs: True --weight: 1.002805 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.0.norm1.bias -->grad_requirs: True --weight: 0.000757 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.0.norm2.weight -->grad_requirs: True --weight: 1.001122 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.0.norm2.bias -->grad_requirs: True --weight: -0.005578 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.1.self_attn.in_proj_weight -->grad_requirs: True --weight: 0.001208 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.1.self_attn.in_proj_bias -->grad_requirs: True --weight: -0.000842 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.1.self_attn.out_proj.weight -->grad_requirs: True --weight: -0.002608 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.1.self_attn.out_proj.bias -->grad_requirs: True --weight: 0.000255 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.1.linear1.weight -->grad_requirs: True --weight: -0.000620 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.1.linear1.bias -->grad_requirs: True --weight: -0.009980 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.1.linear2.weight -->grad_requirs: True --weight: 0.000296 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.1.linear2.bias -->grad_requirs: True --weight: 0.000464 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.1.norm1.weight -->grad_requirs: True --weight: 0.999484 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.1.norm1.bias -->grad_requirs: True --weight: 0.012806 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.1.norm2.weight -->grad_requirs: True --weight: 1.025239 -->grad_value: No gradient\n",
      "-->name: relation_encoders.1.layers.1.norm2.bias -->grad_requirs: True --weight: -0.002184 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.0.self_attn.in_proj_weight -->grad_requirs: True --weight: 0.001369 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.0.self_attn.in_proj_bias -->grad_requirs: True --weight: -0.000463 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.0.self_attn.out_proj.weight -->grad_requirs: True --weight: 0.000816 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.0.self_attn.out_proj.bias -->grad_requirs: True --weight: 0.003603 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.0.linear1.weight -->grad_requirs: True --weight: 0.000447 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.0.linear1.bias -->grad_requirs: True --weight: -0.000787 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.0.linear2.weight -->grad_requirs: True --weight: -0.001296 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.0.linear2.bias -->grad_requirs: True --weight: 0.000606 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.0.norm1.weight -->grad_requirs: True --weight: 0.997347 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.0.norm1.bias -->grad_requirs: True --weight: -0.000370 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.0.norm2.weight -->grad_requirs: True --weight: 0.999627 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.0.norm2.bias -->grad_requirs: True --weight: 0.001037 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.1.self_attn.in_proj_weight -->grad_requirs: True --weight: -0.000457 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.1.self_attn.in_proj_bias -->grad_requirs: True --weight: -0.004389 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.1.self_attn.out_proj.weight -->grad_requirs: True --weight: 0.000166 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.1.self_attn.out_proj.bias -->grad_requirs: True --weight: -0.000464 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.1.linear1.weight -->grad_requirs: True --weight: 0.000110 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.1.linear1.bias -->grad_requirs: True --weight: -0.002366 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.1.linear2.weight -->grad_requirs: True --weight: 0.001228 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.1.linear2.bias -->grad_requirs: True --weight: 0.005763 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.1.norm1.weight -->grad_requirs: True --weight: 0.999454 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.1.norm1.bias -->grad_requirs: True --weight: -0.000763 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.1.norm2.weight -->grad_requirs: True --weight: 0.995095 -->grad_value: No gradient\n",
      "-->name: relation_encoders.2.layers.1.norm2.bias -->grad_requirs: True --weight: -0.002613 -->grad_value: No gradient\n",
      "-->name: classifier.0.weight -->grad_requirs: False --weight: -0.000072 -->grad_value: No gradient\n",
      "-->name: classifier.0.bias -->grad_requirs: False --weight: 0.001129 -->grad_value: No gradient\n",
      "-->name: classifier.3.weight -->grad_requirs: False --weight: -0.001398 -->grad_value: No gradient\n",
      "-->name: classifier.3.bias -->grad_requirs: False --weight: -0.008921 -->grad_value: No gradient\n"
     ]
    }
   ],
   "source": [
    "# Inspect every parameter of bsne_model: requires_grad flag, mean weight value,\n",
    "# and mean gradient (placeholder text when no gradient has been stored).\n",
    "for name, parms in bsne_model.named_parameters():\n",
    "    grad_value = parms.grad\n",
    "    grad_mean = torch.mean(grad_value) if grad_value is not None else \"No gradient\"\n",
    "\n",
    "    print(f'-->name: {name} '\n",
    "          f'-->grad_requirs: {parms.requires_grad} '\n",
    "          f'--weight: {torch.mean(parms.data).item():.6f} '\n",
    "          f'-->grad_value: {grad_mean}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "adc2ddbd",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "bp_features: tensor([[-0.5702,  1.3168, -0.1787,  ...,  0.7632,  0.0429, -0.1725],\n",
      "        [ 0.1251, -1.1670,  0.3653,  ...,  1.0615, -0.5137,  0.9988],\n",
      "        [-1.0533, -0.3202,  0.4615,  ...,  1.0612, -0.6082,  0.9952],\n",
      "        ...,\n",
      "        [-0.3784, -0.1971,  0.5413,  ...,  0.5937, -0.9628, -0.3055],\n",
      "        [-0.8808, -0.1173,  0.2112,  ..., -1.0315, -1.3149,  0.8907],\n",
      "        [-2.3371,  0.6095, -0.0873,  ...,  0.0804, -0.5554,  0.6815]],\n",
      "       device='cuda:0', grad_fn=<StackBackward0>)\n",
      "bu_features: tensor([[-0.2823,  0.3090,  0.5724,  ...,  0.4419, -1.0853, -1.1917],\n",
      "        [-0.2784, -0.3072,  0.2529,  ...,  0.5045, -0.9009, -1.1452],\n",
      "        [-1.5317, -0.4242,  0.1296,  ..., -0.1155,  0.4415,  0.5392],\n",
      "        ...,\n",
      "        [ 0.7232, -2.5266,  0.5857,  ...,  0.1232, -0.4201,  0.9106],\n",
      "        [ 0.3383, -0.7859,  0.3099,  ...,  0.0577, -0.6736, -1.0603],\n",
      "        [ 0.0080,  0.0586,  0.5155,  ...,  0.1816, -0.3376, -0.1120]],\n",
      "       device='cuda:0', grad_fn=<StackBackward0>)\n",
      "orig_dists_bp: [2 1 1 1 2 1 1 1 2 1 2 2 1 1 2 2 1 1 1 1 2 1 1 2 1 1 2 2 2 2 1 1 2 2 2 1 1\n",
      " 1 1 2]\n",
      "P: tensor([0.0015, 0.0424, 0.0424, 0.0424, 0.0015, 0.0424, 0.0424, 0.0424, 0.0015,\n",
      "        0.0424, 0.0015, 0.0015, 0.0424, 0.0424, 0.0015, 0.0015, 0.0424, 0.0424,\n",
      "        0.0424, 0.0424, 0.0015, 0.0424, 0.0424, 0.0015, 0.0424, 0.0424, 0.0015,\n",
      "        0.0015, 0.0015, 0.0015, 0.0424, 0.0424, 0.0015, 0.0015, 0.0015, 0.0424,\n",
      "        0.0424, 0.0424, 0.0424, 0.0015], device='cuda:0', dtype=torch.float64)\n",
      "feat_dists_bp: tensor([18.2731, 16.3308, 15.3421, 16.1293, 17.3107, 15.8543, 15.5849, 16.0872,\n",
      "        17.1668, 15.5329, 17.4775, 13.7219, 16.3866, 15.4689, 17.1367, 16.1066,\n",
      "        15.8276, 17.2620, 14.2742, 15.3443, 15.9957, 15.4327, 16.2292, 15.6903,\n",
      "        16.0715, 16.0175, 17.6961, 15.9743, 17.8844, 15.9710, 16.4330, 16.2207,\n",
      "        18.3810, 18.1741, 19.0851, 15.8568, 15.9311, 17.3604, 15.2764, 17.7107],\n",
      "       device='cuda:0', grad_fn=<SqueezeBackward1>)\n",
      "Q: tensor([0.0062, 0.0208, 0.0385, 0.0236, 0.0113, 0.0280, 0.0331, 0.0242, 0.0124,\n",
      "        0.0342, 0.0102, 0.1054, 0.0201, 0.0356, 0.0126, 0.0239, 0.0285, 0.0117,\n",
      "        0.0748, 0.0384, 0.0256, 0.0364, 0.0222, 0.0310, 0.0245, 0.0253, 0.0089,\n",
      "        0.0260, 0.0079, 0.0260, 0.0195, 0.0223, 0.0058, 0.0066, 0.0038, 0.0280,\n",
      "        0.0267, 0.0110, 0.0401, 0.0088], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "unnorm_probs_global: [0.01 0.1  0.01 ... 0.01 0.01 0.01]\n",
      "Z_global: 235.74600000010003\n",
      "bp_global_prob_sum: 0.01047737819517172\n",
      "k_Bp: 3.128545129078276\n",
      "feat_dists_bu: tensor([16.4020, 16.3742, 18.9296, 17.1797, 16.4774, 16.7754, 17.3933, 17.5789,\n",
      "        17.6073, 16.7882, 16.0743, 15.5274, 15.9045, 18.0716, 15.4278, 14.1915,\n",
      "        15.3967, 14.9977, 17.3479, 18.7902, 16.9052, 14.5443, 16.8814, 16.4295,\n",
      "        15.1829, 17.4214, 18.7773, 16.6827, 17.3622, 17.5833, 18.2887, 17.0419,\n",
      "        15.3728, 17.9242, 15.3646, 18.1372, 16.5211, 16.9873, 17.0721, 19.0133],\n",
      "       device='cuda:0', grad_fn=<SqueezeBackward1>)\n",
      "sum_e_bp: tensor(24.7924, device='cuda:0', grad_fn=<SumBackward0>)\n",
      "sum_e_bu: tensor(23.9281, device='cuda:0', grad_fn=<SumBackward0>)\n",
      "global_ratio: tensor(3.0195, device='cuda:0', grad_fn=<MulBackward0>)\n"
     ]
    }
   ],
   "source": [
    "# Debug cell: dump the loss intermediates left in the kernel namespace by an\n",
    "# earlier run (bp/bu features, P/Q distributions, distance vectors, global stats).\n",
    "# Raises NameError on a fresh kernel — these globals must exist beforehand.\n",
    "print(\"bp_features:\",bp_features)\n",
    "print(\"bu_features:\",bu_features)\n",
    "print(\"orig_dists_bp:\",orig_dists_bp)\n",
    "print(\"P:\",P)\n",
    "\n",
    "\n",
    "print(\"feat_dists_bp:\",feat_dists_bp)\n",
    "print(\"Q:\",Q)\n",
    "\n",
    "\n",
    "print(\"unnorm_probs_global:\",unnorm_probs_global)\n",
    "print(\"Z_global:\",Z_global)\n",
    "print(\"bp_global_prob_sum:\",bp_global_prob_sum)\n",
    "print(\"k_Bp:\",k_Bp)\n",
    "print(\"feat_dists_bu:\",feat_dists_bu)\n",
    "print(\"sum_e_bp:\",sum_e_bp)\n",
    "print(\"sum_e_bu:\",sum_e_bu)\n",
    "print(\"global_ratio:\",global_ratio)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "c5142326",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "bp_features: tensor([[ 0.0735,  0.1432,  0.2947,  ..., -0.0746,  0.5167, -0.1833],\n",
      "        [ 0.8301,  0.0871, -1.5590,  ...,  0.5665, -0.5540,  0.1626],\n",
      "        [ 1.5938, -0.7020, -0.6375,  ...,  0.5392, -2.2870,  0.0392],\n",
      "        ...,\n",
      "        [ 0.2287,  0.2691, -0.6960,  ...,  1.8262, -0.2489, -1.0601],\n",
      "        [ 1.4011,  0.6579,  0.0041,  ...,  0.4725, -0.4251, -0.2861],\n",
      "        [ 0.7985,  0.3066, -0.7456,  ...,  1.0053, -0.0059,  0.4827]],\n",
      "       device='cuda:0', grad_fn=<StackBackward0>)\n",
      "bu_features: tensor([[ 0.6321,  0.6420, -0.1064,  ...,  1.5194, -1.2372, -0.6961],\n",
      "        [-0.0558, -0.0347, -0.2537,  ..., -0.3866,  0.1960, -0.6085],\n",
      "        [ 1.3395,  0.4545, -1.1564,  ...,  0.7400, -0.5331, -2.3669],\n",
      "        ...,\n",
      "        [ 1.1103,  0.4445, -0.2255,  ..., -1.2766, -0.6034, -1.1156],\n",
      "        [ 0.8419,  0.0935,  0.0104,  ...,  0.6042, -0.5481, -0.7126],\n",
      "        [ 1.8648,  1.4712, -0.4018,  ...,  0.8256,  0.7816,  0.0853]],\n",
      "       device='cuda:0', grad_fn=<StackBackward0>)\n",
      "orig_dists_bp: [1 1 1 2 2 1 1 2 2 2 2 2 2 2 2 2 1 2 1 2 2 2 1 2 2 2 2 2 2 2 2 2 2 2 2 1 1\n",
      " 2 2 2]\n",
      "P: tensor([0.0903, 0.0903, 0.0903, 0.0032, 0.0032, 0.0903, 0.0903, 0.0032, 0.0032,\n",
      "        0.0032, 0.0032, 0.0032, 0.0032, 0.0032, 0.0032, 0.0032, 0.0903, 0.0032,\n",
      "        0.0903, 0.0032, 0.0032, 0.0032, 0.0903, 0.0032, 0.0032, 0.0032, 0.0032,\n",
      "        0.0032, 0.0032, 0.0032, 0.0032, 0.0032, 0.0032, 0.0032, 0.0032, 0.0903,\n",
      "        0.0903, 0.0032, 0.0032, 0.0032], device='cuda:0', dtype=torch.float64)\n",
      "feat_dists_bp: tensor([17.1739, 17.9419, 17.6980, 17.8211, 17.2373, 18.0952, 17.2932, 17.2454,\n",
      "        15.9230, 18.3066, 16.3189, 17.9622, 17.3002, 17.0915, 18.1144, 16.4267,\n",
      "        16.8798, 17.9886, 18.8088, 16.7149, 16.3378, 17.5355, 17.1079, 16.9240,\n",
      "        16.4198, 16.7808, 18.6371, 18.1660, 17.5035, 18.7379, 16.9212, 16.8213,\n",
      "        16.4792, 17.4383, 17.9245, 17.4383, 17.7713, 16.0490, 16.9128, 17.5431],\n",
      "       device='cuda:0', grad_fn=<SqueezeBackward1>)\n",
      "Q: tensor([0.0219, 0.0090, 0.0120, 0.0104, 0.0204, 0.0076, 0.0191, 0.0202, 0.0931,\n",
      "        0.0059, 0.0589, 0.0088, 0.0190, 0.0241, 0.0074, 0.0520, 0.0308, 0.0086,\n",
      "        0.0033, 0.0373, 0.0576, 0.0145, 0.0237, 0.0293, 0.0524, 0.0346, 0.0040,\n",
      "        0.0070, 0.0150, 0.0036, 0.0294, 0.0330, 0.0490, 0.0162, 0.0092, 0.0162,\n",
      "        0.0110, 0.0805, 0.0297, 0.0143], device='cuda:0',\n",
      "       grad_fn=<SoftmaxBackward0>)\n",
      "unnorm_probs_global: [0.13533528 0.13533528 0.13533528 ... 0.13533528 0.13533528 0.13533528]\n",
      "Z_global: 1635.3245811136128\n",
      "bp_global_prob_sum: 0.004732303909687978\n",
      "k_Bp: 1.4130659474328304\n",
      "feat_dists_bu: tensor([18.5493, 18.2278, 17.8553, 18.3226, 17.2628, 17.0861, 17.7634, 18.5652,\n",
      "        16.0455, 16.7946, 16.8388, 17.5043, 17.0578, 17.6884, 17.6687, 17.8079,\n",
      "        18.3602, 17.6491, 16.6072, 18.1221, 18.5421, 17.4638, 17.9239, 18.8807,\n",
      "        17.6198, 18.5791, 16.7308, 16.2829, 16.9668, 17.1444, 17.2959, 16.9767,\n",
      "        17.2846, 17.1383, 17.6546, 18.2743, 16.7264, 18.1228, 18.0507, 16.7941],\n",
      "       device='cuda:0', grad_fn=<SqueezeBackward1>)\n",
      "sum_e_bp: tensor(25.2063, device='cuda:0', grad_fn=<SumBackward0>)\n",
      "sum_e_bu: tensor(24.1805, device='cuda:0', grad_fn=<SumBackward0>)\n",
      "global_ratio: tensor(1.3556, device='cuda:0', grad_fn=<MulBackward0>)\n"
     ]
    }
   ],
   "source": [
    "print(\"bp_features:\",bp_features)\n",
    "print(\"bu_features:\",bu_features)\n",
    "print(\"orig_dists_bp:\",orig_dists_bp)\n",
    "print(\"P:\",P)\n",
    "\n",
    "\n",
    "print(\"feat_dists_bp:\",feat_dists_bp)\n",
    "print(\"Q:\",Q)\n",
    "\n",
    "\n",
    "print(\"unnorm_probs_global:\",unnorm_probs_global)\n",
    "print(\"Z_global:\",Z_global)\n",
    "print(\"bp_global_prob_sum:\",bp_global_prob_sum)\n",
    "print(\"k_Bp:\",k_Bp)\n",
    "print(\"feat_dists_bu:\",feat_dists_bu)\n",
    "print(\"sum_e_bp:\",sum_e_bp)\n",
    "print(\"sum_e_bu:\",sum_e_bu)\n",
    "print(\"global_ratio:\",global_ratio)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 79,
   "id": "600b6423",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(25.3457, device='cuda:0', grad_fn=<SumBackward0>)"
      ]
     },
     "execution_count": 79,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "sum_e_bu"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 80,
   "id": "bb3ae0bc",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(26.3437, device='cuda:0', grad_fn=<SumBackward0>)"
      ]
     },
     "execution_count": 80,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "sum_e_bp"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 56,
   "id": "cf4abad0",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(0.2500, device='cuda:0', grad_fn=<PowBackward0>)"
      ]
     },
     "execution_count": 56,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "loss_global"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "id": "688ce07c",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([15.3787, 16.0121, 15.4253, 15.6754, 16.1709, 15.9302, 16.6181, 15.6466,\n",
       "        16.4141, 16.9830, 14.4971, 14.9762, 15.7784, 16.2958, 16.1495, 14.7736,\n",
       "        15.1410, 15.6543, 16.2820, 16.7194, 16.9441, 16.7581, 16.3961, 15.5213,\n",
       "        15.6640, 15.7070, 16.2821, 17.3215, 15.1667, 16.1609, 16.2895, 15.4043,\n",
       "        15.4740, 14.9284, 15.5751, 14.2662, 15.6083, 17.0193, 15.5923, 15.1397],\n",
       "       device='cuda:0', grad_fn=<SqueezeBackward1>)"
      ]
     },
     "execution_count": 44,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "feat_dists_bp"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 72,
   "id": "69f718fb",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(1.0011e-06, device='cuda:0', grad_fn=<SumBackward0>)"
      ]
     },
     "execution_count": 72,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "sum_e_bu"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 71,
   "id": "63fefd71",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([17.9786, 17.8149, 17.2145, 18.3167, 16.4932, 17.2646, 18.8500, 17.8485,\n",
       "        16.4257, 17.3208, 17.3076, 17.1714, 17.3531, 17.5322, 17.8556, 18.1759,\n",
       "        17.2787, 18.6417, 18.6537, 18.1426, 17.9105, 19.2785, 16.3744, 16.7953,\n",
       "        18.3104, 18.5197, 18.4340, 18.1621, 17.8350, 17.6045, 17.5249, 17.0740,\n",
       "        17.7094, 17.5130, 18.7455, 18.2615, 16.3142, 17.2683, 17.7009, 18.7330],\n",
       "       device='cuda:0', grad_fn=<SqueezeBackward1>)"
      ]
     },
     "execution_count": 71,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "feat_dists_bp"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "id": "ed949b47",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([0.1971, 0.2812, 0.2541, 0.1176, 0.1694, 0.1517, 0.1961, 0.1920, 0.2319,\n",
       "        0.2380, 0.0799, 0.1419, 0.1515, 0.1990, 0.2612, 0.1326, 0.2011, 0.1924,\n",
       "        0.2845, 0.2461, 0.2412, 0.1704, 0.2357, 0.1555, 0.0924, 0.1515, 0.2337,\n",
       "        0.2247, 0.1362, 0.1792, 0.3258, 0.3037, 0.2473, 0.2268, 0.1531, 0.1798,\n",
       "        0.2265, 0.3413, 0.1420, 0.1489], device='cuda:0',\n",
       "       grad_fn=<SqueezeBackward4>)"
      ]
     },
     "execution_count": 49,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "feat_dists_bu"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "id": "ea8f2ada",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([17.3830, 18.8074, 17.2807, 16.2711, 16.5384, 17.5892, 17.5048, 18.6080,\n",
       "        17.4325, 17.3218, 15.9989, 17.1385, 16.9832, 17.4741, 17.7036, 16.7957,\n",
       "        16.9648, 16.8700, 18.7639, 16.2010, 15.8607, 17.6674, 17.1501, 17.4688,\n",
       "        17.7472, 18.1129, 17.2142, 16.6794, 17.3207, 17.3963, 17.6601, 16.1668,\n",
       "        17.2354, 17.4118, 17.9075, 17.9652, 16.5519, 17.9724, 17.1188, 17.2855],\n",
       "       device='cuda:0', grad_fn=<SqueezeBackward1>)"
      ]
     },
     "execution_count": 50,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "feat_dists_bp"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "571cab27",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "74f15734",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "tsne01",
   "language": "python",
   "name": "tsne01"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
