{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "e7f126eb",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/data/home/sczc619/run/LML/anaconda3/envs/tsne/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n"
     ]
    }
   ],
   "source": [
    "import pickle\n",
    "import os\n",
    "import random as rd\n",
    "import numpy as np\n",
    "import copy\n",
    "import copy as cp\n",
    "import dgl\n",
    "from collections import defaultdict\n",
    "import matplotlib.pyplot as plt\n",
    "import time\n",
    "import scipy.sparse as sp\n",
    "from scipy.io import loadmat\n",
    "import torch\n",
    "import torch.nn.functional as F\n",
    "import torch.nn as nn\n",
    "from torch.nn import TransformerEncoder, TransformerEncoderLayer\n",
    "from torch_geometric.utils import to_dense_adj, subgraph\n",
    "from torch.utils.tensorboard import SummaryWriter\n",
    "from sklearn.preprocessing import MinMaxScaler\n",
    "from sklearn.manifold import TSNE\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.metrics import f1_score, accuracy_score, recall_score, roc_auc_score, average_precision_score, \\\n",
    "    confusion_matrix\n",
    "\n",
    "# Maps a dataset/relation key to its adjacency-list pickle file name.\n",
    "filelist = {\n",
    "    'amz_upu': 'amz_upu_adjlists.pickle',\n",
    "    'amz_usu': 'amz_usu_adjlists.pickle',\n",
    "    'amz_uvu': 'amz_uvu_adjlists.pickle',\n",
    "    'yelp_rsr': 'yelp_rsr_adjlists.pickle',\n",
    "    'yelp_rtr': 'yelp_rtr_adjlists.pickle',\n",
    "    'yelp_rur': 'yelp_rur_adjlists.pickle'\n",
    "}\n",
    "\n",
    "# Maps the same keys to the file-name prefix of the precomputed\n",
    "# decomposition matrices; hop i is stored as '<prefix>i.pkl' (see load_data).\n",
    "file_matrix_prefix = {\n",
    "    'amz_upu': 'amazon_upu_matrix_',\n",
    "    'amz_usu': 'amazon_usu_matrix_',\n",
    "    'amz_uvu': 'amazon_uvu_matrix_',\n",
    "    'yelp_rsr': 'yelpnet_rsr_matrix_decompision_',\n",
    "    'yelp_rtr': 'yelpnet_rtr_matrix_decompision_',\n",
    "    'yelp_rur': 'yelpnet_rur_matrix_decompision_'\n",
    "}\n",
    "\n",
    "\n",
    "def create_node_subgraph(node_idx, feat_data, edge_indexs, device):\n",
    "    \"\"\"\n",
    "    为单个节点创建一阶邻居子图（所有邻居）\n",
    "    \"\"\"\n",
    "    neighbors = set()\n",
    "    for rel_idx in range(len(edge_indexs)):\n",
    "        edge_index = edge_indexs[rel_idx][0].cpu().numpy()\n",
    "        # 找出以中心点为起点的边的终点\n",
    "        rel_neighbors = edge_index[1][edge_index[0] == node_idx].tolist()\n",
    "        neighbors.update(rel_neighbors)\n",
    "\n",
    "    # 移除中心节点自身\n",
    "    neighbors.discard(node_idx)\n",
    "    neighbors = list(neighbors)\n",
    "\n",
    "    # 如果邻居太多，进行随机采样截取\n",
    "    sample_size = 399\n",
    "    if len(neighbors) > sample_size:\n",
    "        neighbors = np.random.choice(neighbors, size=sample_size, replace=False).tolist()\n",
    "\n",
    "    # 构建子图节点列表，确保中心节点是第一个\n",
    "    sub_nodes = [node_idx] + [n for n in neighbors if n != node_idx]\n",
    "\n",
    "    # 构建子图边列表\n",
    "    sub_edge_index = []\n",
    "    for rel_idx in range(len(edge_indexs)):\n",
    "        edge_index = edge_indexs[rel_idx][0].cpu().numpy()\n",
    "        mask = np.isin(edge_index[0], sub_nodes) & np.isin(edge_index[1], sub_nodes)\n",
    "        local_edges = edge_index[:, mask]\n",
    "\n",
    "        # 创建节点映射\n",
    "        node_map = {n: i for i, n in enumerate(sub_nodes)}\n",
    "\n",
    "        # 将全局索引映射到局部索引\n",
    "        if len(local_edges) > 0 and local_edges.size > 0:\n",
    "            src_nodes = [node_map[src] for src in local_edges[0]]\n",
    "            dst_nodes = [node_map[dst] for dst in local_edges[1]]\n",
    "            edge_tensor = torch.tensor([src_nodes, dst_nodes], dtype=torch.long)\n",
    "        else:\n",
    "            # 添加自环确保图不为空\n",
    "            edge_tensor = torch.tensor([[0], [0]], dtype=torch.long)\n",
    "\n",
    "        sub_edge_index.append(edge_tensor.to(device))\n",
    "\n",
    "    # 创建子图数据\n",
    "    subgraph = {\n",
    "        'features': feat_data[sub_nodes].clone(),\n",
    "        'edges': sub_edge_index,\n",
    "        'global_idx': sub_nodes\n",
    "    }\n",
    "\n",
    "    return subgraph\n",
    "\n",
    "\n",
    "def dict_to_edge_index(edge_dict):\n",
    "    source_nodes = []\n",
    "    target_nodes = []\n",
    "    for src, targets in edge_dict.items():\n",
    "        for target in targets:\n",
    "            source_nodes.append(src)\n",
    "            target_nodes.append(target)\n",
    "    edge_index = [source_nodes, target_nodes]\n",
    "    return torch.LongTensor(edge_index)\n",
    "\n",
    "\n",
    "def numpy_array_to_edge_index(np_array):\n",
    "    assert np_array.ndim == 2 and np_array.shape[0] == np_array.shape[1], \"Input must be a square matrix.\"\n",
    "    rows, cols = np.nonzero(np_array)\n",
    "    edge_index = np.vstack((rows, cols))\n",
    "    edge_index_tensor = torch.from_numpy(edge_index).long()\n",
    "    return edge_index_tensor\n",
    "\n",
    "\n",
    "def _load_relation(pickle_path, matrix_path_prefix, k):\n",
    "    \"\"\"Load one relation graph plus its k precomputed decomposition matrices.\n",
    "\n",
    "    :param pickle_path: path to the adjacency-list pickle for this relation\n",
    "    :param matrix_path_prefix: path prefix; hop i is read from '<prefix>i.pkl'\n",
    "    :param k: number of decomposition hops to load (1..k)\n",
    "    :return: (edge_index, tree_list) where edge_index is a [2, E] LongTensor\n",
    "             and tree_list holds one [2, E_i] LongTensor per hop\n",
    "    \"\"\"\n",
    "    # NOTE: pickle.load assumes these files are trusted (pickle can run code).\n",
    "    with open(pickle_path, 'rb') as file:\n",
    "        adj_lists = pickle.load(file)\n",
    "    edge_index = dict_to_edge_index(adj_lists)\n",
    "\n",
    "    tree_list = []\n",
    "    for i in range(1, k + 1):\n",
    "        file_name = '{}{}.pkl'.format(matrix_path_prefix, i)\n",
    "        with open(file_name, 'rb') as file:\n",
    "            tree = pickle.load(file)\n",
    "        tree_list.append(numpy_array_to_edge_index(tree))\n",
    "    return edge_index, tree_list\n",
    "\n",
    "\n",
    "def load_data(data, k=2, prefix=''):\n",
    "    \"\"\"Load features, labels and the three relation graphs of a dataset.\n",
    "\n",
    "    Replaces six copy-pasted load blocks with one helper call per relation.\n",
    "\n",
    "    :param data: dataset name, 'yelp' or 'amazon' (anything else returns None,\n",
    "                 matching the original implicit fall-through behaviour)\n",
    "    :param k: number of decomposition matrices to load per relation\n",
    "    :param prefix: directory holding the .mat and pickle files\n",
    "    :return: ([[edge_index, tree_list] per relation], feat_data, labels) or None\n",
    "    \"\"\"\n",
    "    pickle_file = {}\n",
    "    matrix_prefix = {}\n",
    "    for key in filelist:\n",
    "        pickle_file[key] = os.path.join(prefix, filelist[key])\n",
    "        matrix_prefix[key] = os.path.join(prefix, file_matrix_prefix[key])\n",
    "\n",
    "    if data == 'yelp':\n",
    "        mat_name = 'YelpChi.mat'\n",
    "        rel_keys = ['yelp_rur', 'yelp_rtr', 'yelp_rsr']\n",
    "    elif data == 'amazon':\n",
    "        mat_name = 'Amazon.mat'\n",
    "        rel_keys = ['amz_upu', 'amz_usu', 'amz_uvu']\n",
    "    else:\n",
    "        # Unknown dataset: preserve the original implicit-None behaviour.\n",
    "        return None\n",
    "\n",
    "    data_file = loadmat(os.path.join(prefix, mat_name))\n",
    "    labels = data_file['label'].flatten()\n",
    "    feat_data = data_file['features'].todense().A\n",
    "\n",
    "    # One [edge_index, tree_list] pair per relation, in the original order.\n",
    "    relations = [list(_load_relation(pickle_file[key], matrix_prefix[key], k))\n",
    "                 for key in rel_keys]\n",
    "    return relations, feat_data, labels\n",
    "\n",
    "\n",
    "def Visualization(labels, embedding, prefix):\n",
    "    \"\"\"t-SNE scatter of a class-balanced embedding sample, saved as HOGRL.png.\"\"\"\n",
    "    # Balance the two classes, then draw 5000 points (with replacement).\n",
    "    pos_idx, neg_idx = pos_neg_split(list(range(len(labels))), labels)\n",
    "    balanced_idx = undersample(pos_idx, neg_idx, scale=1)\n",
    "    tsne = TSNE(n_components=2, random_state=43)\n",
    "    balanced_idx = np.array(balanced_idx)\n",
    "    balanced_idx = np.random.choice(balanced_idx, size=5000, replace=True)\n",
    "    points = embedding[balanced_idx]\n",
    "    point_labels = labels[balanced_idx]\n",
    "\n",
    "    reduced = tsne.fit_transform(points)\n",
    "\n",
    "    # Rescale the 2-D coordinates into the unit square.\n",
    "    scaler = MinMaxScaler(feature_range=(0, 1))\n",
    "    coords = scaler.fit_transform(reduced)\n",
    "    print(coords.shape)\n",
    "\n",
    "    plt.figure(figsize=(8, 8))\n",
    "    plt.scatter(coords[point_labels == 0, 0], coords[point_labels == 0, 1], c='#14517C', label='Label 0', s=3)\n",
    "    plt.scatter(coords[point_labels == 1, 0], coords[point_labels == 1, 1], c='#FA7F6F', label='Label 1', s=3)\n",
    "\n",
    "    # Strip the frame and ticks for a clean embedding plot.\n",
    "    ax = plt.gca()\n",
    "    for side in ('top', 'right', 'left', 'bottom'):\n",
    "        ax.spines[side].set_visible(False)\n",
    "    plt.xticks([])\n",
    "    plt.yticks([])\n",
    "    plt.xlim(0, 1)\n",
    "    plt.ylim(0, 1)\n",
    "\n",
    "    plt.savefig(os.path.join(prefix, 'HOGRL.png'))\n",
    "    plt.show()\n",
    "\n",
    "\n",
    "def normalize(mx):\n",
    "    rowsum = np.array(mx.sum(1)) + 0.01\n",
    "    r_inv = np.power(rowsum, -1).flatten()\n",
    "    r_inv[np.isinf(r_inv)] = 0.\n",
    "    r_mat_inv = sp.diags(r_inv)\n",
    "    mx = r_mat_inv.dot(mx)\n",
    "    return mx\n",
    "\n",
    "\n",
    "def pos_neg_split(nodes, labels):\n",
    "    # 正负样本分割\n",
    "    pos_nodes = []\n",
    "    neg_nodes = cp.deepcopy(nodes)\n",
    "    aux_nodes = cp.deepcopy(nodes)\n",
    "    for idx, label in enumerate(labels):\n",
    "        if label == 1:\n",
    "            pos_nodes.append(aux_nodes[idx])\n",
    "            neg_nodes.remove(aux_nodes[idx])\n",
    "\n",
    "    return pos_nodes, neg_nodes\n",
    "\n",
    "\n",
    "def undersample(pos_nodes, neg_nodes, scale=1):\n",
    "    # 对负样本进行下采样，平衡正负样本数量\n",
    "    aux_nodes = cp.deepcopy(neg_nodes)\n",
    "    aux_nodes = rd.sample(aux_nodes, k=int(len(pos_nodes) * scale))\n",
    "    batch_nodes = pos_nodes + aux_nodes\n",
    "\n",
    "    return batch_nodes\n",
    "\n",
    "\n",
    "def calculate_g_mean(y_true, y_pred):\n",
    "    \"\"\"Geometric mean of the per-class recalls from the confusion matrix.\"\"\"\n",
    "    cm = confusion_matrix(y_true, y_pred)\n",
    "    recalls = []\n",
    "    for class_idx, row in enumerate(cm):\n",
    "        # row.sum() is the true count of this class and the diagonal entry\n",
    "        # the number of correct predictions, so their ratio is the recall.\n",
    "        class_total = row.sum()\n",
    "        recalls.append(row[class_idx] / class_total if class_total != 0 else 0)\n",
    "    return np.prod(recalls) ** (1 / len(recalls))\n",
    "\n",
    "\n",
    "def iterate_batches(indices, batch_size, shuffle=True):\n",
    "    \"\"\"\n",
    "        将索引列表划分为指定大小的批次\n",
    "        :param indices: 样本索引列表\n",
    "        :param batch_size: 每个批次的大小\n",
    "        :param shuffle: 是否打乱顺序\n",
    "        :return: 生成批次索引的迭代器\n",
    "    \"\"\"\n",
    "    if shuffle:\n",
    "        rd.shuffle(indices)\n",
    "    for i in range(0, len(indices), batch_size):\n",
    "        yield indices[i:i + batch_size]\n",
    "\n",
    "\n",
    "def test(idx_eval, y_eval, model, feat_data, edge_indexs, device, batch_size=64):\n",
    "    \"\"\"Evaluate the model on the given nodes.\n",
    "\n",
    "    :param idx_eval: node indices to evaluate\n",
    "    :param y_eval: ground-truth labels aligned with idx_eval by position\n",
    "    :param model: model returning (logits, features) for a list of subgraphs\n",
    "    :param feat_data: full node feature tensor\n",
    "    :param edge_indexs: per-relation edge data (see create_node_subgraph)\n",
    "    :param device: device used for the subgraph tensors\n",
    "    :param batch_size: evaluation batch size\n",
    "    :return: (AUC, average precision, macro-F1, G-mean)\n",
    "    \"\"\"\n",
    "    model.eval()\n",
    "    all_probs = []\n",
    "    all_labels = []\n",
    "\n",
    "    # Fix: the original read the module-level global `labels` and ignored\n",
    "    # the y_eval argument entirely; use the labels the caller passed in.\n",
    "    label_of = dict(zip(idx_eval, y_eval))\n",
    "\n",
    "    # Evaluate in batches\n",
    "    for batch_centers in iterate_batches(idx_eval, batch_size, shuffle=False):\n",
    "        # Build a one-hop subgraph around every centre node in the batch.\n",
    "        subgraph_data = [create_node_subgraph(xi, feat_data, edge_indexs, device)\n",
    "                         for xi in batch_centers]\n",
    "\n",
    "        with torch.no_grad():\n",
    "            center_logits, _ = model(subgraph_data)  # [B, 2]\n",
    "            probs = torch.softmax(center_logits, dim=-1)[:, 1]  # positive-class probability\n",
    "            all_probs.extend(probs.cpu().numpy())\n",
    "            all_labels.extend(label_of[xi] for xi in batch_centers)\n",
    "\n",
    "    # Threshold at 0.5 for the label-based metrics.\n",
    "    auc_score = roc_auc_score(all_labels, all_probs)\n",
    "    ap_score = average_precision_score(all_labels, all_probs)\n",
    "    pred_labels = (np.array(all_probs) >= 0.5).astype(int)\n",
    "    f1 = f1_score(all_labels, pred_labels, average='macro')\n",
    "    g_mean = calculate_g_mean(all_labels, pred_labels)\n",
    "\n",
    "    return auc_score, ap_score, f1, g_mean\n",
    "\n",
    "\n",
    "class BSNE_Transformer(nn.Module):\n",
    "    \"\"\"Multi-relation Transformer over per-node subgraphs.\n",
    "\n",
    "    One TransformerEncoder per relation attends within each subgraph, with\n",
    "    attention restricted to that relation's edges (plus self-loops); the\n",
    "    per-relation embeddings of the centre node are concatenated and fed to\n",
    "    an MLP classifier that emits log-probabilities.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, in_feat, out_feat, relation_nums=3, d_model=256,\n",
    "                 nhead=8, num_layers=3, dim_feedforward=256,\n",
    "                 drop_rate=0.5):\n",
    "        \"\"\"\n",
    "        :param in_feat: dimension of the raw node features\n",
    "        :param out_feat: number of output classes\n",
    "        :param relation_nums: number of relation types (one encoder each)\n",
    "        :param d_model: hidden width of the encoders\n",
    "        :param nhead: attention heads per encoder layer\n",
    "        :param num_layers: encoder layers per relation\n",
    "        :param dim_feedforward: feed-forward width inside each encoder layer\n",
    "        :param drop_rate: dropout used in the encoders and the classifier\n",
    "        \"\"\"\n",
    "        super().__init__()\n",
    "        self.relation_nums = relation_nums\n",
    "        self.d_model = d_model\n",
    "        self.nhead = nhead\n",
    "\n",
    "        # Projects raw features into the encoder width.\n",
    "        self.feature_proj = nn.Sequential(\n",
    "            nn.Linear(in_feat, d_model),\n",
    "            nn.LayerNorm(d_model),\n",
    "            nn.ReLU()\n",
    "        )\n",
    "        self.norm = nn.LayerNorm(d_model)\n",
    "\n",
    "        # One independent Transformer encoder per relation type.\n",
    "        self.relation_encoders = nn.ModuleList([\n",
    "            TransformerEncoder(\n",
    "                TransformerEncoderLayer(\n",
    "                    d_model=d_model,\n",
    "                    nhead=nhead,\n",
    "                    dim_feedforward=dim_feedforward,\n",
    "                    dropout=drop_rate,\n",
    "                    batch_first=True\n",
    "                ),\n",
    "                num_layers=num_layers\n",
    "            ) for _ in range(relation_nums)\n",
    "        ])\n",
    "\n",
    "        # Classifies the concatenated per-relation centre-node embeddings.\n",
    "        self.classifier = nn.Sequential(\n",
    "            nn.Linear(relation_nums * d_model, 512),\n",
    "            nn.ReLU(),\n",
    "            nn.Dropout(drop_rate),\n",
    "            nn.Linear(512, out_feat)\n",
    "        )\n",
    "        # Xavier init for every weight matrix (biases / norms left as-is).\n",
    "        for p in self.parameters():\n",
    "            if p.dim() > 1:\n",
    "                nn.init.xavier_uniform_(p)\n",
    "\n",
    "    #         self.node_feature_extractor = nn.Sequential(\n",
    "    #             nn.Linear(relation_nums * d_model, d_model),\n",
    "    #             nn.ReLU(),\n",
    "    #             nn.LayerNorm(d_model)\n",
    "    #         )\n",
    "\n",
    "    def forward(self, subgraph_batch):\n",
    "        \"\"\"Run the model on a list of subgraph dicts (see create_node_subgraph).\n",
    "\n",
    "        :param subgraph_batch: list of dicts with 'features' [num_nodes, in_feat]\n",
    "                               and 'edges' (one [2, e] tensor per relation);\n",
    "                               node 0 of each subgraph is the centre node\n",
    "        :return: (log-probabilities [B, out_feat],\n",
    "                  centre-node features [B, relation_nums * d_model])\n",
    "        \"\"\"\n",
    "        # Subgraphs have different node counts, so process them one by one.\n",
    "        center_logits_list = []\n",
    "        center_features_list = []\n",
    "\n",
    "        for sg in subgraph_batch:\n",
    "            # Project this subgraph's node features.\n",
    "            features = self.feature_proj(sg['features'].unsqueeze(0))  # [1, num_nodes, d_model]\n",
    "            features = self.norm(features)\n",
    "\n",
    "            # Encode the subgraph once per relation type.\n",
    "            rel_outputs = []\n",
    "            num_nodes = features.size(1)\n",
    "\n",
    "            for rel_idx in range(self.relation_nums):\n",
    "                # Dense adjacency matrix of the current relation.\n",
    "                edge_index = sg['edges'][rel_idx]\n",
    "                adj = torch.zeros(num_nodes, num_nodes,\n",
    "                                  dtype=torch.float, device=features.device)\n",
    "\n",
    "                if edge_index.size(1) > 0:\n",
    "                    src, dst = edge_index\n",
    "                    adj[src, dst] = 1.0\n",
    "\n",
    "                # Self-loops so every node can attend to itself.\n",
    "                adj[range(num_nodes), range(num_nodes)] = 1.0\n",
    "\n",
    "                # Additive attention mask: 0 where an edge exists, -inf elsewhere.\n",
    "                adj_mask = adj.masked_fill(adj == 0.0, float('-inf'))\n",
    "                adj_mask = adj_mask.masked_fill(adj == 1.0, 0.0)\n",
    "\n",
    "                # Replicate the mask for every attention head.\n",
    "                adj_mask = adj_mask.unsqueeze(0).unsqueeze(0)  # [1, 1, num_nodes, num_nodes]\n",
    "                adj_mask = adj_mask.expand(1, self.nhead, num_nodes, num_nodes)\n",
    "                adj_mask = adj_mask.reshape(-1, num_nodes, num_nodes)\n",
    "\n",
    "                # Relation-specific encoding with masked attention.\n",
    "                encoder_output = self.relation_encoders[rel_idx](\n",
    "                    src=features,\n",
    "                    mask=adj_mask\n",
    "                )\n",
    "                rel_outputs.append(encoder_output)\n",
    "\n",
    "            # Concatenate the per-relation embeddings.\n",
    "            combined = torch.cat(rel_outputs, dim=-1)  # [1, num_nodes, rel*d_model]\n",
    "\n",
    "            # The centre node was placed first when the subgraph was built.\n",
    "            center_features = combined[:, 0, :]  # [1, rel*d_model]\n",
    "            center_logits = self.classifier(center_features)  # [1, out_feat]\n",
    "            # NOTE(review): log_softmax here while test() applies softmax again\n",
    "            # to these log-probabilities; softmax(log_softmax(x)) == softmax(x),\n",
    "            # so probabilities are unchanged, but keep this in mind for losses.\n",
    "            center_logits = F.log_softmax(center_logits, dim=-1)\n",
    "\n",
    "            center_logits_list.append(center_logits)\n",
    "            center_features_list.append(center_features)\n",
    "\n",
    "        # Stack the per-subgraph results into batch tensors.\n",
    "        center_logits = torch.cat(center_logits_list, dim=0)\n",
    "        center_features = torch.cat(center_features_list, dim=0)\n",
    "\n",
    "        return center_logits, center_features\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "817224f9",
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cuda\n",
      "loading data...\n",
      "\n",
      "=== Starting Pretraining ===\n",
      "local_loss: 0.0016437826581571593,global_loss: -1.4995879101397158\n",
      "local_loss: -0.06081257776766403,global_loss: -0.8467811035176434\n",
      "local_loss: -0.0159433407145749,global_loss: -0.9718226732169523\n",
      "local_loss: -0.07453283943930285,global_loss: -1.1587293816081405\n",
      "local_loss: -0.04494411257001846,global_loss: -1.4559385778312046\n",
      "local_loss: -0.04378341145409235,global_loss: -1.5906977058770195\n",
      "local_loss: -0.024076403958424326,global_loss: -0.7379650959566844\n",
      "local_loss: 0.001808529896316339,global_loss: -1.4589749982572828\n",
      "Pretrain Epoch: 000, BSNE Loss: -1.2476\n",
      "local_loss: -0.07385012216141269,global_loss: -1.7537944138889343\n",
      "local_loss: -0.09356777583915062,global_loss: -2.0346644520246393\n",
      "local_loss: -0.006612906164699064,global_loss: -1.5758488954546643\n",
      "local_loss: -0.09544890978152762,global_loss: -1.311986692521368\n",
      "local_loss: -0.052154280631391374,global_loss: -2.0821388638575864\n",
      "local_loss: -0.023301668462266632,global_loss: -1.3507880495655984\n",
      "local_loss: -0.09485202620138886,global_loss: -1.9101305165119677\n",
      "local_loss: -0.09008115687484734,global_loss: -1.3668432760384501\n",
      "local_loss: -0.07375713770470754,global_loss: -1.2242473739572701\n",
      "local_loss: -0.06001398906033093,global_loss: -1.3235085181027881\n",
      "local_loss: -0.08603412556013437,global_loss: -1.5983361399516889\n",
      "local_loss: -0.007593601253403491,global_loss: -1.6697180783486787\n",
      "local_loss: 0.0014166151653360562,global_loss: -1.093280529173974\n",
      "local_loss: -0.0236278931980716,global_loss: -1.336892208392504\n",
      "local_loss: -0.09502995458099266,global_loss: -1.4587137215405024\n",
      "local_loss: -0.04509358966584946,global_loss: -1.3693985743197645\n",
      "local_loss: -0.02334937488461829,global_loss: -1.4538425025461439\n",
      "local_loss: -0.09497687806397838,global_loss: -1.7184865465471322\n",
      "local_loss: -0.07384546732248737,global_loss: -1.6539757486654156\n",
      "local_loss: -0.08646915999413501,global_loss: -1.4144751779544689\n",
      "local_loss: -0.0594714505536864,global_loss: -1.0667636861836218\n",
      "local_loss: -0.06012970154405489,global_loss: -1.129729211902577\n",
      "local_loss: -0.007140095056155304,global_loss: -0.8841785480959904\n",
      "local_loss: -0.023644559792077368,global_loss: -1.2797291285938253\n",
      "local_loss: -0.0600395679678884,global_loss: -1.6575943884573203\n",
      "local_loss: 0.001878318420399068,global_loss: -0.897350126035288\n",
      "local_loss: -0.03472399376828294,global_loss: -0.6491839191946899\n",
      "local_loss: 0.0012418657001202825,global_loss: -1.5724495650615293\n",
      "local_loss: -0.022695846852769562,global_loss: -1.2528747152836182\n",
      "local_loss: -0.022796868210714596,global_loss: -1.5393546720092268\n",
      "local_loss: -0.08352445101701497,global_loss: -1.5065063252162074\n",
      "local_loss: -0.09490165966962984,global_loss: -1.4210833355445651\n",
      "local_loss: -0.056182195492472095,global_loss: -1.4421613533592126\n",
      "local_loss: -0.08370990661175898,global_loss: -1.433939550977634\n",
      "local_loss: -0.060057710486431465,global_loss: -1.4870078358862002\n",
      "local_loss: 0.0018527168063098287,global_loss: -0.9771732770628935\n",
      "local_loss: -0.0611413115751824,global_loss: -0.7295338474389977\n",
      "local_loss: -0.008172515623259099,global_loss: -1.1948807372571442\n",
      "local_loss: 0.0007830348212721612,global_loss: -1.0951841534975426\n",
      "local_loss: 0.0025734675241995235,global_loss: -1.5162145361529369\n",
      "Pretrain Epoch: 005, BSNE Loss: -1.2675\n",
      "local_loss: -0.04456326727306789,global_loss: -0.8701624916994318\n",
      "local_loss: -0.05935840608739566,global_loss: -1.4298426664248038\n",
      "local_loss: -0.08793685549562212,global_loss: -0.9697729044699812\n",
      "local_loss: 0.00201732780528095,global_loss: -1.3572686192097552\n",
      "local_loss: -0.007101357713684136,global_loss: -1.031976316717601\n",
      "local_loss: -0.04415140485430278,global_loss: -1.2885090938125225\n",
      "local_loss: 0.0014770145387084522,global_loss: -1.1241950519860915\n",
      "local_loss: -0.044238325456283804,global_loss: -1.2421870057168034\n",
      "local_loss: -0.007537357175266865,global_loss: -1.2617143231883683\n",
      "local_loss: -0.02364081321440577,global_loss: -1.4908638963471237\n",
      "local_loss: -0.09755727750640224,global_loss: -1.3133111155010917\n",
      "local_loss: -0.015364142513077505,global_loss: -1.7298238278083382\n",
      "local_loss: -0.022933402583680092,global_loss: -1.007855711660151\n",
      "local_loss: -0.060410218032266386,global_loss: -1.257495439199751\n",
      "local_loss: -0.02360539102551068,global_loss: -1.6618477474689206\n",
      "local_loss: -0.09104018993265055,global_loss: -1.1709203741635612\n",
      "local_loss: 0.0016180788646768765,global_loss: -1.114501027628469\n",
      "local_loss: -0.022597482158991662,global_loss: -1.5142160399773459\n",
      "local_loss: -0.08304488907505068,global_loss: -1.5077010585270656\n",
      "local_loss: 0.001964194521938316,global_loss: -1.222315003033664\n",
      "local_loss: -0.015233182593556717,global_loss: -1.4547332546895637\n",
      "local_loss: -0.09081848336061743,global_loss: -1.3475055088304642\n",
      "local_loss: -0.023613485903934462,global_loss: -0.9845442824883405\n",
      "local_loss: -0.05562848537240723,global_loss: -1.35741399937561\n",
      "local_loss: 0.0012936479448480075,global_loss: -0.9875452634992636\n",
      "local_loss: -0.04323106372590806,global_loss: -1.4016976432835488\n",
      "local_loss: -0.09657536761827097,global_loss: -1.378447675694928\n",
      "local_loss: -0.006763189342392749,global_loss: -1.1824985052850052\n",
      "local_loss: -0.0074403321668382,global_loss: -1.4822674883215095\n",
      "local_loss: -0.09862683374537194,global_loss: -1.2476470493047405\n",
      "local_loss: -0.08902516692820951,global_loss: -1.6392262040237575\n",
      "local_loss: -0.030145580448420292,global_loss: -1.4493414781097207\n",
      "local_loss: -0.044620033601425406,global_loss: -0.933040794158693\n",
      "local_loss: -0.0728313470087905,global_loss: -1.7121412152514677\n",
      "local_loss: -0.024468341395935952,global_loss: -0.7543714046962638\n",
      "local_loss: 0.00112355331855755,global_loss: -1.4846592027602763\n",
      "local_loss: -0.04374375449710179,global_loss: -1.5231266545131155\n",
      "local_loss: -0.07377579233106135,global_loss: -1.2119222427113931\n",
      "local_loss: -0.007363334319054068,global_loss: -1.0043102563063573\n",
      "local_loss: 0.002870979851121259,global_loss: -1.8362892915416285\n",
      "Pretrain Epoch: 010, BSNE Loss: -1.3403\n",
      "local_loss: 0.0024601505795322524,global_loss: -1.6580197589422905\n",
      "local_loss: -0.044583986982918385,global_loss: -1.264834236389326\n",
      "local_loss: -0.08366688908812965,global_loss: -1.3424876314445633\n",
      "local_loss: -0.02235431791483941,global_loss: -1.6553680031830262\n",
      "local_loss: -0.020923646894113145,global_loss: -1.1805934660293091\n",
      "local_loss: -0.006481957598443949,global_loss: -1.0424892393182579\n",
      "local_loss: -0.02363748670756402,global_loss: -1.3398128624515413\n",
      "local_loss: 0.0016776267431239096,global_loss: -0.9739794968719406\n",
      "local_loss: -0.10516696602680309,global_loss: -1.633189805684597\n",
      "local_loss: -0.09623697218166614,global_loss: -1.4194967935880676\n",
      "local_loss: -0.09728846031743338,global_loss: -0.7421535400144208\n",
      "local_loss: -0.04442317932794722,global_loss: -1.1889752562958513\n",
      "local_loss: -0.05640995335510812,global_loss: -1.1721903815307446\n",
      "local_loss: -0.04405794477129497,global_loss: -1.0761119598546156\n",
      "local_loss: -0.07401650305897302,global_loss: -1.5133889641011598\n",
      "local_loss: -0.044025917208835655,global_loss: -1.5130728636570132\n",
      "local_loss: -0.034120987768672784,global_loss: -1.3864374026710895\n",
      "local_loss: 0.001867510111479797,global_loss: -1.46057868984759\n",
      "local_loss: 0.001092445370617632,global_loss: -0.8973248532483665\n",
      "local_loss: -0.023480413676600864,global_loss: -1.4817218642925531\n",
      "local_loss: 0.0014452481013595868,global_loss: -0.8357957373564373\n",
      "local_loss: -0.09662261399233107,global_loss: -2.000386724720088\n",
      "local_loss: -0.023253258137443344,global_loss: -1.4177053990640665\n",
      "local_loss: 0.0019679751594069267,global_loss: -1.7139671699522108\n",
      "local_loss: -0.060441984469615254,global_loss: -1.428371766725335\n",
      "local_loss: -0.06046956155193091,global_loss: -1.3954320000010727\n",
      "local_loss: -0.062051096993218494,global_loss: -1.9422847035978366\n",
      "local_loss: -0.059944552487484024,global_loss: -1.6267038166275307\n",
      "local_loss: -0.09407849114211751,global_loss: -1.4912524867762276\n",
      "local_loss: -0.04435666054437788,global_loss: -1.47906361325186\n",
      "local_loss: -0.07390583263606275,global_loss: -1.1590162674081383\n",
      "local_loss: -0.04485802075643145,global_loss: -1.7412327758295418\n"
     ]
    }
   ],
   "source": [
    "# 进行完整测试（修改前）\n",
    "\n",
    "# 参数设置\n",
    "args = {\n",
    "    \"dataset\": \"amazon\",\n",
    "    \"batch_size\": 8,\n",
    "    \"sample_size\": 20,\n",
    "    \"weight_decay\": 0.00005,\n",
    "    \"emb_size\": 32,\n",
    "    \"pretrain_epochs\": 100,\n",
    "    \"pretrain_lr\": 0.0001,\n",
    "    \"finetune_lr\": 0.0005,\n",
    "    \"num_epochs\": 100,\n",
    "    \"pretrain_patience\": 20,\n",
    "    \"patience\": 30,\n",
    "    \"tsne_weight\": 0.3,\n",
    "    \"weight\": 0.6,\n",
    "    \"layers\": 7,\n",
    "    \"test_size\": 0.6,\n",
    "    \"val_size\": 0.5,\n",
    "    \"layers_tree\": 7,\n",
    "    \"seed\": 76,\n",
    "    \"num_heads\": 2,\n",
    "    \"drop_rate\": 0.5\n",
    "}\n",
    "\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "print(device)\n",
    "\n",
    "timestamp = time.strftime(\"%Y%m%d-%H%M%S\")\n",
    "writer = SummaryWriter(f'runs/{args[\"dataset\"]}_{timestamp}')\n",
    "\n",
    "print('loading data...')\n",
    "prefix = \"../../data/\"\n",
    "\n",
    "edge_indexs, feat_data, labels = load_data(args['dataset'], args['layers_tree'], prefix)\n",
    "\n",
    "np.random.seed(args['seed'])\n",
    "rd.seed(args['seed'])\n",
    "\n",
    "if args['dataset'] == 'yelp':\n",
    "    index = list(range(len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels, stratify=labels,\n",
    "                                                                    test_size=args['test_size'], random_state=2,\n",
    "                                                                    shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"YelpChi_shortest_distance.pkl\")\n",
    "elif args['dataset'] == 'amazon':\n",
    "    index = list(range(3305, len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels[3305:],\n",
    "                                                                    stratify=labels[3305:],\n",
    "                                                                    test_size=args['test_size'],\n",
    "                                                                    random_state=2, shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"Amazon_shortest_distance.pkl\")\n",
    "\n",
    "with open(dist_path, 'rb') as f:\n",
    "    dist_data = pickle.load(f)\n",
    "    dist_matrix = torch.tensor(dist_data['dist_matrix']).to(device)\n",
    "\n",
    "# 准备特征数据\n",
    "feat_data = torch.tensor(feat_data).float()\n",
    "# 特征归一化\n",
    "scaler = MinMaxScaler()\n",
    "feat_data = torch.tensor(scaler.fit_transform(feat_data)).float().to(device)\n",
    "\n",
    "# 初始化模型\n",
    "bsne_model = BSNE_Transformer(\n",
    "    in_feat=feat_data.shape[1],\n",
    "    out_feat=2,\n",
    "    relation_nums=len(edge_indexs),\n",
    "    d_model=64,\n",
    "    nhead=args['num_heads'],\n",
    "    num_layers=3,\n",
    "    dim_feedforward=256,\n",
    "    drop_rate=args['drop_rate']\n",
    ").to(device)\n",
    "\n",
    "# 将边索引转移到设备\n",
    "for edge_index in edge_indexs:\n",
    "    edge_index[0] = edge_index[0].to(device)\n",
    "    edge_index[1] = [tensor.to(device) for tensor in edge_index[1]]\n",
    "\n",
    "print(\"\\n=== Starting Pretraining ===\")\n",
    "\n",
    "bsne_model.classifier.requires_grad_(False)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    filter(lambda p: p.requires_grad, bsne_model.parameters()),\n",
    "    lr=args['pretrain_lr'],\n",
    "    weight_decay=5e-5\n",
    ")\n",
    "pretrain_best_loss = float('inf')\n",
    "pretrain_no_improve = 0\n",
    "pretrain_early_stop = False\n",
    "\n",
    "for epoch in range(args['pretrain_epochs']):\n",
    "    if pretrain_early_stop:\n",
    "        break\n",
    "\n",
    "    bsne_model.train()\n",
    "    optimizer.zero_grad()\n",
    "\n",
    "\n",
    "\n",
    "    # 计算TSNE损失\n",
    "    #     tsne_loss = calculate_tsne_loss(\n",
    "    #         model=bsne_model,\n",
    "    #         feat_data=feat_data,\n",
    "    #         dist_matrix=dist_matrix,\n",
    "    #         batch_centers=batch_centers,\n",
    "    #         sample_size=args['sample_size'],\n",
    "    #         temperature=100,\n",
    "    #         eps=1e-10,\n",
    "    #         edge_indexs=edge_indexs,\n",
    "    #         device=device\n",
    "    #     )\n",
    "    # 采样中心节点\n",
    "    batch_centers = rd.sample(range(feat_data.shape[0]), args['batch_size'])\n",
    "\n",
    "\n",
    "    sample_size = args['sample_size']\n",
    "    temperature = 100\n",
    "    eps = 1e-10\n",
    "\n",
    "    total_loss = 0.0\n",
    "    batch_size = len(batch_centers)\n",
    "\n",
    "    # 为每个中心节点构建Bp子图\n",
    "    bp_subgraphs = []\n",
    "    for xi in batch_centers:\n",
    "        # 根据距离矩阵计算采样概率\n",
    "        dist_row = dist_matrix[xi].cpu().numpy()\n",
    "        probs = np.exp(-dist_row)\n",
    "        probs[xi] = 0  # 排除自身\n",
    "        probs /= probs.sum()\n",
    "\n",
    "        # 采样邻居节点\n",
    "        available_nodes = len(dist_row) - 1\n",
    "        actual_sample_size = min(sample_size, available_nodes)\n",
    "\n",
    "        if actual_sample_size > 0:\n",
    "            neighbors = np.random.choice(len(dist_row), size=actual_sample_size, p=probs, replace=False)\n",
    "            # 将中心节点和采样的邻居合并\n",
    "            subgraph_nodes = [xi] + neighbors.tolist()\n",
    "        else:\n",
    "            # 如果没有可用邻居，只使用中心节点\n",
    "            subgraph_nodes = [xi]\n",
    "\n",
    "        bp_subgraphs.append(subgraph_nodes)\n",
    "\n",
    "    # 为每个中心节点构建Bu子图（均匀采样）\n",
    "    bu_subgraphs = []\n",
    "    for xi in batch_centers:\n",
    "        dist_row = dist_matrix[xi].cpu().numpy()\n",
    "\n",
    "        # 随机采样节点构建Bu子图\n",
    "        available_nodes = len(dist_row) - 1\n",
    "        actual_sample_size = min(sample_size, available_nodes)\n",
    "\n",
    "        if actual_sample_size > 0:\n",
    "        # NOTE(review): unlike the Bp sampling above (which sets probs[xi] = 0),\n",
    "        # this uniform draw can include the center node xi itself — exclude it if unintended.\n",
    "        neighbors = np.random.choice(len(dist_row), size=actual_sample_size, replace=False)\n",
    "            # 将中心节点和采样的邻居合并\n",
    "            subgraph_nodes = [xi] + neighbors.tolist()\n",
    "        else:\n",
    "            # 如果没有可用邻居，只使用中心节点\n",
    "            subgraph_nodes = [xi]\n",
    "\n",
    "        bu_subgraphs.append(subgraph_nodes)\n",
    "    # 计算Bp子图中所有节点的特征\n",
    "    bp_features_batch = []\n",
    "    for bp_nodes in bp_subgraphs:\n",
    "        bp_node_features = []\n",
    "\n",
    "        # 为子图中的每个节点计算特征\n",
    "        for node_idx in bp_nodes:\n",
    "            # 构建该节点的一阶邻居子图\n",
    "            node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "\n",
    "            # 获取节点特征\n",
    "            _, node_feature = bsne_model([node_subgraph])\n",
    "\n",
    "            bp_node_features.append(node_feature.squeeze(0))\n",
    "\n",
    "        # 将该Bp子图所有节点的特征收集起来\n",
    "        bp_features_batch.append(torch.stack(bp_node_features))  # [sample_size+1, d_model]\n",
    "\n",
    "    # 计算Bu子图中所有节点的特征\n",
    "    bu_features_batch = []\n",
    "    for bu_nodes in bu_subgraphs:\n",
    "        bu_node_features = []\n",
    "\n",
    "        # 为子图中的每个节点计算特征\n",
    "        for node_idx in bu_nodes:\n",
    "            # 构建该节点的一阶邻居子图\n",
    "            node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "\n",
    "            # 获取节点特征\n",
    "            _, node_feature = bsne_model([node_subgraph])\n",
    "\n",
    "            bu_node_features.append(node_feature.squeeze(0))\n",
    "\n",
    "        # 将该Bu子图所有节点的特征收集起来\n",
    "        bu_features_batch.append(torch.stack(bu_node_features))  # [sample_size+1, d_model]\n",
    "\n",
    "    # 针对每一对Bp和Bu子图计算loss\n",
    "    for i in range(batch_size):\n",
    "        # 获取中心节点索引和特征\n",
    "        center_idx = batch_centers[i]\n",
    "        center_feature = bp_features_batch[i][0]  # 中心节点是第一个\n",
    "\n",
    "        # 仅获取Bp子图中的其他节点（排除中心节点）\n",
    "        other_bp_indices = bp_subgraphs[i][1:]\n",
    "        other_bp_features = bp_features_batch[i][1:]\n",
    "\n",
    "        # 计算中心节点到其他Bp节点的原始距离\n",
    "        orig_dists_bp = dist_matrix[center_idx][other_bp_indices].cpu().numpy()\n",
    "\n",
    "        # 计算中心节点到其他Bp节点的特征距离\n",
    "        feat_dists_bp = torch.cdist(center_feature.unsqueeze(0), other_bp_features).squeeze(0)\n",
    "\n",
    "        # 计算P向量（基于节点路径距离）\n",
    "        P = (1.0 + orig_dists_bp ** 2) ** -1\n",
    "        P_sum = P.sum()\n",
    "        P = P / (P_sum + eps)\n",
    "        P = torch.tensor(P, device=device)\n",
    "\n",
    "        # 计算Q向量（基于特征距离）\n",
    "        Q = (1.0 + feat_dists_bp ** 2 / temperature) ** -1\n",
    "        Q_sum = Q.sum()\n",
    "        Q = Q / (Q_sum + eps)\n",
    "\n",
    "        # 局部损失（KL散度）\n",
    "        # Fixed paren bug: was len(bp_features_batch[i]-1), i.e. len(tensor - 1),\n",
    "        # which divides by sample_size+1 instead of the number of neighbors.\n",
    "        # NOTE(review): a true KL divergence would weight each term by P,\n",
    "        # i.e. (P * (torch.log(P + eps) - torch.log(Q + eps))).sum() — confirm intent.\n",
    "        loss_local = (torch.log(P + eps) - torch.log(Q + eps)).sum() / max(1, len(bp_features_batch[i]) - 1)\n",
    "\n",
    "        # 计算k_Bp\n",
    "        # 获取中心节点到图中所有节点的距离\n",
    "        all_dists = dist_matrix[center_idx]  # [N]\n",
    "        # 计算所有节点的未归一化概率值 (1 + d^2)^{-1}\n",
    "        unnorm_probs_all = (1.0 + all_dists ** 2) ** -1\n",
    "        # 排除中心节点自身（设置自身概率为0）\n",
    "        unnorm_probs_all = unnorm_probs_all.clone()  # 避免修改原始数据\n",
    "        unnorm_probs_all[center_idx] = 0\n",
    "        Z_i = unnorm_probs_all.sum()  # 标量值\n",
    "        sum_p_cond = P.sum()\n",
    "        N = dist_matrix.shape[0]\n",
    "        k_Bp = (sum_p_cond * N / len(other_bp_indices))/Z_i\n",
    "\n",
    "        # 计算中心节点到Bu子图节点的特征距离\n",
    "        bu_features = bu_features_batch[i]\n",
    "        feat_dists_bu = torch.cdist(center_feature.unsqueeze(0), bu_features).squeeze(0)\n",
    "\n",
    "        # 计算指数距离之和\n",
    "        sum_e_bu = torch.exp(-feat_dists_bu).sum()\n",
    "        sum_e_bp = torch.exp(-feat_dists_bp).sum()\n",
    "\n",
    "        # 全局损失\n",
    "        global_ratio = k_Bp * (sum_e_bu / (sum_e_bp + eps))\n",
    "        loss_global = torch.log(global_ratio.clamp(min=eps, max=1e10))  # 限制范围\n",
    "\n",
    "        # 合并损失\n",
    "        total_loss += loss_local + loss_global\n",
    "#         print(f\"local_loss: {loss_local.item()},global_loss: {loss_global.item()}\")\n",
    "\n",
    "    bsne_loss = total_loss / batch_size\n",
    "\n",
    "    bsne_loss.backward()\n",
    "    optimizer.step()\n",
    "\n",
    "    # 更新最佳损失和早停计数器\n",
    "    if bsne_loss.item() < pretrain_best_loss:\n",
    "        pretrain_best_loss = bsne_loss.item()\n",
    "        pretrain_no_improve = 0\n",
    "    else:\n",
    "        pretrain_no_improve += 1\n",
    "\n",
    "    if pretrain_no_improve >= args['pretrain_patience']:\n",
    "        print(f\"Pretrain early stopping at epoch {epoch}\")\n",
    "        pretrain_early_stop = True\n",
    "\n",
    "    writer.add_scalar('Pretrain/TSNE_Loss', bsne_loss.item(), epoch)\n",
    "\n",
    "    if epoch % 5 == 0:\n",
    "        print(f'Pretrain Epoch: {epoch:03d}, BSNE Loss: {bsne_loss.item():.4f}')\n",
    "\n",
    "print(\"\\n=== Starting Fine-tuning ===\")\n",
    "bsne_model.classifier.requires_grad_(True)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    bsne_model.parameters(),\n",
    "    lr=args['finetune_lr'],\n",
    "    weight_decay=5e-5\n",
    ")\n",
    "scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n",
    "    optimizer, mode='max', factor=0.5, patience=10, verbose=True\n",
    ")\n",
    "\n",
    "best_val_auc = 0.0\n",
    "best_model_state = None\n",
    "train_pos, train_neg = pos_neg_split(idx_train, y_train)\n",
    "\n",
    "no_improve_epochs = 0\n",
    "early_stop = False\n",
    "\n",
    "for epoch in range(args['num_epochs']):\n",
    "    if early_stop:\n",
    "        break\n",
    "\n",
    "    bsne_model.train()\n",
    "    total_loss = 0.0\n",
    "\n",
    "    # 采样中心节点\n",
    "    batch_centers = rd.sample(train_pos + train_neg, args['batch_size'])\n",
    "    subgraph_data = []\n",
    "\n",
    "    for xi in batch_centers:\n",
    "        subgraph = create_node_subgraph(xi, feat_data, edge_indexs, device)\n",
    "        #         print(len(subgraph['features']))\n",
    "        subgraph_data.append(subgraph)\n",
    "\n",
    "    optimizer.zero_grad()\n",
    "\n",
    "    center_logits, _ = bsne_model(subgraph_data)  # [B, 2]\n",
    "\n",
    "    # 提取中心节点标签\n",
    "    labels_center = torch.tensor([labels[xi] for xi in batch_centers]).to(device).long()\n",
    "\n",
    "    # 计算分类损失\n",
    "    cls_loss = F.nll_loss(center_logits, labels_center)\n",
    "\n",
    "    cls_loss.backward()\n",
    "    optimizer.step()\n",
    "\n",
    "    total_loss += cls_loss.item()\n",
    "\n",
    "    # NOTE(review): F.nll_loss already averages over the batch, so dividing by\n",
    "    # batch_size again makes the logged value scale with 1/batch_size.\n",
    "    avg_loss = total_loss / args['batch_size']\n",
    "    writer.add_scalar('FineTune/Train_Loss', avg_loss, epoch)\n",
    "\n",
    "    if epoch % 5 == 0:\n",
    "        val_auc, val_ap, val_f1, val_g_mean = test(idx_val, y_val, bsne_model, feat_data, edge_indexs, device)\n",
    "\n",
    "        writer.add_scalar('Validation/AUC', val_auc, epoch)\n",
    "        writer.add_scalar('Validation/F1', val_f1, epoch)\n",
    "        writer.add_scalar('Validation/GMean', val_g_mean, epoch)\n",
    "\n",
    "        print(f'Epoch: {epoch:03d} | Loss: {avg_loss:.4f} | Val AUC: {val_auc:.4f} | Val F1: {val_f1:.4f}')\n",
    "\n",
    "        scheduler.step(val_auc)\n",
    "\n",
    "        if val_auc > best_val_auc:\n",
    "            best_val_auc = val_auc\n",
    "            no_improve_epochs = 0\n",
    "            best_model_state = copy.deepcopy(bsne_model.state_dict())\n",
    "        else:\n",
    "            no_improve_epochs += 1\n",
    "\n",
    "        if no_improve_epochs >= args['patience']:\n",
    "            print(f\"Early stopping at epoch {epoch}\")\n",
    "            early_stop = True\n",
    "\n",
    "# 加载最佳模型\n",
    "if best_model_state is not None:  # guard: validation may never have improved\n",
    "    bsne_model.load_state_dict(best_model_state)\n",
    "\n",
    "# 最终测试\n",
    "test_auc, test_ap, test_f1, test_g_mean = test(idx_test, y_test, bsne_model, feat_data, edge_indexs, device)\n",
    "print(f'\\n=== Final Test Results ===')\n",
    "print(f'Test AUC: {test_auc:.4f} | Test AP: {test_ap:.4f} | Test F1: {test_f1:.4f} | G-mean: {test_g_mean:.4f}')\n",
    "writer.close()\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "8098a231",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cuda\n",
      "loading data...\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
      "\u001b[31mKeyboardInterrupt\u001b[39m                         Traceback (most recent call last)",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[2]\u001b[39m\u001b[32m, line 36\u001b[39m\n\u001b[32m     33\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33m'\u001b[39m\u001b[33mloading data...\u001b[39m\u001b[33m'\u001b[39m)\n\u001b[32m     34\u001b[39m prefix = \u001b[33m\"\u001b[39m\u001b[33m../../data/\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m---> \u001b[39m\u001b[32m36\u001b[39m edge_indexs, feat_data, labels = \u001b[43mload_data\u001b[49m\u001b[43m(\u001b[49m\u001b[43margs\u001b[49m\u001b[43m[\u001b[49m\u001b[33;43m'\u001b[39;49m\u001b[33;43mdataset\u001b[39;49m\u001b[33;43m'\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43margs\u001b[49m\u001b[43m[\u001b[49m\u001b[33;43m'\u001b[39;49m\u001b[33;43mlayers_tree\u001b[39;49m\u001b[33;43m'\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mprefix\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m     38\u001b[39m np.random.seed(args[\u001b[33m'\u001b[39m\u001b[33mseed\u001b[39m\u001b[33m'\u001b[39m])\n\u001b[32m     39\u001b[39m rd.seed(args[\u001b[33m'\u001b[39m\u001b[33mseed\u001b[39m\u001b[33m'\u001b[39m])\n",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[1]\u001b[39m\u001b[32m, line 177\u001b[39m, in \u001b[36mload_data\u001b[39m\u001b[34m(data, k, prefix)\u001b[39m\n\u001b[32m    175\u001b[39m         tree = pickle.load(file)\n\u001b[32m    176\u001b[39m     file.close()\n\u001b[32m--> \u001b[39m\u001b[32m177\u001b[39m     relation1_tree.append(\u001b[43mnumpy_array_to_edge_index\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtree\u001b[49m\u001b[43m)\u001b[49m)\n\u001b[32m    178\u001b[39m \u001b[38;5;28;01mwith\u001b[39;00m \u001b[38;5;28mopen\u001b[39m(pickle_file[\u001b[33m'\u001b[39m\u001b[33mamz_usu\u001b[39m\u001b[33m'\u001b[39m], \u001b[33m'\u001b[39m\u001b[33mrb\u001b[39m\u001b[33m'\u001b[39m) \u001b[38;5;28;01mas\u001b[39;00m file:\n\u001b[32m    179\u001b[39m     relation2 = pickle.load(file)\n",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[1]\u001b[39m\u001b[32m, line 111\u001b[39m, in \u001b[36mnumpy_array_to_edge_index\u001b[39m\u001b[34m(np_array)\u001b[39m\n\u001b[32m    109\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34mnumpy_array_to_edge_index\u001b[39m(np_array):\n\u001b[32m    110\u001b[39m     \u001b[38;5;28;01massert\u001b[39;00m np_array.ndim == \u001b[32m2\u001b[39m \u001b[38;5;129;01mand\u001b[39;00m np_array.shape[\u001b[32m0\u001b[39m] == np_array.shape[\u001b[32m1\u001b[39m], \u001b[33m\"\u001b[39m\u001b[33mInput must be a square matrix.\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m--> \u001b[39m\u001b[32m111\u001b[39m     rows, cols = \u001b[43mnp\u001b[49m\u001b[43m.\u001b[49m\u001b[43mnonzero\u001b[49m\u001b[43m(\u001b[49m\u001b[43mnp_array\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m    112\u001b[39m     edge_index = np.vstack((rows, cols))\n\u001b[32m    113\u001b[39m     edge_index_tensor = torch.from_numpy(edge_index).long()\n",
      "\u001b[36mFile \u001b[39m\u001b[32m~/run/LML/anaconda3/envs/tsne/lib/python3.11/site-packages/numpy/core/fromnumeric.py:1973\u001b[39m, in \u001b[36mnonzero\u001b[39m\u001b[34m(a)\u001b[39m\n\u001b[32m   1881\u001b[39m \u001b[38;5;129m@array_function_dispatch\u001b[39m(_nonzero_dispatcher)\n\u001b[32m   1882\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34mnonzero\u001b[39m(a):\n\u001b[32m   1883\u001b[39m \u001b[38;5;250m    \u001b[39m\u001b[33;03m\"\"\"\u001b[39;00m\n\u001b[32m   1884\u001b[39m \u001b[33;03m    Return the indices of the elements that are non-zero.\u001b[39;00m\n\u001b[32m   1885\u001b[39m \n\u001b[32m   (...)\u001b[39m\u001b[32m   1971\u001b[39m \n\u001b[32m   1972\u001b[39m \u001b[33;03m    \"\"\"\u001b[39;00m\n\u001b[32m-> \u001b[39m\u001b[32m1973\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43m_wrapfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[43ma\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[33;43m'\u001b[39;49m\u001b[33;43mnonzero\u001b[39;49m\u001b[33;43m'\u001b[39;49m\u001b[43m)\u001b[49m\n",
      "\u001b[36mFile \u001b[39m\u001b[32m~/run/LML/anaconda3/envs/tsne/lib/python3.11/site-packages/numpy/core/fromnumeric.py:59\u001b[39m, in \u001b[36m_wrapfunc\u001b[39m\u001b[34m(obj, method, *args, **kwds)\u001b[39m\n\u001b[32m     56\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m _wrapit(obj, method, *args, **kwds)\n\u001b[32m     58\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m---> \u001b[39m\u001b[32m59\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mbound\u001b[49m\u001b[43m(\u001b[49m\u001b[43m*\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwds\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m     60\u001b[39m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mTypeError\u001b[39;00m:\n\u001b[32m     61\u001b[39m     \u001b[38;5;66;03m# A TypeError occurs if the object does have such a method in its\u001b[39;00m\n\u001b[32m     62\u001b[39m     \u001b[38;5;66;03m# class, but its signature is not identical to that of NumPy's. This\u001b[39;00m\n\u001b[32m   (...)\u001b[39m\u001b[32m     66\u001b[39m     \u001b[38;5;66;03m# Call _wrapit from within the except clause to ensure a potential\u001b[39;00m\n\u001b[32m     67\u001b[39m     \u001b[38;5;66;03m# exception has a traceback chain.\u001b[39;00m\n\u001b[32m     68\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m _wrapit(obj, method, *args, **kwds)\n",
      "\u001b[31mKeyboardInterrupt\u001b[39m: "
     ]
    }
   ],
   "source": [
    "# 原预训练部分调试\n",
    "\n",
    "# 参数设置\n",
    "args = {\n",
    "    \"dataset\": \"amazon\",\n",
    "    \"batch_size\": 8,\n",
    "    \"sample_size\": 20,\n",
    "    \"weight_decay\": 0.00005,\n",
    "    \"emb_size\": 32,\n",
    "    \"pretrain_epochs\": 100,\n",
    "    \"pretrain_lr\": 0.0001,\n",
    "    \"finetune_lr\": 0.0005,\n",
    "    \"num_epochs\": 100,\n",
    "    \"pretrain_patience\": 20,\n",
    "    \"patience\": 30,\n",
    "    \"tsne_weight\": 0.3,\n",
    "    \"weight\": 0.6, \n",
    "    \"layers\": 7,\n",
    "    \"test_size\": 0.6,\n",
    "    \"val_size\": 0.5,\n",
    "    \"layers_tree\": 7,\n",
    "    \"seed\": 76,\n",
    "    \"num_heads\": 2,\n",
    "    \"drop_rate\": 0.5\n",
    "}\n",
    "\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "print(device)\n",
    "\n",
    "timestamp = time.strftime(\"%Y%m%d-%H%M%S\")\n",
    "writer = SummaryWriter(f'runs/{args[\"dataset\"]}_{timestamp}')\n",
    "\n",
    "print('loading data...')\n",
    "prefix = \"../../data/\"\n",
    "\n",
    "edge_indexs, feat_data, labels = load_data(args['dataset'], args['layers_tree'], prefix)\n",
    "\n",
    "np.random.seed(args['seed'])\n",
    "rd.seed(args['seed'])\n",
    "\n",
    "if args['dataset'] == 'yelp':\n",
    "    index = list(range(len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels, stratify=labels,\n",
    "                                                                    test_size=args['test_size'], random_state=2,\n",
    "                                                                    shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"YelpChi_shortest_distance.pkl\")\n",
    "elif args['dataset'] == 'amazon':\n",
    "    index = list(range(3305, len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels[3305:],\n",
    "                                                                    stratify=labels[3305:],\n",
    "                                                                    test_size=args['test_size'],\n",
    "                                                                    random_state=2, shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"Amazon_shortest_distance.pkl\")\n",
    "\n",
    "with open(dist_path, 'rb') as f:\n",
    "    dist_data = pickle.load(f)\n",
    "    dist_matrix = torch.tensor(dist_data['dist_matrix']).to(device)\n",
    "\n",
    "# 准备特征数据\n",
    "feat_data = torch.tensor(feat_data).float()\n",
    "# 特征归一化\n",
    "scaler = MinMaxScaler()\n",
    "feat_data = torch.tensor(scaler.fit_transform(feat_data)).float().to(device)\n",
    "\n",
    "# 初始化模型\n",
    "bsne_model = BSNE_Transformer(\n",
    "    in_feat=feat_data.shape[1],\n",
    "    out_feat=2,\n",
    "    relation_nums=len(edge_indexs),\n",
    "    d_model=64,\n",
    "    nhead=args['num_heads'],\n",
    "    num_layers=3,\n",
    "    dim_feedforward=256,\n",
    "    drop_rate=args['drop_rate']\n",
    ").to(device)\n",
    "\n",
    "# 将边索引转移到设备\n",
    "for edge_index in edge_indexs:\n",
    "    edge_index[0] = edge_index[0].to(device)\n",
    "    edge_index[1] = [tensor.to(device) for tensor in edge_index[1]]\n",
    "\n",
    "print(\"\\n=== Starting Pretraining ===\")\n",
    "\n",
    "bsne_model.classifier.requires_grad_(False)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    filter(lambda p: p.requires_grad, bsne_model.parameters()),\n",
    "    lr=args['pretrain_lr'],\n",
    "    weight_decay=5e-5\n",
    ")\n",
    "pretrain_best_loss = float('inf')\n",
    "pretrain_no_improve = 0\n",
    "pretrain_early_stop = False\n",
    "\n",
    "\n",
    "bsne_model.train()\n",
    "optimizer.zero_grad()\n",
    "\n",
    "# batch_centers = rd.sample(range(feat_data.shape[0]), args['batch_size'])\n",
    "batch_centers = np.random.choice(range(feat_data.shape[0]), size=args['batch_size'], replace=False)\n",
    "\n",
    "\n",
    "sample_size = args['sample_size']\n",
    "temperature = 100\n",
    "eps = 1e-10\n",
    "\n",
    "total_loss = 0.0\n",
    "batch_size = len(batch_centers)\n",
    "\n",
    "# 为每个中心节点构建Bp子图\n",
    "bp_subgraphs = []\n",
    "for xi in batch_centers:\n",
    "    # 根据距离矩阵计算采样概率\n",
    "    dist_row = dist_matrix[xi].cpu().numpy()\n",
    "    probs = np.exp(-dist_row)\n",
    "    probs[xi] = 0  # 排除自身\n",
    "    probs /= probs.sum()\n",
    "\n",
    "    # 采样邻居节点\n",
    "    available_nodes = len(dist_row) - 1\n",
    "    actual_sample_size = min(sample_size, available_nodes)\n",
    "\n",
    "    if actual_sample_size > 0:\n",
    "        neighbors = np.random.choice(len(dist_row), size=actual_sample_size, p=probs, replace=False)\n",
    "        subgraph_nodes = [xi] + neighbors.tolist()\n",
    "    else:\n",
    "        subgraph_nodes = [xi]\n",
    "\n",
    "    bp_subgraphs.append(subgraph_nodes)\n",
    "\n",
    "# 为每个中心节点构建Bu子图（均匀采样）\n",
    "bu_subgraphs = []\n",
    "for xi in batch_centers:\n",
    "    dist_row = dist_matrix[xi].cpu().numpy()\n",
    "\n",
    "    # 随机采样节点构建Bu子图\n",
    "    available_nodes = len(dist_row) - 1\n",
    "    actual_sample_size = min(sample_size, available_nodes)\n",
    "\n",
    "    if actual_sample_size > 0:\n",
    "        neighbors = np.random.choice(len(dist_row), size=actual_sample_size, replace=False)\n",
    "        subgraph_nodes = [xi] + neighbors.tolist()\n",
    "    else:\n",
    "        subgraph_nodes = [xi]\n",
    "\n",
    "    bu_subgraphs.append(subgraph_nodes)\n",
    "# 计算Bp子图中所有节点的特征\n",
    "bp_features_batch = []\n",
    "for bp_nodes in bp_subgraphs:\n",
    "    bp_node_features = []\n",
    "\n",
    "    # 为子图中的每个节点计算特征\n",
    "    for node_idx in bp_nodes:\n",
    "        # 构建该节点的一阶邻居子图\n",
    "        node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "\n",
    "        # 获取节点特征\n",
    "        _, node_feature = bsne_model([node_subgraph])\n",
    "\n",
    "        bp_node_features.append(node_feature.squeeze(0))\n",
    "\n",
    "    # 将该Bp子图所有节点的特征收集起来\n",
    "    bp_features_batch.append(torch.stack(bp_node_features))  # [sample_size+1, d_model]\n",
    "\n",
    "# 计算Bu子图中所有节点的特征\n",
    "bu_features_batch = []\n",
    "for bu_nodes in bu_subgraphs:\n",
    "    bu_node_features = []\n",
    "\n",
    "    # 为子图中的每个节点计算特征\n",
    "    for node_idx in bu_nodes:\n",
    "        # 构建该节点的一阶邻居子图\n",
    "        node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "\n",
    "        # 获取节点特征\n",
    "        _, node_feature = bsne_model([node_subgraph])\n",
    "\n",
    "        bu_node_features.append(node_feature.squeeze(0))\n",
    "\n",
    "    # 将该Bu子图所有节点的特征收集起来\n",
    "    bu_features_batch.append(torch.stack(bu_node_features))  # [sample_size+1, d_model]\n",
    "\n",
    "# 针对每一对Bp和Bu子图计算loss\n",
    "for i in range(batch_size):\n",
    "    # 获取中心节点索引和特征\n",
    "    center_idx = batch_centers[i]\n",
    "    center_feature = bp_features_batch[i][0]  # 中心节点是第一个\n",
    "\n",
    "    # 仅获取Bp子图中的其他节点（排除中心节点）\n",
    "    other_bp_indices = bp_subgraphs[i][1:]\n",
    "    other_bp_features = bp_features_batch[i][1:]\n",
    "\n",
    "    # 计算中心节点到其他Bp节点的原始距离\n",
    "    orig_dists_bp = dist_matrix[center_idx][other_bp_indices].cpu().numpy()\n",
    "\n",
    "    # 计算中心节点到其他Bp节点的特征距离\n",
    "    feat_dists_bp = torch.cdist(center_feature.unsqueeze(0), other_bp_features).squeeze(0)\n",
    "\n",
    "    # 计算P向量（基于节点路径距离）\n",
    "    P = (1.0 + orig_dists_bp ** 2) ** -1\n",
    "    P_sum = P.sum()\n",
    "    P = P / (P_sum + eps)\n",
    "    P = torch.tensor(P, device=device)\n",
    "\n",
    "    # 计算Q向量（基于特征距离）\n",
    "    Q = (1.0 + feat_dists_bp ** 2 / temperature) ** -1\n",
    "    Q_sum = Q.sum()\n",
    "    Q = Q / (Q_sum + eps)\n",
    "    \n",
    "    # 局部损失（KL散度）\n",
    "    # Fixed paren bug: was len(bp_features_batch[i]-1), i.e. len(tensor - 1),\n",
    "    # which divides by sample_size+1 instead of the number of neighbors.\n",
    "    loss_local = (torch.log(P + eps) - torch.log(Q + eps)).sum() / max(1, len(bp_features_batch[i]) - 1)\n",
    "\n",
    "    # 计算k_Bp\n",
    "    # 获取中心节点到图中所有节点的距离\n",
    "    all_dists = dist_matrix[center_idx]  # [N]\n",
    "    # 计算所有节点的未归一化概率值 (1 + d^2)^{-1}\n",
    "    unnorm_probs_all = (1.0 + all_dists ** 2) ** -1\n",
    "    # 排除中心节点自身（设置自身概率为0）\n",
    "    unnorm_probs_all = unnorm_probs_all.clone()  # 避免修改原始数据\n",
    "    unnorm_probs_all[center_idx] = 0\n",
    "    Z_i = unnorm_probs_all.sum()\n",
    "    sum_p_cond = P.sum()\n",
    "    N = dist_matrix.shape[0]\n",
    "    k_Bp = (sum_p_cond * N / len(other_bp_indices))/Z_i\n",
    "\n",
    "    # 计算中心节点到Bu子图节点的特征距离\n",
    "    bu_features = bu_features_batch[i]\n",
    "    feat_dists_bu = torch.cdist(center_feature.unsqueeze(0), bu_features).squeeze(0)\n",
    "\n",
    "    # 计算指数距离之和\n",
    "    sum_e_bu = torch.exp(-feat_dists_bu).sum()\n",
    "    sum_e_bp = torch.exp(-feat_dists_bp).sum()\n",
    "\n",
    "    # 全局损失\n",
    "    global_ratio = k_Bp * (sum_e_bu / (sum_e_bp + eps))\n",
    "    loss_global = torch.log(global_ratio.clamp(min=eps, max=1e10))  # 限制范围\n",
    "\n",
    "    # 合并损失\n",
    "    total_loss += loss_local + loss_global\n",
    "    print('P:',P)\n",
    "    print('Q:',Q)\n",
    "    print(f\"local_loss: {loss_local.item()},global_loss: {loss_global.item()}\")\n",
    "\n",
    "bsne_loss = total_loss / batch_size\n",
    "\n",
    "bsne_loss.backward()\n",
    "optimizer.step()\n",
    "\n",
    "# 更新最佳损失和早停计数器\n",
    "if bsne_loss.item() < pretrain_best_loss:\n",
    "    pretrain_best_loss = bsne_loss.item()\n",
    "    pretrain_no_improve = 0\n",
    "else:\n",
    "    pretrain_no_improve += 1\n",
    "\n",
    "if pretrain_no_improve >= args['pretrain_patience']:\n",
    "    print(\"Pretrain early stopping (debug cell: no epoch loop)\")  # fixed: `epoch` is undefined in this cell\n",
    "    pretrain_early_stop = True\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "491cb31d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cuda\n",
      "loading data...\n",
      "\n",
      "=== Starting Pretraining ===\n",
      "P: tensor([0.0951, 0.0350, 0.0350, 0.0350, 0.0350, 0.0951, 0.0350, 0.0350, 0.0350,\n",
      "        0.0350, 0.0350, 0.0951, 0.0350, 0.0350, 0.0951, 0.0350, 0.0350, 0.0350,\n",
      "        0.0350, 0.0951], device='cuda:0', dtype=torch.float64)\n",
      "Q: tensor([0.0870, 0.0252, 0.0122, 0.1046, 0.0083, 0.0110, 0.0215, 0.0485, 0.0126,\n",
      "        0.0360, 0.0280, 0.0155, 0.2044, 0.0891, 0.0310, 0.0265, 0.0356, 0.0953,\n",
      "        0.0233, 0.0841], device='cuda:0')\n",
      "local_loss: 0.26816328109984583,global_loss: -1.3172113920079427\n",
      "P: tensor([0.0257, 0.0699, 0.0699, 0.0699, 0.0257, 0.0699, 0.0257, 0.0257, 0.0699,\n",
      "        0.0257, 0.0257, 0.0699, 0.0699, 0.0257, 0.0699, 0.0699, 0.0699, 0.0257,\n",
      "        0.0699, 0.0257], device='cuda:0', dtype=torch.float64)\n",
      "Q: tensor([0.0927, 0.0139, 0.0355, 0.0535, 0.1191, 0.0420, 0.0436, 0.0338, 0.0220,\n",
      "        0.0069, 0.0500, 0.0317, 0.0456, 0.0341, 0.0445, 0.0325, 0.0221, 0.1271,\n",
      "        0.1273, 0.0220], device='cuda:0')\n",
      "local_loss: 0.12610869781579812,global_loss: -1.2795556945524054\n",
      "P: tensor([0.0282, 0.0766, 0.0766, 0.0282, 0.0282, 0.0282, 0.0766, 0.0766, 0.0766,\n",
      "        0.0282, 0.0282, 0.0282, 0.0282, 0.0766, 0.0282, 0.0282, 0.0766, 0.0282,\n",
      "        0.0766, 0.0766], device='cuda:0', dtype=torch.float64)\n",
      "Q: tensor([0.0778, 0.0217, 0.0508, 0.0126, 0.0271, 0.0288, 0.0303, 0.0155, 0.1186,\n",
      "        0.0333, 0.0214, 0.0643, 0.0740, 0.0620, 0.1310, 0.0430, 0.0501, 0.0613,\n",
      "        0.0386, 0.0372], device='cuda:0')\n",
      "local_loss: 0.0598002396929537,global_loss: -0.6018635864099575\n",
      "P: tensor([0.0330, 0.0897, 0.0330, 0.0897, 0.0330, 0.0897, 0.0330, 0.0330, 0.0897,\n",
      "        0.0330, 0.0330, 0.0330, 0.0330, 0.0330, 0.0330, 0.0330, 0.0897, 0.0330,\n",
      "        0.0330, 0.0897], device='cuda:0', dtype=torch.float64)\n",
      "Q: tensor([0.0291, 0.0405, 0.0209, 0.0351, 0.0089, 0.0814, 0.0300, 0.0299, 0.0365,\n",
      "        0.0577, 0.1201, 0.0882, 0.0072, 0.0759, 0.0214, 0.0556, 0.0394, 0.0308,\n",
      "        0.0909, 0.1003], device='cuda:0')\n",
      "local_loss: 0.1172614689787698,global_loss: -1.084382291651389\n",
      "P: tensor([0.0372, 0.0372, 0.0372, 0.0372, 0.0372, 0.0372, 0.1012, 0.1012, 0.0372,\n",
      "        0.0372, 0.0372, 0.1012, 0.0372, 0.1012, 0.0372, 0.0372, 0.0372, 0.0372,\n",
      "        0.0372, 0.0372], device='cuda:0', dtype=torch.float64)\n",
      "Q: tensor([0.0872, 0.0453, 0.0809, 0.1055, 0.0554, 0.0238, 0.0239, 0.0378, 0.0436,\n",
      "        0.0243, 0.1010, 0.0449, 0.0240, 0.0264, 0.0314, 0.0597, 0.0442, 0.0753,\n",
      "        0.0354, 0.0294], device='cuda:0')\n",
      "local_loss: 0.028813683215835707,global_loss: -0.38870563368974426\n",
      "P: tensor([0.0246, 0.0669, 0.0669, 0.0246, 0.0246, 0.0246, 0.0669, 0.0246, 0.0669,\n",
      "        0.0669, 0.0246, 0.0669, 0.0669, 0.0669, 0.0669, 0.0246, 0.0669, 0.0669,\n",
      "        0.0246, 0.0669], device='cuda:0', dtype=torch.float64)\n",
      "Q: tensor([0.0080, 0.0318, 0.0351, 0.0204, 0.0175, 0.0562, 0.0229, 0.1497, 0.0181,\n",
      "        0.1351, 0.0531, 0.1020, 0.0127, 0.1036, 0.0443, 0.0512, 0.0216, 0.0278,\n",
      "        0.0538, 0.0347], device='cuda:0')\n",
      "local_loss: 0.18434071831931248,global_loss: -1.1167904900255634\n",
      "P: tensor([0.0439, 0.0439, 0.0439, 0.0439, 0.0439, 0.0161, 0.0439, 0.0439, 0.0439,\n",
      "        0.1192, 0.0439, 0.0439, 0.1192, 0.0439, 0.0439, 0.0439, 0.0439, 0.0439,\n",
      "        0.0439, 0.0439], device='cuda:0', dtype=torch.float64)\n",
      "Q: tensor([0.0540, 0.0524, 0.0245, 0.0414, 0.0417, 0.0415, 0.0303, 0.0240, 0.0442,\n",
      "        0.0415, 0.0373, 0.0667, 0.0714, 0.0286, 0.0132, 0.0288, 0.0156, 0.2517,\n",
      "        0.0197, 0.0715], device='cuda:0')\n",
      "local_loss: 0.16411689181627467,global_loss: -0.9271910526311694\n",
      "P: tensor([0.0236, 0.0642, 0.0642, 0.0236, 0.0642, 0.0642, 0.0642, 0.0642, 0.0642,\n",
      "        0.0642, 0.0236, 0.0236, 0.0236, 0.0642, 0.0642, 0.0236, 0.0642, 0.0236,\n",
      "        0.0642, 0.0642], device='cuda:0', dtype=torch.float64)\n",
      "Q: tensor([0.0559, 0.0233, 0.0885, 0.0729, 0.0203, 0.0878, 0.0111, 0.0448, 0.0266,\n",
      "        0.0496, 0.0455, 0.0770, 0.1196, 0.0465, 0.0395, 0.0250, 0.0488, 0.0340,\n",
      "        0.0451, 0.0378], device='cuda:0')\n",
      "local_loss: 0.04376085387566986,global_loss: -0.9340696621677361\n"
     ]
    }
   ],
   "source": [
    "# Debugging the new pretraining stage\n",
    "\n",
    "# Hyperparameter settings\n",
    "args = {\n",
    "    \"dataset\": \"amazon\",\n",
    "    \"batch_size\": 8,\n",
    "    \"sample_size\": 20,\n",
    "    \"weight_decay\": 0.00005,\n",
    "    \"emb_size\": 32,\n",
    "    \"pretrain_epochs\": 100,\n",
    "    \"pretrain_lr\": 0.0001,\n",
    "    \"finetune_lr\": 0.0005,\n",
    "    \"num_epochs\": 100,\n",
    "    \"pretrain_patience\": 20,\n",
    "    \"patience\": 30,\n",
    "    \"tsne_weight\": 0.3,\n",
    "    \"weight\": 0.6,\n",
    "    \"layers\": 7,\n",
    "    \"test_size\": 0.6,\n",
    "    \"val_size\": 0.5,\n",
    "    \"layers_tree\": 7,\n",
    "    \"seed\": 76,\n",
    "    \"num_heads\": 2,\n",
    "    \"drop_rate\": 0.5\n",
    "}\n",
    "\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "print(device)\n",
    "\n",
    "timestamp = time.strftime(\"%Y%m%d-%H%M%S\")\n",
    "writer = SummaryWriter(f'runs/{args[\"dataset\"]}_{timestamp}')\n",
    "\n",
    "print('loading data...')\n",
    "prefix = \"../../data/\"\n",
    "\n",
    "edge_indexs, feat_data, labels = load_data(args['dataset'], args['layers_tree'], prefix)\n",
    "\n",
    "np.random.seed(args['seed'])\n",
    "rd.seed(args['seed'])\n",
    "\n",
    "# Stratified train/val/test split; Amazon nodes 0-3304 are excluded from the split\n",
    "if args['dataset'] == 'yelp':\n",
    "    index = list(range(len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels, stratify=labels,\n",
    "                                                                    test_size=args['test_size'], random_state=2,\n",
    "                                                                    shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"YelpChi_shortest_distance.pkl\")\n",
    "elif args['dataset'] == 'amazon':\n",
    "    index = list(range(3305, len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels[3305:],\n",
    "                                                                    stratify=labels[3305:],\n",
    "                                                                    test_size=args['test_size'],\n",
    "                                                                    random_state=2, shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"Amazon_shortest_distance.pkl\")\n",
    "\n",
    "# Precomputed all-pairs shortest-path distance matrix\n",
    "with open(dist_path, 'rb') as f:\n",
    "    dist_data = pickle.load(f)\n",
    "    dist_matrix = torch.tensor(dist_data['dist_matrix']).to(device)\n",
    "\n",
    "# Prepare feature data\n",
    "feat_data = torch.tensor(feat_data).float()\n",
    "# Min-max feature normalisation\n",
    "scaler = MinMaxScaler()\n",
    "feat_data = torch.tensor(scaler.fit_transform(feat_data)).float().to(device)\n",
    "\n",
    "# Initialise the model\n",
    "bsne_model = BSNE_Transformer(\n",
    "    in_feat=feat_data.shape[1],\n",
    "    out_feat=2,\n",
    "    relation_nums=len(edge_indexs),\n",
    "    d_model=64,\n",
    "    nhead=args['num_heads'],\n",
    "    num_layers=3,\n",
    "    dim_feedforward=256,\n",
    "    drop_rate=args['drop_rate']\n",
    ").to(device)\n",
    "\n",
    "# Move the edge indices to the target device\n",
    "for edge_index in edge_indexs:\n",
    "    edge_index[0] = edge_index[0].to(device)\n",
    "    edge_index[1] = [tensor.to(device) for tensor in edge_index[1]]\n",
    "\n",
    "print(\"\\n=== Starting Pretraining ===\")\n",
    "\n",
    "# Freeze the classifier head: pretraining only trains the embedding layers\n",
    "bsne_model.classifier.requires_grad_(False)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    filter(lambda p: p.requires_grad, bsne_model.parameters()),\n",
    "    lr=args['pretrain_lr'],\n",
    "    weight_decay=args['weight_decay']\n",
    ")\n",
    "pretrain_best_loss = float('inf')\n",
    "pretrain_no_improve = 0\n",
    "pretrain_early_stop = False\n",
    "\n",
    "bsne_model.train()\n",
    "optimizer.zero_grad()\n",
    "\n",
    "batch_centers = np.random.choice(range(feat_data.shape[0]), size=args['batch_size'], replace=False)\n",
    "\n",
    "sample_size = args['sample_size']\n",
    "eps = 1e-10\n",
    "\n",
    "total_loss = 0.0\n",
    "batch_size = len(batch_centers)\n",
    "\n",
    "# Build the Bp (distance-biased) subgraph for every centre node\n",
    "bp_subgraphs = []\n",
    "for xi in batch_centers:\n",
    "    # Sampling probability decays with shortest-path distance\n",
    "    dist_row = dist_matrix[xi].cpu().numpy()\n",
    "    probs = np.power(10.0, -dist_row)\n",
    "    probs[xi] = 0  # exclude the centre itself\n",
    "    probs /= probs.sum()\n",
    "\n",
    "    # With replace=False, np.random.choice can only draw indices whose\n",
    "    # probability is non-zero, so cap the sample size accordingly\n",
    "    available_nodes = int(np.count_nonzero(probs))\n",
    "    actual_sample_size = min(sample_size, available_nodes)\n",
    "\n",
    "    if actual_sample_size > 0:\n",
    "        neighbors = np.random.choice(len(dist_row), size=actual_sample_size, p=probs, replace=False)\n",
    "        subgraph_nodes = [xi] + neighbors.tolist()\n",
    "    else:\n",
    "        subgraph_nodes = [xi]\n",
    "\n",
    "    bp_subgraphs.append(subgraph_nodes)\n",
    "\n",
    "# Build the Bu subgraph (uniform sampling) for every centre node\n",
    "bu_subgraphs = []\n",
    "for xi in batch_centers:\n",
    "    dist_row = dist_matrix[xi].cpu().numpy()\n",
    "\n",
    "    # Uniformly sample nodes for the Bu subgraph, excluding the centre so it\n",
    "    # cannot appear twice in the subgraph node list\n",
    "    candidates = np.delete(np.arange(len(dist_row)), xi)\n",
    "    actual_sample_size = min(sample_size, len(candidates))\n",
    "\n",
    "    if actual_sample_size > 0:\n",
    "        neighbors = np.random.choice(candidates, size=actual_sample_size, replace=False)\n",
    "        subgraph_nodes = [xi] + neighbors.tolist()\n",
    "    else:\n",
    "        subgraph_nodes = [xi]\n",
    "\n",
    "    bu_subgraphs.append(subgraph_nodes)\n",
    "\n",
    "# Compute features for every node of each Bp subgraph\n",
    "bp_features_batch = []\n",
    "for bp_nodes in bp_subgraphs:\n",
    "    bp_node_features = []\n",
    "\n",
    "    for node_idx in bp_nodes:\n",
    "        # Build the node's one-hop neighbour subgraph\n",
    "        node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "\n",
    "        # Embed the node\n",
    "        _, node_feature = bsne_model([node_subgraph])\n",
    "\n",
    "        bp_node_features.append(node_feature.squeeze(0))\n",
    "\n",
    "    bp_features_batch.append(torch.stack(bp_node_features))  # [sample_size+1, d_model]\n",
    "\n",
    "# Compute features for every node of each Bu subgraph\n",
    "bu_features_batch = []\n",
    "for bu_nodes in bu_subgraphs:\n",
    "    bu_node_features = []\n",
    "\n",
    "    for node_idx in bu_nodes:\n",
    "        # Build the node's one-hop neighbour subgraph\n",
    "        node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "\n",
    "        # Embed the node\n",
    "        _, node_feature = bsne_model([node_subgraph])\n",
    "\n",
    "        bu_node_features.append(node_feature.squeeze(0))\n",
    "\n",
    "    bu_features_batch.append(torch.stack(bu_node_features))  # [sample_size+1, d_model]\n",
    "\n",
    "# Compute the loss for each (Bp, Bu) subgraph pair\n",
    "for i in range(batch_size):\n",
    "    # Centre node index and feature (the centre is stored first)\n",
    "    center_idx = batch_centers[i]\n",
    "    center_feature = bp_features_batch[i][0]\n",
    "\n",
    "    # The remaining Bp nodes (centre excluded)\n",
    "    other_bp_indices = bp_subgraphs[i][1:]\n",
    "    other_bp_features = bp_features_batch[i][1:]\n",
    "\n",
    "    # Graph distances from the centre to the other Bp nodes\n",
    "    orig_dists_bp = dist_matrix[center_idx][other_bp_indices].cpu().numpy()\n",
    "\n",
    "    # Feature-space distances from the centre to the other Bp nodes\n",
    "    feat_dists_bp = torch.cdist(center_feature.unsqueeze(0), other_bp_features).squeeze(0)\n",
    "\n",
    "    # P vector: target affinities derived from graph (path) distances\n",
    "    P = np.exp(-orig_dists_bp)\n",
    "    P_sum = P.sum()\n",
    "    P = P / (P_sum + eps)\n",
    "    P = torch.tensor(P, device=device)\n",
    "\n",
    "    # Q vector: model affinities derived from feature distances\n",
    "    Q = np.exp(-feat_dists_bp.detach().cpu().numpy())\n",
    "    Q_sum = Q.sum()\n",
    "    Q = Q / (Q_sum + eps)\n",
    "    Q = torch.tensor(Q, device=device)\n",
    "\n",
    "    # Local loss (mean log-ratio of P against Q)\n",
    "    loss_local = (torch.log(P + eps) - torch.log(Q + eps)).mean()\n",
    "\n",
    "    # Compute k_Bp: rescales the subgraph affinity mass to the full graph\n",
    "    all_dists = dist_matrix[center_idx]  # distances from the centre to all N nodes\n",
    "    unnorm_probs_all = np.exp(-all_dists.cpu().numpy())\n",
    "    unnorm_probs_all[center_idx] = 0\n",
    "    Z_i = unnorm_probs_all.sum()\n",
    "    sum_p_cond = P.sum()\n",
    "    N = dist_matrix.shape[0]\n",
    "    k_Bp = (sum_p_cond * N / len(other_bp_indices))/Z_i\n",
    "\n",
    "    # Feature distances from the centre to the Bu subgraph nodes\n",
    "    bu_features = bu_features_batch[i]\n",
    "    feat_dists_bu = torch.cdist(center_feature.unsqueeze(0), bu_features).squeeze(0)\n",
    "\n",
    "    # Sums of exponentiated negative distances\n",
    "    sum_e_bu = torch.exp(-feat_dists_bu).sum()\n",
    "    sum_e_bp = torch.exp(-feat_dists_bp).sum()\n",
    "\n",
    "    # Global loss\n",
    "    global_ratio = k_Bp * (sum_e_bu / (sum_e_bp + eps))\n",
    "    loss_global = torch.log(global_ratio.clamp(min=eps, max=1e10))  # clamp for numerical stability\n",
    "\n",
    "    # Combine losses\n",
    "    total_loss += loss_local + loss_global\n",
    "    print('P:',P)\n",
    "    print('Q:',Q)\n",
    "    print(f\"local_loss: {loss_local.item()},global_loss: {loss_global.item()}\")\n",
    "\n",
    "bsne_loss = total_loss / batch_size\n",
    "\n",
    "bsne_loss.backward()\n",
    "optimizer.step()\n",
    "\n",
    "# Update the best loss and the early-stopping counter\n",
    "if bsne_loss.item() < pretrain_best_loss:\n",
    "    pretrain_best_loss = bsne_loss.item()\n",
    "    pretrain_no_improve = 0\n",
    "else:\n",
    "    pretrain_no_improve += 1\n",
    "\n",
    "if pretrain_no_improve >= args['pretrain_patience']:\n",
    "    # This debug cell runs a single step, so there is no epoch counter in\n",
    "    # scope; the original f-string referenced an undefined `epoch` (NameError)\n",
    "    print(\"Pretrain early stopping triggered\")\n",
    "    pretrain_early_stop = True\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "2227bbc5",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Center 5103: distance counts: {0: 1, 1: 459, 2: 11334, 3: 150}\n",
      "Center 4736: distance counts: {0: 1, 1: 1919, 2: 10005, 3: 19}\n",
      "Center 9863: distance counts: {0: 1, 1: 580, 2: 11188, 3: 175}\n",
      "Center 10791: distance counts: {0: 1, 1: 98, 2: 9597, 3: 2248}\n",
      "Center 3137: distance counts: {0: 1, 1: 237, 2: 11442, 3: 264}\n",
      "Center 1875: distance counts: {0: 1, 1: 865, 2: 10998, 3: 80}\n",
      "Center 2359: distance counts: {0: 1, 1: 87, 2: 10409, 3: 1447}\n",
      "Center 475: distance counts: {0: 1, 1: 1813, 2: 10098, 3: 32}\n"
     ]
    }
   ],
   "source": [
    "# Inspect how shortest-path distances are distributed for each sampled centre\n",
    "for center in batch_centers:\n",
    "    distances = dist_matrix[center].cpu().numpy()\n",
    "    values, freqs = np.unique(distances, return_counts=True)\n",
    "    print(f\"Center {center}: distance counts: {dict(zip(values, freqs))}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "a177c51b",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(-0.7993, device='cuda:0', dtype=torch.float64, grad_fn=<DivBackward0>)"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Batch-averaged BSNE loss produced by the debug pretraining cell above\n",
    "bsne_loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2e686876",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3016331f",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7fcf7bbe",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "id": "1d60670e",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "id": "04de2062",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cuda\n",
      "loading data...\n",
      "\n",
      "=== Starting Pretraining ===\n",
      "Pretrain Epoch: 000, BSNE Loss: 0.9251\n",
      "Pretrain Epoch: 001, BSNE Loss: 1.0607\n",
      "Pretrain Epoch: 002, BSNE Loss: 0.8667\n",
      "Pretrain Epoch: 003, BSNE Loss: 0.7298\n",
      "Pretrain Epoch: 004, BSNE Loss: 0.9731\n",
      "Pretrain Epoch: 005, BSNE Loss: 0.5254\n",
      "Pretrain Epoch: 006, BSNE Loss: 1.2662\n",
      "Pretrain Epoch: 007, BSNE Loss: 0.9251\n",
      "Pretrain Epoch: 008, BSNE Loss: 0.7718\n",
      "Pretrain Epoch: 009, BSNE Loss: 1.4034\n",
      "Pretrain Epoch: 010, BSNE Loss: 0.6741\n",
      "Pretrain Epoch: 011, BSNE Loss: 0.9547\n",
      "Pretrain Epoch: 012, BSNE Loss: 1.2258\n",
      "Pretrain Epoch: 013, BSNE Loss: 1.0642\n",
      "Pretrain Epoch: 014, BSNE Loss: 0.5815\n",
      "Pretrain Epoch: 015, BSNE Loss: 1.0916\n",
      "Pretrain Epoch: 016, BSNE Loss: 0.8268\n",
      "Pretrain Epoch: 017, BSNE Loss: 0.8952\n",
      "Pretrain Epoch: 018, BSNE Loss: 1.2227\n",
      "Pretrain Epoch: 019, BSNE Loss: 1.1376\n",
      "Pretrain Epoch: 020, BSNE Loss: 1.0631\n",
      "Pretrain Epoch: 021, BSNE Loss: 0.6976\n",
      "Pretrain Epoch: 022, BSNE Loss: 0.7302\n",
      "Pretrain Epoch: 023, BSNE Loss: 1.3321\n",
      "Pretrain Epoch: 024, BSNE Loss: 0.7472\n",
      "Pretrain Epoch: 025, BSNE Loss: 1.0206\n",
      "Pretrain Epoch: 026, BSNE Loss: 0.9225\n",
      "Pretrain Epoch: 027, BSNE Loss: 1.1121\n",
      "Pretrain Epoch: 028, BSNE Loss: 0.9738\n",
      "Pretrain Epoch: 029, BSNE Loss: 1.1386\n",
      "Pretrain Epoch: 030, BSNE Loss: 0.7647\n",
      "Pretrain Epoch: 031, BSNE Loss: 0.9578\n",
      "Pretrain Epoch: 032, BSNE Loss: 0.9829\n",
      "Pretrain Epoch: 033, BSNE Loss: 1.0269\n",
      "Pretrain Epoch: 034, BSNE Loss: 0.7071\n",
      "Pretrain Epoch: 035, BSNE Loss: 1.2049\n",
      "Pretrain Epoch: 036, BSNE Loss: 1.2944\n",
      "Pretrain Epoch: 037, BSNE Loss: 1.2441\n",
      "Pretrain Epoch: 038, BSNE Loss: 1.0130\n",
      "Pretrain Epoch: 039, BSNE Loss: 1.0543\n",
      "Pretrain Epoch: 040, BSNE Loss: 0.9896\n",
      "Pretrain Epoch: 041, BSNE Loss: 1.2006\n",
      "Pretrain Epoch: 042, BSNE Loss: 0.8911\n",
      "Pretrain Epoch: 043, BSNE Loss: 1.0002\n",
      "Pretrain Epoch: 044, BSNE Loss: 0.8504\n",
      "Pretrain Epoch: 045, BSNE Loss: 1.2384\n",
      "Pretrain Epoch: 046, BSNE Loss: 0.4706\n",
      "Pretrain Epoch: 047, BSNE Loss: 0.9769\n",
      "Pretrain Epoch: 048, BSNE Loss: 0.9156\n",
      "Pretrain Epoch: 049, BSNE Loss: 0.6310\n",
      "Pretrain Epoch: 050, BSNE Loss: 1.5985\n",
      "Pretrain Epoch: 051, BSNE Loss: 1.1147\n",
      "Pretrain Epoch: 052, BSNE Loss: 0.8704\n",
      "Pretrain Epoch: 053, BSNE Loss: 1.0085\n",
      "Pretrain Epoch: 054, BSNE Loss: 1.0345\n",
      "Pretrain Epoch: 055, BSNE Loss: 1.0096\n",
      "Pretrain Epoch: 056, BSNE Loss: 1.1974\n",
      "Pretrain Epoch: 057, BSNE Loss: 1.1007\n",
      "Pretrain Epoch: 058, BSNE Loss: 1.1983\n",
      "Pretrain Epoch: 059, BSNE Loss: 0.6808\n",
      "Pretrain Epoch: 060, BSNE Loss: 1.0690\n",
      "Pretrain Epoch: 061, BSNE Loss: 0.8087\n",
      "Pretrain Epoch: 062, BSNE Loss: 1.1238\n",
      "Pretrain Epoch: 063, BSNE Loss: 1.2625\n",
      "Pretrain Epoch: 064, BSNE Loss: 1.2583\n",
      "Pretrain Epoch: 065, BSNE Loss: 1.1739\n",
      "Pretrain Epoch: 066, BSNE Loss: 0.8399\n",
      "Pretrain Epoch: 067, BSNE Loss: 1.0283\n",
      "Pretrain Epoch: 068, BSNE Loss: 0.9651\n",
      "Pretrain Epoch: 069, BSNE Loss: 1.0843\n",
      "Pretrain Epoch: 070, BSNE Loss: 1.1090\n",
      "Pretrain Epoch: 071, BSNE Loss: 0.9699\n",
      "Pretrain Epoch: 072, BSNE Loss: 1.1696\n",
      "Pretrain Epoch: 073, BSNE Loss: 1.4694\n",
      "Pretrain Epoch: 074, BSNE Loss: 0.9565\n",
      "Pretrain Epoch: 075, BSNE Loss: 0.9166\n",
      "Pretrain Epoch: 076, BSNE Loss: 1.2671\n",
      "Pretrain Epoch: 077, BSNE Loss: 1.0658\n",
      "Pretrain Epoch: 078, BSNE Loss: 0.6977\n",
      "Pretrain Epoch: 079, BSNE Loss: 0.9990\n",
      "Pretrain Epoch: 080, BSNE Loss: 1.1251\n",
      "Pretrain Epoch: 081, BSNE Loss: 1.2850\n",
      "Pretrain Epoch: 082, BSNE Loss: 1.3429\n",
      "Pretrain Epoch: 083, BSNE Loss: 1.1513\n",
      "Pretrain Epoch: 084, BSNE Loss: 0.8172\n",
      "Pretrain Epoch: 085, BSNE Loss: 0.7853\n",
      "Pretrain Epoch: 086, BSNE Loss: 0.8062\n",
      "Pretrain Epoch: 087, BSNE Loss: 0.7396\n",
      "Pretrain Epoch: 088, BSNE Loss: 0.7527\n",
      "Pretrain Epoch: 089, BSNE Loss: 0.6894\n",
      "Pretrain Epoch: 090, BSNE Loss: 0.7252\n",
      "Pretrain Epoch: 091, BSNE Loss: 1.2187\n",
      "Pretrain Epoch: 092, BSNE Loss: 0.7240\n",
      "Pretrain Epoch: 093, BSNE Loss: 0.9932\n",
      "Pretrain Epoch: 094, BSNE Loss: 1.1209\n",
      "Pretrain Epoch: 095, BSNE Loss: 0.8345\n",
      "Pretrain Epoch: 096, BSNE Loss: 1.1180\n",
      "Pretrain Epoch: 097, BSNE Loss: 1.1303\n",
      "Pretrain Epoch: 098, BSNE Loss: 0.5231\n",
      "Pretrain Epoch: 099, BSNE Loss: 1.1593\n",
      "Pretrain Epoch: 100, BSNE Loss: 1.0723\n",
      "Pretrain Epoch: 101, BSNE Loss: 1.4876\n",
      "Pretrain Epoch: 102, BSNE Loss: 1.0792\n",
      "Pretrain Epoch: 103, BSNE Loss: 0.9302\n",
      "Pretrain Epoch: 104, BSNE Loss: 1.1123\n",
      "Pretrain Epoch: 105, BSNE Loss: 0.8884\n",
      "Pretrain Epoch: 106, BSNE Loss: 1.0016\n",
      "Pretrain Epoch: 107, BSNE Loss: 1.0809\n",
      "Pretrain Epoch: 108, BSNE Loss: 0.5965\n",
      "Pretrain Epoch: 109, BSNE Loss: 1.1124\n",
      "Pretrain Epoch: 110, BSNE Loss: 0.8213\n",
      "Pretrain Epoch: 111, BSNE Loss: 1.0171\n",
      "Pretrain Epoch: 112, BSNE Loss: 1.0982\n",
      "Pretrain Epoch: 113, BSNE Loss: 0.9706\n",
      "Pretrain Epoch: 114, BSNE Loss: 1.2220\n",
      "Pretrain Epoch: 115, BSNE Loss: 0.9384\n",
      "Pretrain Epoch: 116, BSNE Loss: 1.3835\n",
      "Pretrain Epoch: 117, BSNE Loss: 1.0868\n",
      "Pretrain Epoch: 118, BSNE Loss: 1.0696\n",
      "Pretrain Epoch: 119, BSNE Loss: 1.2259\n",
      "Pretrain Epoch: 120, BSNE Loss: 0.6848\n",
      "Pretrain Epoch: 121, BSNE Loss: 0.6812\n",
      "Pretrain Epoch: 122, BSNE Loss: 0.8425\n",
      "Pretrain Epoch: 123, BSNE Loss: 1.5686\n",
      "Pretrain Epoch: 124, BSNE Loss: 1.1489\n",
      "Pretrain Epoch: 125, BSNE Loss: 0.7574\n",
      "Pretrain Epoch: 126, BSNE Loss: 1.0176\n",
      "Pretrain Epoch: 127, BSNE Loss: 0.5694\n",
      "Pretrain Epoch: 128, BSNE Loss: 0.9167\n",
      "Pretrain Epoch: 129, BSNE Loss: 0.9407\n",
      "Pretrain Epoch: 130, BSNE Loss: 0.7746\n",
      "Pretrain Epoch: 131, BSNE Loss: 0.8123\n",
      "Pretrain Epoch: 132, BSNE Loss: 0.7212\n",
      "Pretrain Epoch: 133, BSNE Loss: 1.2898\n",
      "Pretrain Epoch: 134, BSNE Loss: 0.4112\n",
      "Pretrain Epoch: 135, BSNE Loss: 1.3430\n",
      "Pretrain Epoch: 136, BSNE Loss: 1.0312\n",
      "Pretrain Epoch: 137, BSNE Loss: 0.8985\n",
      "Pretrain Epoch: 138, BSNE Loss: 1.3140\n",
      "Pretrain Epoch: 139, BSNE Loss: 0.5163\n",
      "Pretrain Epoch: 140, BSNE Loss: 1.0910\n",
      "Pretrain Epoch: 141, BSNE Loss: 1.0595\n",
      "Pretrain Epoch: 142, BSNE Loss: 1.1452\n",
      "Pretrain Epoch: 143, BSNE Loss: 1.0909\n",
      "Pretrain Epoch: 144, BSNE Loss: 1.0741\n",
      "Pretrain Epoch: 145, BSNE Loss: 0.8793\n",
      "Pretrain Epoch: 146, BSNE Loss: 0.9107\n",
      "Pretrain Epoch: 147, BSNE Loss: 0.8897\n",
      "Pretrain Epoch: 148, BSNE Loss: 0.8250\n",
      "Pretrain Epoch: 149, BSNE Loss: 0.9954\n",
      "Pretrain Epoch: 150, BSNE Loss: 1.0201\n",
      "Pretrain Epoch: 151, BSNE Loss: 0.8496\n",
      "Pretrain Epoch: 152, BSNE Loss: 1.1043\n",
      "Pretrain Epoch: 153, BSNE Loss: 0.7769\n",
      "Pretrain Epoch: 154, BSNE Loss: 1.2838\n",
      "Pretrain Epoch: 155, BSNE Loss: 0.8345\n",
      "Pretrain Epoch: 156, BSNE Loss: 1.2337\n",
      "Pretrain Epoch: 157, BSNE Loss: 0.6378\n",
      "Pretrain Epoch: 158, BSNE Loss: 0.7614\n",
      "Pretrain Epoch: 159, BSNE Loss: 0.9976\n",
      "Pretrain Epoch: 160, BSNE Loss: 0.6683\n",
      "Pretrain Epoch: 161, BSNE Loss: 1.5679\n",
      "Pretrain Epoch: 162, BSNE Loss: 0.8671\n",
      "Pretrain Epoch: 163, BSNE Loss: 1.0908\n",
      "Pretrain Epoch: 164, BSNE Loss: 1.0478\n",
      "Pretrain Epoch: 165, BSNE Loss: 0.6976\n",
      "Pretrain Epoch: 166, BSNE Loss: 1.1189\n",
      "Pretrain Epoch: 167, BSNE Loss: 0.8114\n",
      "Pretrain Epoch: 168, BSNE Loss: 1.0330\n",
      "Pretrain Epoch: 169, BSNE Loss: 1.0383\n",
      "Pretrain Epoch: 170, BSNE Loss: 0.9670\n",
      "Pretrain Epoch: 171, BSNE Loss: 0.9323\n",
      "Pretrain Epoch: 172, BSNE Loss: 0.9420\n",
      "Pretrain Epoch: 173, BSNE Loss: 1.0374\n",
      "Pretrain Epoch: 174, BSNE Loss: 1.1195\n",
      "Pretrain Epoch: 175, BSNE Loss: 1.0101\n",
      "Pretrain Epoch: 176, BSNE Loss: 0.6983\n",
      "Pretrain Epoch: 177, BSNE Loss: 1.0199\n",
      "Pretrain Epoch: 178, BSNE Loss: 0.8652\n",
      "Pretrain Epoch: 179, BSNE Loss: 1.0069\n",
      "Pretrain Epoch: 180, BSNE Loss: 1.0533\n",
      "Pretrain Epoch: 181, BSNE Loss: 1.0719\n",
      "Pretrain Epoch: 182, BSNE Loss: 0.7366\n",
      "Pretrain Epoch: 183, BSNE Loss: 1.4301\n",
      "Pretrain Epoch: 184, BSNE Loss: 0.9520\n",
      "Pretrain Epoch: 185, BSNE Loss: 0.7302\n",
      "Pretrain Epoch: 186, BSNE Loss: 0.6845\n",
      "Pretrain Epoch: 187, BSNE Loss: 1.2655\n",
      "Pretrain Epoch: 188, BSNE Loss: 0.8484\n",
      "Pretrain Epoch: 189, BSNE Loss: 0.6125\n",
      "Pretrain Epoch: 190, BSNE Loss: 0.7101\n",
      "Pretrain Epoch: 191, BSNE Loss: 0.8996\n",
      "Pretrain Epoch: 192, BSNE Loss: 1.2209\n",
      "Pretrain Epoch: 193, BSNE Loss: 0.6647\n",
      "Pretrain Epoch: 194, BSNE Loss: 1.0039\n",
      "Pretrain Epoch: 195, BSNE Loss: 0.9782\n",
      "Pretrain Epoch: 196, BSNE Loss: 1.3350\n",
      "Pretrain Epoch: 197, BSNE Loss: 1.3284\n",
      "Pretrain Epoch: 198, BSNE Loss: 0.8809\n",
      "Pretrain Epoch: 199, BSNE Loss: 0.9283\n",
      "\n",
      "Loss plots saved to local_loss.png, global_loss.png, bsne_loss.png, local_vs_global.png\n",
      "\n",
      "=== Starting Fine-tuning ===\n",
      "Epoch: 000 | Loss: 0.1247 | Val AUC: 0.3627 | Val F1: 0.4751\n",
      "Epoch: 005 | Loss: 0.0110 | Val AUC: 0.7060 | Val F1: 0.4751\n",
      "Epoch: 010 | Loss: 0.0057 | Val AUC: 0.8020 | Val F1: 0.4751\n",
      "Epoch: 015 | Loss: 0.0137 | Val AUC: 0.8379 | Val F1: 0.4751\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch: 020 | Loss: 0.0125 | Val AUC: 0.8450 | Val F1: 0.4751\n",
      "Epoch: 025 | Loss: 0.0079 | Val AUC: 0.8564 | Val F1: 0.4751\n",
      "Epoch: 030 | Loss: 0.0044 | Val AUC: 0.8640 | Val F1: 0.4751\n",
      "Epoch: 035 | Loss: 0.0187 | Val AUC: 0.8751 | Val F1: 0.4751\n",
      "Epoch: 040 | Loss: 0.0072 | Val AUC: 0.8790 | Val F1: 0.4748\n",
      "Epoch: 045 | Loss: 0.0086 | Val AUC: 0.8795 | Val F1: 0.6435\n",
      "Epoch: 050 | Loss: 0.0051 | Val AUC: 0.8843 | Val F1: 0.7281\n",
      "Epoch: 055 | Loss: 0.0416 | Val AUC: 0.8838 | Val F1: 0.7320\n",
      "Epoch: 060 | Loss: 0.0379 | Val AUC: 0.8878 | Val F1: 0.7249\n",
      "Epoch: 065 | Loss: 0.0030 | Val AUC: 0.8891 | Val F1: 0.6987\n",
      "Epoch: 070 | Loss: 0.0328 | Val AUC: 0.8926 | Val F1: 0.7083\n",
      "Epoch: 075 | Loss: 0.0372 | Val AUC: 0.8903 | Val F1: 0.7358\n",
      "Epoch: 080 | Loss: 0.0158 | Val AUC: 0.8874 | Val F1: 0.7545\n",
      "Epoch: 085 | Loss: 0.1035 | Val AUC: 0.8858 | Val F1: 0.7521\n",
      "Epoch: 090 | Loss: 0.0114 | Val AUC: 0.8868 | Val F1: 0.7292\n",
      "Epoch: 095 | Loss: 0.0292 | Val AUC: 0.8837 | Val F1: 0.7351\n",
      "Epoch: 100 | Loss: 0.0232 | Val AUC: 0.8860 | Val F1: 0.7416\n",
      "Epoch: 105 | Loss: 0.0062 | Val AUC: 0.8889 | Val F1: 0.7392\n",
      "Epoch: 110 | Loss: 0.0203 | Val AUC: 0.8913 | Val F1: 0.7545\n",
      "Epoch: 115 | Loss: 0.0073 | Val AUC: 0.8862 | Val F1: 0.7523\n",
      "Epoch: 120 | Loss: 0.0152 | Val AUC: 0.8857 | Val F1: 0.7528\n",
      "Epoch: 125 | Loss: 0.0057 | Val AUC: 0.8860 | Val F1: 0.7561\n",
      "Epoch 00026: reducing learning rate of group 0 to 2.5000e-04.\n",
      "Epoch: 130 | Loss: 0.0095 | Val AUC: 0.8877 | Val F1: 0.7521\n",
      "Epoch: 135 | Loss: 0.0066 | Val AUC: 0.8861 | Val F1: 0.7492\n",
      "Epoch: 140 | Loss: 0.0062 | Val AUC: 0.8869 | Val F1: 0.7549\n",
      "Epoch: 145 | Loss: 0.0097 | Val AUC: 0.8884 | Val F1: 0.7548\n",
      "Epoch: 150 | Loss: 0.0025 | Val AUC: 0.8903 | Val F1: 0.7567\n",
      "Epoch: 155 | Loss: 0.1038 | Val AUC: 0.8897 | Val F1: 0.7594\n",
      "Epoch: 160 | Loss: 0.0684 | Val AUC: 0.8869 | Val F1: 0.7567\n",
      "Epoch: 165 | Loss: 0.0236 | Val AUC: 0.8871 | Val F1: 0.7603\n",
      "Epoch: 170 | Loss: 0.0306 | Val AUC: 0.8901 | Val F1: 0.7566\n",
      "Epoch: 175 | Loss: 0.0070 | Val AUC: 0.8915 | Val F1: 0.7561\n",
      "Epoch: 180 | Loss: 0.0488 | Val AUC: 0.8895 | Val F1: 0.7541\n",
      "Epoch 00037: reducing learning rate of group 0 to 1.2500e-04.\n",
      "Epoch: 185 | Loss: 0.1046 | Val AUC: 0.8928 | Val F1: 0.7612\n",
      "Epoch: 190 | Loss: 0.0202 | Val AUC: 0.8900 | Val F1: 0.7570\n",
      "Epoch: 195 | Loss: 0.0127 | Val AUC: 0.8942 | Val F1: 0.7528\n",
      "\n",
      "=== Final Test Results ===\n",
      "Test AUC: 0.8872 | Test AP: 0.4472 | Test F1: 0.7462 | G-mean: 0.8002\n"
     ]
    }
   ],
   "source": [
    "# 进行完整测试（新）\n",
    "\n",
    "\n",
    "    \n",
    "# 参数设置\n",
    "args = {\n",
    "    \"dataset\": \"amazon\",\n",
    "    \"batch_size\": 8,\n",
    "    \"sample_size\": 20,\n",
    "    \"weight_decay\": 0.00005,\n",
    "    \"emb_size\": 32,\n",
    "    \"pretrain_epochs\": 200,\n",
    "    \"pretrain_lr\": 0.0001,\n",
    "    \"finetune_lr\": 0.0005,\n",
    "    \"num_epochs\": 200,\n",
    "    \"pretrain_patience\": 20,\n",
    "    \"patience\": 30,\n",
    "    \"tsne_weight\": 0.3,\n",
    "    \"weight\": 0.6,\n",
    "    \"layers\": 7,\n",
    "    \"test_size\": 0.6,\n",
    "    \"val_size\": 0.5,\n",
    "    \"layers_tree\": 7,\n",
    "    \"seed\": 76,\n",
    "    \"num_heads\": 2,\n",
    "    \"drop_rate\": 0.5\n",
    "}\n",
    "\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "print(device)\n",
    "\n",
    "timestamp = time.strftime(\"%Y%m%d-%H%M%S\")\n",
    "writer = SummaryWriter(f'runs/{args[\"dataset\"]}_{timestamp}')\n",
    "\n",
    "print('loading data...')\n",
    "prefix = \"../../data/\"\n",
    "\n",
    "edge_indexs, feat_data, labels = load_data(args['dataset'], args['layers_tree'], prefix)\n",
    "\n",
    "np.random.seed(args['seed'])\n",
    "rd.seed(args['seed'])\n",
    "\n",
    "if args['dataset'] == 'yelp':\n",
    "    index = list(range(len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels, stratify=labels,\n",
    "                                                                    test_size=args['test_size'], random_state=2,\n",
    "                                                                    shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"YelpChi_shortest_distance.pkl\")\n",
    "elif args['dataset'] == 'amazon':\n",
    "    index = list(range(3305, len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels[3305:],\n",
    "                                                                    stratify=labels[3305:],\n",
    "                                                                    test_size=args['test_size'],\n",
    "                                                                    random_state=2, shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"Amazon_shortest_distance.pkl\")\n",
    "\n",
    "with open(dist_path, 'rb') as f:\n",
    "    dist_data = pickle.load(f)\n",
    "    dist_matrix = torch.tensor(dist_data['dist_matrix']).to(device)\n",
    "\n",
    "# 准备特征数据\n",
    "feat_data = torch.tensor(feat_data).float()\n",
    "# 特征归一化\n",
    "scaler = MinMaxScaler()\n",
    "feat_data = torch.tensor(scaler.fit_transform(feat_data)).float().to(device)\n",
    "\n",
    "# 初始化模型\n",
    "bsne_model = BSNE_Transformer(\n",
    "    in_feat=feat_data.shape[1],\n",
    "    out_feat=2,\n",
    "    relation_nums=len(edge_indexs),\n",
    "    d_model=64,\n",
    "    nhead=args['num_heads'],\n",
    "    num_layers=3,\n",
    "    dim_feedforward=256,\n",
    "    drop_rate=args['drop_rate']\n",
    ").to(device)\n",
    "\n",
    "# Move edge indices to the compute device\n",
    "for edge_index in edge_indexs:\n",
    "    edge_index[0] = edge_index[0].to(device)\n",
    "    edge_index[1] = [tensor.to(device) for tensor in edge_index[1]]\n",
    "\n",
    "all_local_losses = []      # per-epoch lists of per-batch local losses\n",
    "all_global_losses = []     # per-epoch lists of per-batch global losses\n",
    "all_bsne_losses = []       # total BSNE loss per epoch\n",
    "epoch_avg_local = []       # mean local loss per epoch\n",
    "epoch_avg_global = []      # mean global loss per epoch\n",
    "    \n",
    "    \n",
    "print(\"\\n=== Starting Pretraining ===\")\n",
    "\n",
    "bsne_model.classifier.requires_grad_(False)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    filter(lambda p: p.requires_grad, bsne_model.parameters()),\n",
    "    lr=args['pretrain_lr'],\n",
    "    weight_decay=5e-5\n",
    ")\n",
    "pretrain_best_loss = float('inf')\n",
    "pretrain_no_improve = 0\n",
    "pretrain_early_stop = False\n",
    "\n",
    "for epoch in range(args['pretrain_epochs']):\n",
    "#     if pretrain_early_stop:\n",
    "#         break\n",
    "\n",
    "    bsne_model.train()\n",
    "    optimizer.zero_grad()\n",
    "    \n",
    "    \n",
    "    epoch_local_losses = []\n",
    "    epoch_global_losses = []\n",
    "    \n",
    "    \n",
    "    # batch_centers = rd.sample(range(feat_data.shape[0]), args['batch_size'])\n",
    "    batch_centers = np.random.choice(range(feat_data.shape[0]), size=args['batch_size'], replace=False)\n",
    "\n",
    "\n",
    "    sample_size = args['sample_size']\n",
    "    temperature = 100\n",
    "    eps = 1e-10\n",
    "\n",
    "    total_loss = 0.0\n",
    "    batch_size = len(batch_centers)\n",
    "\n",
    "    # Build a Bp subgraph for each center node (distance-biased sampling)\n",
    "    bp_subgraphs = []\n",
    "    for xi in batch_centers:\n",
    "        # Sampling probability decays with shortest-path distance (10^-d)\n",
    "        dist_row = dist_matrix[xi].cpu().numpy()\n",
    "        probs = np.power(10.0, -dist_row)\n",
    "        probs[xi] = 0  # exclude the center node itself\n",
    "        probs /= probs.sum()\n",
    "\n",
    "        # Sample neighbor nodes without replacement\n",
    "        # NOTE(review): np.random.choice raises ValueError if fewer than\n",
    "        # actual_sample_size entries of `probs` are nonzero — confirm every\n",
    "        # dist_matrix row has enough finite distances.\n",
    "        available_nodes = len(dist_row) - 1\n",
    "        actual_sample_size = min(sample_size, available_nodes)\n",
    "\n",
    "        if actual_sample_size > 0:\n",
    "            neighbors = np.random.choice(len(dist_row), size=actual_sample_size, p=probs, replace=False)\n",
    "            subgraph_nodes = [xi] + neighbors.tolist()\n",
    "        else:\n",
    "            subgraph_nodes = [xi]\n",
    "\n",
    "        bp_subgraphs.append(subgraph_nodes)\n",
    "\n",
    "    # Build a Bu subgraph for each center node (uniform sampling)\n",
    "    bu_subgraphs = []\n",
    "    for xi in batch_centers:\n",
    "        dist_row = dist_matrix[xi].cpu().numpy()\n",
    "\n",
    "        # Uniformly sample nodes to form the Bu subgraph\n",
    "        # NOTE(review): unlike the Bp path, xi itself is not excluded here,\n",
    "        # so the center can be re-drawn — confirm this is intended.\n",
    "        available_nodes = len(dist_row) - 1\n",
    "        actual_sample_size = min(sample_size, available_nodes)\n",
    "\n",
    "        if actual_sample_size > 0:\n",
    "            neighbors = np.random.choice(len(dist_row), size=actual_sample_size, replace=False)\n",
    "            subgraph_nodes = [xi] + neighbors.tolist()\n",
    "        else:\n",
    "            subgraph_nodes = [xi]\n",
    "\n",
    "        bu_subgraphs.append(subgraph_nodes)\n",
    "    # Compute embeddings for every node in each Bp subgraph\n",
    "    bp_features_batch = []\n",
    "    for bp_nodes in bp_subgraphs:\n",
    "        bp_node_features = []\n",
    "\n",
    "        # Compute an embedding for each node of the subgraph\n",
    "        for node_idx in bp_nodes:\n",
    "            # Build the node's 1-hop neighborhood subgraph\n",
    "            node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "\n",
    "            # Get the node embedding from the model\n",
    "            _, node_feature = bsne_model([node_subgraph])\n",
    "\n",
    "            bp_node_features.append(node_feature.squeeze(0))\n",
    "\n",
    "        # Collect the embeddings of all nodes in this Bp subgraph\n",
    "        bp_features_batch.append(torch.stack(bp_node_features))  # [sample_size+1, d_model]\n",
    "\n",
    "    # Compute embeddings for every node in each Bu subgraph\n",
    "    bu_features_batch = []\n",
    "    for bu_nodes in bu_subgraphs:\n",
    "        bu_node_features = []\n",
    "\n",
    "        # Compute an embedding for each node of the subgraph\n",
    "        for node_idx in bu_nodes:\n",
    "            # Build the node's 1-hop neighborhood subgraph\n",
    "            node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "\n",
    "            # Get the node embedding from the model\n",
    "            _, node_feature = bsne_model([node_subgraph])\n",
    "\n",
    "            bu_node_features.append(node_feature.squeeze(0))\n",
    "\n",
    "        # Collect the embeddings of all nodes in this Bu subgraph\n",
    "        bu_features_batch.append(torch.stack(bu_node_features))  # [sample_size+1, d_model]\n",
    "\n",
    "    # Compute the loss for each (Bp, Bu) subgraph pair\n",
    "    for i in range(batch_size):\n",
    "        # Center node index and its embedding\n",
    "        center_idx = batch_centers[i]\n",
    "        center_feature = bp_features_batch[i][0]  # center node is first\n",
    "\n",
    "        # Only the other Bp nodes (center excluded)\n",
    "        other_bp_indices = bp_subgraphs[i][1:]\n",
    "        other_bp_features = bp_features_batch[i][1:]\n",
    "        \n",
    "        with torch.no_grad():\n",
    "            # Graph distances from the center to the other Bp nodes\n",
    "            orig_dists_bp = dist_matrix[center_idx][other_bp_indices].cpu().numpy()\n",
    "\n",
    "            # P vector (target distribution from path distances)\n",
    "        #     P = (1.0 + orig_dists_bp ** 2) ** -1\n",
    "            P = np.exp(-orig_dists_bp)\n",
    "        #     P = np.power(10.0, -orig_dists_bp)\n",
    "            P_sum = P.sum()\n",
    "            P = P / (P_sum + eps)\n",
    "            P = torch.tensor(P, device=device)\n",
    "\n",
    "        # Q vector (model distribution from feature distances)\n",
    "    #     Q = (1.0 + feat_dists_bp ** 2 / temperature) ** -1\n",
    "    #     Q = np.exp(-feat_dists_bp.detach().numpy())\n",
    "#         Q = np.exp(-feat_dists_bp.detach().cpu().numpy())\n",
    "        # Feature-space distances from the center to the other Bp nodes\n",
    "        feat_dists_bp = torch.cdist(center_feature.unsqueeze(0), other_bp_features).squeeze(0)\n",
    "        feat_dists_bp.requires_grad_(True)  # NOTE(review): cdist output is a non-leaf tensor, so this call is at best a no-op — confirm it can be removed\n",
    "#         Q = np.exp(-feat_dists_bp.cpu().numpy())\n",
    "#         Q_sum = Q.sum()\n",
    "#         Q = Q / (Q_sum + eps)\n",
    "#         Q = torch.tensor(Q, device=device)\n",
    "        \n",
    "        Q = torch.softmax(-feat_dists_bp, dim=0)\n",
    "        \n",
    "#         log_ratio = torch.log(P + eps) - torch.log(Q + eps)\n",
    "        log_ratio = torch.log(((P + eps)/(Q + eps))**2)  # NOTE(review): not a KL term (no P weighting) and can go negative — confirm intended\n",
    "        loss_local = log_ratio.mean() \n",
    "\n",
    "        # Global distances and unnormalized probabilities\n",
    "        all_dists = dist_matrix[center_idx].cpu().numpy()\n",
    "        unnorm_probs_global = np.exp(-all_dists)\n",
    "        unnorm_probs_global[center_idx] = 0  # exclude the center itself\n",
    "\n",
    "        # Global normalization constant\n",
    "        Z_global = unnorm_probs_global.sum() + eps\n",
    "\n",
    "        # Total global probability mass of the Bp nodes\n",
    "        bp_global_prob_sum = unnorm_probs_global[other_bp_indices].sum() / Z_global\n",
    "\n",
    "        # Compute k_Bp (as defined in the formula)\n",
    "        N = dist_matrix.shape[0]\n",
    "        k_Bp = bp_global_prob_sum * (N / len(other_bp_indices))  # NOTE(review): ZeroDivisionError if Bp sampled no neighbors (actual_sample_size == 0)\n",
    "\n",
    "        bu_features = bu_features_batch[i][1:]  # exclude the center node\n",
    "        feat_dists_bu = torch.cdist(center_feature.unsqueeze(0), bu_features).squeeze(0)\n",
    "\n",
    "        sum_e_bu = torch.exp(-feat_dists_bu).sum()\n",
    "        sum_e_bp = torch.exp(-feat_dists_bp).sum()\n",
    "        global_ratio = k_Bp * (sum_e_bu / (sum_e_bp + eps))\n",
    "        loss_global = torch.log(global_ratio.clamp(min=eps, max=1e10)**2)\n",
    "\n",
    "        # = 5. Combine the losses =\n",
    "        total_loss += loss_local + loss_global\n",
    "        \n",
    "        epoch_local_losses.append(loss_local.item())\n",
    "        epoch_global_losses.append(loss_global.item())\n",
    "        \n",
    "#         print(f\"local_loss: {loss_local.item()},global_loss: {loss_global.item()}\")\n",
    "        '''\n",
    "        # 局部损失（KL散度）\n",
    "        loss_local = (torch.log(P + eps) - torch.log(Q + eps)).sum() / (len(bp_features_batch[i]-1))\n",
    "        \n",
    "\n",
    "        # 计算k_Bp\n",
    "        # 获取中心节点到图中所有节点的距离\n",
    "        all_dists = dist_matrix[center_idx]  # [N]\n",
    "\n",
    "    #     \n",
    "    #     unnorm_probs_all = (1.0 + all_dists ** 2) ** -1\n",
    "    # \n",
    "        unnorm_probs_all = np.exp(-all_dists.cpu().numpy())\n",
    "    #     \n",
    "    #     unnorm_probs_all = np.power(10.0, -all_dists.cpu().numpy())\n",
    "        # 排除中心节点自身（设置自身概率为0）\n",
    "    #     unnorm_probs_all = unnorm_probs_all.clone()  # 避免修改原始数据\n",
    "        unnorm_probs_all[center_idx] = 0\n",
    "        Z_i = unnorm_probs_all.sum()\n",
    "        sum_p_cond = P.sum()\n",
    "        N = dist_matrix.shape[0]\n",
    "        k_Bp = (sum_p_cond * N / len(other_bp_indices))/Z_i\n",
    "\n",
    "        # 计算中心节点到Bu子图节点的特征距离\n",
    "        bu_features = bu_features_batch[i]\n",
    "        feat_dists_bu = torch.cdist(center_feature.unsqueeze(0), bu_features).squeeze(0)\n",
    "\n",
    "        # 计算指数距离之和\n",
    "        sum_e_bu = torch.exp(-feat_dists_bu).sum()\n",
    "        sum_e_bp = torch.exp(-feat_dists_bp).sum()\n",
    "\n",
    "        # 全局损失\n",
    "        global_ratio = k_Bp * (sum_e_bu / (sum_e_bp + eps))\n",
    "        loss_global = torch.log(global_ratio.clamp(min=eps, max=1e10))  # 限制范围\n",
    "\n",
    "        # 合并损失\n",
    "        total_loss += loss_local + loss_global\n",
    "#         print('P:',P)\n",
    "#         print('Q:',Q)\n",
    "#         print(f\"local_loss: {loss_local.item()},global_loss: {loss_global.item()}\")\n",
    "        '''\n",
    "    bsne_loss = total_loss / batch_size\n",
    "    \n",
    "    \n",
    "    all_local_losses.append(epoch_local_losses)\n",
    "    all_global_losses.append(epoch_global_losses)\n",
    "    \n",
    "    avg_local = np.mean(epoch_local_losses)\n",
    "    avg_global = np.mean(epoch_global_losses)\n",
    "    epoch_avg_local.append(avg_local)\n",
    "    epoch_avg_global.append(avg_global)\n",
    "    \n",
    "    bsne_loss = total_loss / batch_size  # NOTE(review): duplicates the identical computation a few lines above\n",
    "    \n",
    "    bsne_loss_item = bsne_loss.item()\n",
    "    all_bsne_losses.append(bsne_loss_item)\n",
    "\n",
    "    bsne_loss.backward()\n",
    "    optimizer.step()\n",
    "    \n",
    "#     for name, parms in bsne_model.named_parameters():\n",
    "#         print('-->name:', name, '-->grad_requirs:', parms.requires_grad, '--weight', torch.mean(parms.data), ' -->grad_value:', torch.mean(parms.grad))\n",
    "    \n",
    "    # Update best loss and early-stopping counter (currently disabled)\n",
    "#     if bsne_loss.item() < pretrain_best_loss:\n",
    "#         pretrain_best_loss = bsne_loss.item()\n",
    "#         pretrain_no_improve = 0\n",
    "#     else:\n",
    "#         pretrain_no_improve += 1\n",
    "\n",
    "#     if pretrain_no_improve >= args['pretrain_patience']:\n",
    "#         print(f\"Pretrain early stopping at epoch {epoch}\")\n",
    "#         pretrain_early_stop = True\n",
    "\n",
    "    writer.add_scalar('Pretrain/TSNE_Loss', bsne_loss.item(), epoch)\n",
    "    print(f'Pretrain Epoch: {epoch:03d}, BSNE Loss: {bsne_loss.item():.4f}')\n",
    "#     if epoch % 5 == 0:\n",
    "#         print(f'Pretrain Epoch: {epoch:03d}, BSNE Loss: {bsne_loss.item():.4f}')\n",
    "\n",
    "\n",
    "plot_losses(all_local_losses, 'Local Loss During Pretraining', 'Loss', 'local_loss.png')\n",
    "plot_losses(all_global_losses, 'Global Loss During Pretraining', 'Loss', 'global_loss.png')\n",
    "\n",
    "# Plot the total BSNE loss\n",
    "plt.figure(figsize=(12, 6))\n",
    "plt.plot(all_bsne_losses, 'g-', linewidth=2, label='BSNE Loss')\n",
    "plt.title('Total BSNE Loss During Pretraining')\n",
    "plt.xlabel('Epoch')\n",
    "plt.ylabel('Loss')\n",
    "plt.grid(True)\n",
    "plt.savefig('bsne_loss.png')\n",
    "plt.close()\n",
    "\n",
    "# Plot average local vs. global loss\n",
    "plt.figure(figsize=(12, 6))\n",
    "plt.plot(epoch_avg_local, 'b-', linewidth=2, label='Local Loss (Avg)')\n",
    "plt.plot(epoch_avg_global, 'r-', linewidth=2, label='Global Loss (Avg)')\n",
    "plt.title('Local vs Global Loss During Pretraining')\n",
    "plt.xlabel('Epoch')\n",
    "plt.ylabel('Loss')\n",
    "plt.legend()\n",
    "plt.grid(True)\n",
    "plt.savefig('local_vs_global.png')\n",
    "plt.close()\n",
    "\n",
    "print(\"\\nLoss plots saved to local_loss.png, global_loss.png, bsne_loss.png, local_vs_global.png\")\n",
    "\n",
    "print(\"\\n=== Starting Fine-tuning ===\")\n",
    "bsne_model.classifier.requires_grad_(True)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    bsne_model.parameters(),\n",
    "    lr=args['finetune_lr'],\n",
    "    weight_decay=5e-5\n",
    ")\n",
    "scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n",
    "    optimizer, mode='max', factor=0.5, patience=10, verbose=True\n",
    ")\n",
    "\n",
    "best_val_auc = 0.0\n",
    "best_model_state = None\n",
    "train_pos, train_neg = pos_neg_split(idx_train, y_train)\n",
    "\n",
    "no_improve_epochs = 0\n",
    "early_stop = False\n",
    "\n",
    "for epoch in range(args['num_epochs']):\n",
    "    if early_stop:\n",
    "        break\n",
    "\n",
    "    bsne_model.train()\n",
    "    total_loss = 0.0\n",
    "\n",
    "    # Sample center nodes\n",
    "    batch_centers = rd.sample(train_pos + train_neg, args['batch_size'])\n",
    "    subgraph_data = []\n",
    "\n",
    "    for xi in batch_centers:\n",
    "        subgraph = create_node_subgraph(xi, feat_data, edge_indexs, device)\n",
    "        #         print(len(subgraph['features']))\n",
    "        subgraph_data.append(subgraph)\n",
    "\n",
    "    optimizer.zero_grad()\n",
    "\n",
    "    center_logits, _ = bsne_model(subgraph_data)  # [B, 2]\n",
    "\n",
    "    # Labels of the center nodes\n",
    "    labels_center = torch.tensor([labels[xi] for xi in batch_centers]).to(device).long()\n",
    "\n",
    "    # Classification loss (nll_loss expects log-probabilities from the model)\n",
    "    cls_loss = F.nll_loss(center_logits, labels_center)\n",
    "\n",
    "    cls_loss.backward()\n",
    "    optimizer.step()\n",
    "\n",
    "    total_loss += cls_loss.item()\n",
    "\n",
    "    # NOTE(review): cls_loss is already a batch mean (nll_loss default reduction),\n",
    "    # so this divides by batch_size a second time — confirm the logged scale is intended\n",
    "    avg_loss = total_loss / args['batch_size']\n",
    "    writer.add_scalar('FineTune/Train_Loss', avg_loss, epoch)\n",
    "\n",
    "    if epoch % 5 == 0:\n",
    "        val_auc, val_ap, val_f1, val_g_mean = test(idx_val, y_val, bsne_model, feat_data, edge_indexs, device)\n",
    "\n",
    "        writer.add_scalar('Validation/AUC', val_auc, epoch)\n",
    "        writer.add_scalar('Validation/F1', val_f1, epoch)\n",
    "        writer.add_scalar('Validation/GMean', val_g_mean, epoch)\n",
    "\n",
    "        print(f'Epoch: {epoch:03d} | Loss: {avg_loss:.4f} | Val AUC: {val_auc:.4f} | Val F1: {val_f1:.4f}')\n",
    "\n",
    "        scheduler.step(val_auc)\n",
    "\n",
    "        if val_auc > best_val_auc:\n",
    "            best_val_auc = val_auc\n",
    "            no_improve_epochs = 0\n",
    "            best_model_state = copy.deepcopy(bsne_model.state_dict())\n",
    "        else:\n",
    "            no_improve_epochs += 1\n",
    "\n",
    "        if no_improve_epochs >= args['patience']:\n",
    "            print(f\"Early stopping at epoch {epoch}\")\n",
    "            early_stop = True\n",
    "\n",
    "# Load the best checkpoint\n",
    "bsne_model.load_state_dict(best_model_state)\n",
    "\n",
    "# Final evaluation on the test set\n",
    "test_auc, test_ap, test_f1, test_g_mean = test(idx_test, y_test, bsne_model, feat_data, edge_indexs, device)\n",
    "print(f'\\n=== Final Test Results ===')\n",
    "print(f'Test AUC: {test_auc:.4f} | Test AP: {test_ap:.4f} | Test F1: {test_f1:.4f} | G-mean: {test_g_mean:.4f}')\n",
    "writer.close()\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "75e283f9",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cuda\n",
      "loading data...\n",
      "\n",
      "=== Starting Fine-tuning ===\n",
      "Epoch: 000 | Loss: 0.1378 | Val AUC: 0.8212 | Val F1: 0.6141\n",
      "Epoch: 005 | Loss: 0.0193 | Val AUC: 0.8727 | Val F1: 0.4751\n",
      "Epoch: 010 | Loss: 0.0123 | Val AUC: 0.8746 | Val F1: 0.4751\n",
      "Epoch: 015 | Loss: 0.0022 | Val AUC: 0.8833 | Val F1: 0.4751\n",
      "Epoch: 020 | Loss: 0.0048 | Val AUC: 0.9028 | Val F1: 0.4751\n",
      "Epoch: 025 | Loss: 0.0300 | Val AUC: 0.9101 | Val F1: 0.4872\n",
      "Epoch: 030 | Loss: 0.0079 | Val AUC: 0.9158 | Val F1: 0.4986\n",
      "Epoch: 035 | Loss: 0.0068 | Val AUC: 0.9182 | Val F1: 0.6396\n",
      "Epoch: 040 | Loss: 0.0107 | Val AUC: 0.9149 | Val F1: 0.7266\n",
      "Epoch: 045 | Loss: 0.0119 | Val AUC: 0.9153 | Val F1: 0.7486\n"
     ]
    }
   ],
   "source": [
    "# Classification-only run (0622)\n",
    "\n",
    "\n",
    "    \n",
    "# 参数设置\n",
    "args = {\n",
    "    \"dataset\": \"amazon\",\n",
    "    \"batch_size\": 8,\n",
    "    \"sample_size\": 20,\n",
    "    \"weight_decay\": 0.00005,\n",
    "    \"emb_size\": 32,\n",
    "    \"pretrain_epochs\": 200,\n",
    "    \"pretrain_lr\": 0.0001,\n",
    "    \"finetune_lr\": 0.0005,\n",
    "    \"num_epochs\": 200,\n",
    "    \"pretrain_patience\": 20,\n",
    "    \"patience\": 30,\n",
    "    \"tsne_weight\": 0.3,\n",
    "    \"weight\": 0.6,\n",
    "    \"layers\": 7,\n",
    "    \"test_size\": 0.6,\n",
    "    \"val_size\": 0.5,\n",
    "    \"layers_tree\": 7,\n",
    "    \"seed\": 76,\n",
    "    \"num_heads\": 2,\n",
    "    \"drop_rate\": 0.5\n",
    "}\n",
    "\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "print(device)\n",
    "\n",
    "timestamp = time.strftime(\"%Y%m%d-%H%M%S\")\n",
    "writer = SummaryWriter(f'runs/{args[\"dataset\"]}_{timestamp}')\n",
    "\n",
    "print('loading data...')\n",
    "prefix = \"../../data/\"\n",
    "\n",
    "edge_indexs, feat_data, labels = load_data(args['dataset'], args['layers_tree'], prefix)\n",
    "\n",
    "np.random.seed(args['seed'])\n",
    "rd.seed(args['seed'])\n",
    "\n",
    "if args['dataset'] == 'yelp':\n",
    "    index = list(range(len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels, stratify=labels,\n",
    "                                                                    test_size=args['test_size'], random_state=2,\n",
    "                                                                    shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"YelpChi_shortest_distance.pkl\")\n",
    "elif args['dataset'] == 'amazon':\n",
    "    index = list(range(3305, len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels[3305:],\n",
    "                                                                    stratify=labels[3305:],\n",
    "                                                                    test_size=args['test_size'],\n",
    "                                                                    random_state=2, shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"Amazon_shortest_distance.pkl\")\n",
    "\n",
    "with open(dist_path, 'rb') as f:\n",
    "    dist_data = pickle.load(f)\n",
    "    dist_matrix = torch.tensor(dist_data['dist_matrix']).to(device)\n",
    "\n",
    "# Prepare feature data\n",
    "feat_data = torch.tensor(feat_data).float()\n",
    "# Feature normalization (min-max scale each feature column to [0, 1])\n",
    "scaler = MinMaxScaler()\n",
    "feat_data = torch.tensor(scaler.fit_transform(feat_data)).float().to(device)\n",
    "\n",
    "# Initialize the model\n",
    "bsne_model = BSNE_Transformer(\n",
    "    in_feat=feat_data.shape[1],\n",
    "    out_feat=2,\n",
    "    relation_nums=len(edge_indexs),\n",
    "    d_model=64,\n",
    "    nhead=args['num_heads'],\n",
    "    num_layers=3,\n",
    "    dim_feedforward=256,\n",
    "    drop_rate=args['drop_rate']\n",
    ").to(device)\n",
    "\n",
    "# Move edge indices to the compute device\n",
    "for edge_index in edge_indexs:\n",
    "    edge_index[0] = edge_index[0].to(device)\n",
    "    edge_index[1] = [tensor.to(device) for tensor in edge_index[1]]\n",
    "\n",
    "all_local_losses = []      # per-epoch lists of per-batch local losses\n",
    "all_global_losses = []     # per-epoch lists of per-batch global losses\n",
    "all_bsne_losses = []       # total BSNE loss per epoch\n",
    "epoch_avg_local = []       # mean local loss per epoch\n",
    "epoch_avg_global = []      # mean global loss per epoch\n",
    "    \n",
    "    \n",
    "print(\"\\n=== Starting Fine-tuning ===\")\n",
    "bsne_model.classifier.requires_grad_(True)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    bsne_model.parameters(),\n",
    "    lr=args['finetune_lr'],\n",
    "    weight_decay=5e-5\n",
    ")\n",
    "scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n",
    "    optimizer, mode='max', factor=0.5, patience=10, verbose=True\n",
    ")\n",
    "\n",
    "best_val_auc = 0.0\n",
    "best_model_state = None\n",
    "train_pos, train_neg = pos_neg_split(idx_train, y_train)\n",
    "\n",
    "no_improve_epochs = 0\n",
    "early_stop = False\n",
    "\n",
    "for epoch in range(args['num_epochs']):\n",
    "    if early_stop:\n",
    "        break\n",
    "\n",
    "    bsne_model.train()\n",
    "    total_loss = 0.0\n",
    "\n",
    "    # Sample center nodes\n",
    "    batch_centers = rd.sample(train_pos + train_neg, args['batch_size'])\n",
    "    subgraph_data = []\n",
    "\n",
    "    for xi in batch_centers:\n",
    "        subgraph = create_node_subgraph(xi, feat_data, edge_indexs, device)\n",
    "        #         print(len(subgraph['features']))\n",
    "        subgraph_data.append(subgraph)\n",
    "\n",
    "    optimizer.zero_grad()\n",
    "\n",
    "    center_logits, _ = bsne_model(subgraph_data)  # [B, 2]\n",
    "\n",
    "    # Labels of the center nodes\n",
    "    labels_center = torch.tensor([labels[xi] for xi in batch_centers]).to(device).long()\n",
    "\n",
    "    # Classification loss (nll_loss expects log-probabilities from the model)\n",
    "    cls_loss = F.nll_loss(center_logits, labels_center)\n",
    "\n",
    "    cls_loss.backward()\n",
    "    optimizer.step()\n",
    "\n",
    "    total_loss += cls_loss.item()\n",
    "\n",
    "    # NOTE(review): cls_loss is already a batch mean (nll_loss default reduction),\n",
    "    # so this divides by batch_size a second time — confirm the logged scale is intended\n",
    "    avg_loss = total_loss / args['batch_size']\n",
    "    writer.add_scalar('FineTune/Train_Loss', avg_loss, epoch)\n",
    "\n",
    "    if epoch % 5 == 0:\n",
    "        val_auc, val_ap, val_f1, val_g_mean = test(idx_val, y_val, bsne_model, feat_data, edge_indexs, device)\n",
    "\n",
    "        writer.add_scalar('Validation/AUC', val_auc, epoch)\n",
    "        writer.add_scalar('Validation/F1', val_f1, epoch)\n",
    "        writer.add_scalar('Validation/GMean', val_g_mean, epoch)\n",
    "\n",
    "        print(f'Epoch: {epoch:03d} | Loss: {avg_loss:.4f} | Val AUC: {val_auc:.4f} | Val F1: {val_f1:.4f}')\n",
    "\n",
    "        scheduler.step(val_auc)\n",
    "\n",
    "        if val_auc > best_val_auc:\n",
    "            best_val_auc = val_auc\n",
    "            no_improve_epochs = 0\n",
    "            best_model_state = copy.deepcopy(bsne_model.state_dict())\n",
    "        else:\n",
    "            no_improve_epochs += 1\n",
    "\n",
    "        if no_improve_epochs >= args['patience']:\n",
    "            print(f\"Early stopping at epoch {epoch}\")\n",
    "            early_stop = True\n",
    "\n",
    "# Load the best checkpoint\n",
    "bsne_model.load_state_dict(best_model_state)\n",
    "\n",
    "# Final evaluation on the test set\n",
    "test_auc, test_ap, test_f1, test_g_mean = test(idx_test, y_test, bsne_model, feat_data, edge_indexs, device)\n",
    "print(f'\\n=== Final Test Results ===')\n",
    "print(f'Test AUC: {test_auc:.4f} | Test AP: {test_ap:.4f} | Test F1: {test_f1:.4f} | G-mean: {test_g_mean:.4f}')\n",
    "writer.close()\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fd1f67a9",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e2b8c83f",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "957909bd",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3eb36305",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "02b8439d",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5243e305",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "ab1922bb",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(0.1246, device='cuda:0', dtype=torch.float64)"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "loss_local"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "a5853b24",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(0.0442, device='cuda:0', grad_fn=<LogBackward0>)"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "loss_global"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "id": "138cfe78",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cuda\n",
      "loading data...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Exception ignored in: <bound method IPythonKernel._clean_thread_parent_frames of <ipykernel.ipkernel.IPythonKernel object at 0x155128571110>>\n",
      "Traceback (most recent call last):\n",
      "  File \"/data/home/sczc619/run/LML/anaconda3/envs/tsne/lib/python3.11/site-packages/ipykernel/ipkernel.py\", line 775, in _clean_thread_parent_frames\n",
      "    def _clean_thread_parent_frames(\n",
      "\n",
      "KeyboardInterrupt: \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "=== Starting Pretraining ===\n",
      "local_loss: 0.04649203361754604,global_loss: 0.7916783094406128\n",
      "local_loss: 0.2331491567334159,global_loss: 0.2563435733318329\n",
      "local_loss: 0.2691877507078921,global_loss: 0.5739760398864746\n",
      "local_loss: 0.03359537086094142,global_loss: 0.2717357873916626\n",
      "local_loss: 0.11074298543141188,global_loss: 0.3434722423553467\n",
      "local_loss: 0.32222798161734717,global_loss: 0.5244402885437012\n",
      "local_loss: 0.20391086597742275,global_loss: -0.0530429482460022\n",
      "local_loss: 0.17770055519441072,global_loss: 0.48408275842666626\n",
      "Pretrain Epoch: 000, BSNE Loss: 0.5737\n",
      "local_loss: 0.13462019523010368,global_loss: 0.2992209196090698\n",
      "local_loss: 0.23640164412720852,global_loss: 0.894719660282135\n",
      "local_loss: 0.1669182903874427,global_loss: -0.16131554543972015\n",
      "local_loss: 0.245586277797356,global_loss: 0.6521396040916443\n",
      "local_loss: 0.09679672108987275,global_loss: 0.5670851469039917\n",
      "local_loss: 0.1528492983184826,global_loss: 0.05248625949025154\n",
      "local_loss: 0.16769668034517737,global_loss: 0.48335278034210205\n",
      "local_loss: 0.15724568541301753,global_loss: 0.11374349147081375\n",
      "Pretrain Epoch: 001, BSNE Loss: 0.5324\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
      "\u001b[31mKeyboardInterrupt\u001b[39m                         Traceback (most recent call last)",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[43]\u001b[39m\u001b[32m, line 203\u001b[39m\n\u001b[32m    200\u001b[39m     node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n\u001b[32m    202\u001b[39m     \u001b[38;5;66;03m# 获取节点特征\u001b[39;00m\n\u001b[32m--> \u001b[39m\u001b[32m203\u001b[39m     _, node_feature = \u001b[43mbsne_model\u001b[49m\u001b[43m(\u001b[49m\u001b[43m[\u001b[49m\u001b[43mnode_subgraph\u001b[49m\u001b[43m]\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m    205\u001b[39m     bp_node_features.append(node_feature.squeeze(\u001b[32m0\u001b[39m))\n\u001b[32m    207\u001b[39m \u001b[38;5;66;03m# 将该Bp子图所有节点的特征收集起来\u001b[39;00m\n",
      "\u001b[36mFile \u001b[39m\u001b[32m~/run/LML/anaconda3/envs/tsne/lib/python3.11/site-packages/torch/nn/modules/module.py:1518\u001b[39m, in \u001b[36mModule._wrapped_call_impl\u001b[39m\u001b[34m(self, *args, **kwargs)\u001b[39m\n\u001b[32m   1516\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m._compiled_call_impl(*args, **kwargs)  \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[32m   1517\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[32m-> \u001b[39m\u001b[32m1518\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[43m*\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
      "\u001b[36mFile \u001b[39m\u001b[32m~/run/LML/anaconda3/envs/tsne/lib/python3.11/site-packages/torch/nn/modules/module.py:1527\u001b[39m, in \u001b[36mModule._call_impl\u001b[39m\u001b[34m(self, *args, **kwargs)\u001b[39m\n\u001b[32m   1522\u001b[39m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[32m   1523\u001b[39m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[32m   1524\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m._backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m._backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m._forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m._forward_pre_hooks\n\u001b[32m   1525\u001b[39m         \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[32m   1526\u001b[39m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[32m-> \u001b[39m\u001b[32m1527\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[43m*\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m   1529\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m   1530\u001b[39m     result = \u001b[38;5;28;01mNone\u001b[39;00m\n",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[2]\u001b[39m\u001b[32m, line 412\u001b[39m, in \u001b[36mBSNE_Transformer.forward\u001b[39m\u001b[34m(self, subgraph_batch)\u001b[39m\n\u001b[32m    409\u001b[39m     adj_mask = adj_mask.reshape(-\u001b[32m1\u001b[39m, num_nodes, num_nodes)\n\u001b[32m    411\u001b[39m     \u001b[38;5;66;03m# 关系编码\u001b[39;00m\n\u001b[32m--> \u001b[39m\u001b[32m412\u001b[39m     encoder_output = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mrelation_encoders\u001b[49m\u001b[43m[\u001b[49m\u001b[43mrel_idx\u001b[49m\u001b[43m]\u001b[49m\u001b[43m(\u001b[49m\n\u001b[32m    413\u001b[39m \u001b[43m        \u001b[49m\u001b[43msrc\u001b[49m\u001b[43m=\u001b[49m\u001b[43mfeatures\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m    414\u001b[39m \u001b[43m        \u001b[49m\u001b[43mmask\u001b[49m\u001b[43m=\u001b[49m\u001b[43madj_mask\u001b[49m\n\u001b[32m    415\u001b[39m \u001b[43m    \u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m    416\u001b[39m     rel_outputs.append(encoder_output)\n\u001b[32m    418\u001b[39m \u001b[38;5;66;03m# 合并多关系特征\u001b[39;00m\n",
      "\u001b[36mFile \u001b[39m\u001b[32m~/run/LML/anaconda3/envs/tsne/lib/python3.11/site-packages/torch/nn/modules/module.py:1518\u001b[39m, in \u001b[36mModule._wrapped_call_impl\u001b[39m\u001b[34m(self, *args, **kwargs)\u001b[39m\n\u001b[32m   1516\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m._compiled_call_impl(*args, **kwargs)  \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[32m   1517\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[32m-> \u001b[39m\u001b[32m1518\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[43m*\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
      "\u001b[36mFile \u001b[39m\u001b[32m~/run/LML/anaconda3/envs/tsne/lib/python3.11/site-packages/torch/nn/modules/module.py:1527\u001b[39m, in \u001b[36mModule._call_impl\u001b[39m\u001b[34m(self, *args, **kwargs)\u001b[39m\n\u001b[32m   1522\u001b[39m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[32m   1523\u001b[39m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[32m   1524\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m._backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m._backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m._forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m._forward_pre_hooks\n\u001b[32m   1525\u001b[39m         \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[32m   1526\u001b[39m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[32m-> \u001b[39m\u001b[32m1527\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[43m*\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m   1529\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m   1530\u001b[39m     result = \u001b[38;5;28;01mNone\u001b[39;00m\n",
      "\u001b[36mFile \u001b[39m\u001b[32m~/run/LML/anaconda3/envs/tsne/lib/python3.11/site-packages/torch/nn/modules/transformer.py:387\u001b[39m, in \u001b[36mTransformerEncoder.forward\u001b[39m\u001b[34m(self, src, mask, src_key_padding_mask, is_causal)\u001b[39m\n\u001b[32m    384\u001b[39m is_causal = _detect_is_causal_mask(mask, is_causal, seq_len)\n\u001b[32m    386\u001b[39m \u001b[38;5;28;01mfor\u001b[39;00m mod \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m.layers:\n\u001b[32m--> \u001b[39m\u001b[32m387\u001b[39m     output = \u001b[43mmod\u001b[49m\u001b[43m(\u001b[49m\u001b[43moutput\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43msrc_mask\u001b[49m\u001b[43m=\u001b[49m\u001b[43mmask\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mis_causal\u001b[49m\u001b[43m=\u001b[49m\u001b[43mis_causal\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43msrc_key_padding_mask\u001b[49m\u001b[43m=\u001b[49m\u001b[43msrc_key_padding_mask_for_layers\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m    389\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m convert_to_nested:\n\u001b[32m    390\u001b[39m     output = output.to_padded_tensor(\u001b[32m0.\u001b[39m, src.size())\n",
      "\u001b[36mFile \u001b[39m\u001b[32m~/run/LML/anaconda3/envs/tsne/lib/python3.11/site-packages/torch/nn/modules/module.py:1518\u001b[39m, in \u001b[36mModule._wrapped_call_impl\u001b[39m\u001b[34m(self, *args, **kwargs)\u001b[39m\n\u001b[32m   1516\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m._compiled_call_impl(*args, **kwargs)  \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[32m   1517\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[32m-> \u001b[39m\u001b[32m1518\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[43m*\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
      "\u001b[36mFile \u001b[39m\u001b[32m~/run/LML/anaconda3/envs/tsne/lib/python3.11/site-packages/torch/nn/modules/module.py:1527\u001b[39m, in \u001b[36mModule._call_impl\u001b[39m\u001b[34m(self, *args, **kwargs)\u001b[39m\n\u001b[32m   1522\u001b[39m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[32m   1523\u001b[39m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[32m   1524\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m._backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m._backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m._forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m._forward_pre_hooks\n\u001b[32m   1525\u001b[39m         \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[32m   1526\u001b[39m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[32m-> \u001b[39m\u001b[32m1527\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[43m*\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m   1529\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m   1530\u001b[39m     result = \u001b[38;5;28;01mNone\u001b[39;00m\n",
      "\u001b[36mFile \u001b[39m\u001b[32m~/run/LML/anaconda3/envs/tsne/lib/python3.11/site-packages/torch/nn/modules/transformer.py:708\u001b[39m, in \u001b[36mTransformerEncoderLayer.forward\u001b[39m\u001b[34m(self, src, src_mask, src_key_padding_mask, is_causal)\u001b[39m\n\u001b[32m    706\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[32m    707\u001b[39m     x = \u001b[38;5;28mself\u001b[39m.norm1(x + \u001b[38;5;28mself\u001b[39m._sa_block(x, src_mask, src_key_padding_mask, is_causal=is_causal))\n\u001b[32m--> \u001b[39m\u001b[32m708\u001b[39m     x = \u001b[38;5;28mself\u001b[39m.norm2(x + \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_ff_block\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx\u001b[49m\u001b[43m)\u001b[49m)\n\u001b[32m    710\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m x\n",
      "\u001b[36mFile \u001b[39m\u001b[32m~/run/LML/anaconda3/envs/tsne/lib/python3.11/site-packages/torch/nn/modules/transformer.py:723\u001b[39m, in \u001b[36mTransformerEncoderLayer._ff_block\u001b[39m\u001b[34m(self, x)\u001b[39m\n\u001b[32m    722\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34m_ff_block\u001b[39m(\u001b[38;5;28mself\u001b[39m, x: Tensor) -> Tensor:\n\u001b[32m--> \u001b[39m\u001b[32m723\u001b[39m     x = \u001b[38;5;28mself\u001b[39m.linear2(\u001b[38;5;28mself\u001b[39m.dropout(\u001b[38;5;28mself\u001b[39m.activation(\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mlinear1\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx\u001b[49m\u001b[43m)\u001b[49m)))\n\u001b[32m    724\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m.dropout2(x)\n",
      "\u001b[36mFile \u001b[39m\u001b[32m~/run/LML/anaconda3/envs/tsne/lib/python3.11/site-packages/torch/nn/modules/module.py:1518\u001b[39m, in \u001b[36mModule._wrapped_call_impl\u001b[39m\u001b[34m(self, *args, **kwargs)\u001b[39m\n\u001b[32m   1516\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m._compiled_call_impl(*args, **kwargs)  \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[32m   1517\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[32m-> \u001b[39m\u001b[32m1518\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[43m*\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
      "\u001b[36mFile \u001b[39m\u001b[32m~/run/LML/anaconda3/envs/tsne/lib/python3.11/site-packages/torch/nn/modules/module.py:1527\u001b[39m, in \u001b[36mModule._call_impl\u001b[39m\u001b[34m(self, *args, **kwargs)\u001b[39m\n\u001b[32m   1522\u001b[39m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[32m   1523\u001b[39m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[32m   1524\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m._backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m._backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m._forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m._forward_pre_hooks\n\u001b[32m   1525\u001b[39m         \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[32m   1526\u001b[39m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[32m-> \u001b[39m\u001b[32m1527\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[43m*\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m   1529\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m   1530\u001b[39m     result = \u001b[38;5;28;01mNone\u001b[39;00m\n",
      "\u001b[36mFile \u001b[39m\u001b[32m~/run/LML/anaconda3/envs/tsne/lib/python3.11/site-packages/torch/nn/modules/linear.py:114\u001b[39m, in \u001b[36mLinear.forward\u001b[39m\u001b[34m(self, input)\u001b[39m\n\u001b[32m    113\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34mforward\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;28minput\u001b[39m: Tensor) -> Tensor:\n\u001b[32m--> \u001b[39m\u001b[32m114\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m F.linear(\u001b[38;5;28minput\u001b[39m, \u001b[38;5;28mself\u001b[39m.weight, \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mbias\u001b[49m)\n",
      "\u001b[36mFile \u001b[39m\u001b[32m~/run/LML/anaconda3/envs/tsne/lib/python3.11/site-packages/torch/nn/modules/module.py:1682\u001b[39m, in \u001b[36mModule.__getattr__\u001b[39m\u001b[34m(self, name)\u001b[39m\n\u001b[32m   1673\u001b[39m         \u001b[38;5;28mself\u001b[39m._backward_pre_hooks = OrderedDict()\n\u001b[32m   1675\u001b[39m \u001b[38;5;66;03m# On the return type:\u001b[39;00m\n\u001b[32m   1676\u001b[39m \u001b[38;5;66;03m# We choose to return `Any` in the `__getattr__` type signature instead of a more strict `Union[Tensor, Module]`.\u001b[39;00m\n\u001b[32m   1677\u001b[39m \u001b[38;5;66;03m# This is done for better interop with various type checkers for the end users.\u001b[39;00m\n\u001b[32m   (...)\u001b[39m\u001b[32m   1680\u001b[39m \u001b[38;5;66;03m# See full discussion on the problems with returning `Union` here\u001b[39;00m\n\u001b[32m   1681\u001b[39m \u001b[38;5;66;03m# https://github.com/microsoft/pyright/issues/4213\u001b[39;00m\n\u001b[32m-> \u001b[39m\u001b[32m1682\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34m__getattr__\u001b[39m(\u001b[38;5;28mself\u001b[39m, name: \u001b[38;5;28mstr\u001b[39m) -> Any:\n\u001b[32m   1683\u001b[39m     \u001b[38;5;28;01mif\u001b[39;00m \u001b[33m'\u001b[39m\u001b[33m_parameters\u001b[39m\u001b[33m'\u001b[39m \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m.\u001b[34m__dict__\u001b[39m:\n\u001b[32m   1684\u001b[39m         _parameters = \u001b[38;5;28mself\u001b[39m.\u001b[34m__dict__\u001b[39m[\u001b[33m'\u001b[39m\u001b[33m_parameters\u001b[39m\u001b[33m'\u001b[39m]\n",
      "\u001b[31mKeyboardInterrupt\u001b[39m: "
     ]
    }
   ],
   "source": [
    "# 仅进行预训练测试（新）\n",
    "def plot_losses(losses, title, ylabel, filename):\n",
    "    plt.figure(figsize=(12, 6))\n",
    "    \n",
    "    # 绘制每个batch的详细损失（浅色点）\n",
    "    for epoch, batch_losses in enumerate(losses):\n",
    "        plt.scatter([epoch] * len(batch_losses), batch_losses, \n",
    "                   color='lightblue', alpha=0.4, s=10)\n",
    "    \n",
    "    # 绘制每个epoch的平均损失（深色线）\n",
    "    avg_losses = [np.mean(batch_losses) for batch_losses in losses]\n",
    "    plt.plot(avg_losses, 'b-', linewidth=2, label='Epoch Average')\n",
    "    \n",
    "    # 绘制滑动平均线（窗口=5）\n",
    "    window = 5\n",
    "    moving_avg = [np.mean(avg_losses[max(0, i-window):i+1]) \n",
    "                 for i in range(len(avg_losses))]\n",
    "    plt.plot(moving_avg, 'r--', linewidth=2, label=f'{window}-Epoch Moving Avg')\n",
    "    \n",
    "    plt.title(title)\n",
    "    plt.xlabel('Epoch')\n",
    "    plt.ylabel(ylabel)\n",
    "    plt.legend()\n",
    "    plt.grid(True)\n",
    "    plt.savefig(filename)\n",
    "    plt.close()\n",
    "    \n",
    "# 参数设置\n",
    "args = {\n",
    "    \"dataset\": \"amazon\",\n",
    "    \"batch_size\": 8,\n",
    "    \"sample_size\": 20,\n",
    "    \"weight_decay\": 0.00005,\n",
    "    \"emb_size\": 32,\n",
    "    \"pretrain_epochs\": 100,\n",
    "    \"pretrain_lr\": 0.0001,\n",
    "    \"finetune_lr\": 0.0005,\n",
    "    \"num_epochs\": 100,\n",
    "    \"pretrain_patience\": 20,\n",
    "    \"patience\": 30,\n",
    "    \"tsne_weight\": 0.3,\n",
    "    \"weight\": 0.6,\n",
    "    \"layers\": 7,\n",
    "    \"test_size\": 0.6,\n",
    "    \"val_size\": 0.5,\n",
    "    \"layers_tree\": 7,\n",
    "    \"seed\": 76,\n",
    "    \"num_heads\": 2,\n",
    "    \"drop_rate\": 0.5\n",
    "}\n",
    "\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "print(device)\n",
    "\n",
    "timestamp = time.strftime(\"%Y%m%d-%H%M%S\")\n",
    "writer = SummaryWriter(f'runs/{args[\"dataset\"]}_{timestamp}')\n",
    "\n",
    "print('loading data...')\n",
    "prefix = \"../../data/\"\n",
    "\n",
    "edge_indexs, feat_data, labels = load_data(args['dataset'], args['layers_tree'], prefix)\n",
    "\n",
    "np.random.seed(args['seed'])\n",
    "rd.seed(args['seed'])\n",
    "\n",
    "if args['dataset'] == 'yelp':\n",
    "    index = list(range(len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels, stratify=labels,\n",
    "                                                                    test_size=args['test_size'], random_state=2,\n",
    "                                                                    shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"YelpChi_shortest_distance.pkl\")\n",
    "elif args['dataset'] == 'amazon':\n",
    "    index = list(range(3305, len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels[3305:],\n",
    "                                                                    stratify=labels[3305:],\n",
    "                                                                    test_size=args['test_size'],\n",
    "                                                                    random_state=2, shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"Amazon_shortest_distance.pkl\")\n",
    "\n",
    "with open(dist_path, 'rb') as f:\n",
    "    dist_data = pickle.load(f)\n",
    "    dist_matrix = torch.tensor(dist_data['dist_matrix']).to(device)\n",
    "\n",
    "# 准备特征数据\n",
    "feat_data = torch.tensor(feat_data).float()\n",
    "# 特征归一化\n",
    "scaler = MinMaxScaler()\n",
    "feat_data = torch.tensor(scaler.fit_transform(feat_data)).float().to(device)\n",
    "\n",
    "# 初始化模型\n",
    "bsne_model = BSNE_Transformer(\n",
    "    in_feat=feat_data.shape[1],\n",
    "    out_feat=2,\n",
    "    relation_nums=len(edge_indexs),\n",
    "    d_model=64,\n",
    "    nhead=args['num_heads'],\n",
    "    num_layers=3,\n",
    "    dim_feedforward=256,\n",
    "    drop_rate=args['drop_rate']\n",
    ").to(device)\n",
    "\n",
    "# 将边索引转移到设备\n",
    "for edge_index in edge_indexs:\n",
    "    edge_index[0] = edge_index[0].to(device)\n",
    "    edge_index[1] = [tensor.to(device) for tensor in edge_index[1]]\n",
    "\n",
    "all_local_losses = []      # 每个epoch的batch本地损失列表\n",
    "all_global_losses = []     # 每个epoch的batch全局损失列表\n",
    "all_bsne_losses = []       # 每个epoch的总BSNE损失\n",
    "epoch_avg_local = []       # 每个epoch的平均本地损失\n",
    "epoch_avg_global = []      # 每个epoch的平均全局损失\n",
    "    \n",
    "    \n",
    "print(\"\\n=== Starting Pretraining ===\")\n",
    "\n",
    "bsne_model.classifier.requires_grad_(False)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    filter(lambda p: p.requires_grad, bsne_model.parameters()),\n",
    "    lr=args['pretrain_lr'],\n",
    "    weight_decay=5e-5\n",
    ")\n",
    "pretrain_best_loss = float('inf')\n",
    "pretrain_no_improve = 0\n",
    "pretrain_early_stop = False\n",
    "\n",
    "for epoch in range(args['pretrain_epochs']):\n",
    "#     if pretrain_early_stop:\n",
    "#         break\n",
    "\n",
    "    bsne_model.train()\n",
    "    optimizer.zero_grad()\n",
    "    \n",
    "    \n",
    "    epoch_local_losses = []\n",
    "    epoch_global_losses = []\n",
    "    \n",
    "    \n",
    "    # batch_centers = rd.sample(range(feat_data.shape[0]), args['batch_size'])\n",
    "    batch_centers = np.random.choice(range(feat_data.shape[0]), size=args['batch_size'], replace=False)\n",
    "\n",
    "\n",
    "    sample_size = args['sample_size']\n",
    "    temperature = 100\n",
    "    eps = 1e-10\n",
    "\n",
    "    total_loss = 0.0\n",
    "    batch_size = len(batch_centers)\n",
    "\n",
    "    # 为每个中心节点构建Bp子图\n",
    "    bp_subgraphs = []\n",
    "    for xi in batch_centers:\n",
    "        # 根据距离矩阵计算采样概率\n",
    "        dist_row = dist_matrix[xi].cpu().numpy()\n",
    "        probs = np.power(10.0, -dist_row)\n",
    "        probs[xi] = 0  # 排除自身\n",
    "        probs /= probs.sum()\n",
    "\n",
    "        # 采样邻居节点\n",
    "        available_nodes = len(dist_row) - 1\n",
    "        actual_sample_size = min(sample_size, available_nodes)\n",
    "\n",
    "        if actual_sample_size > 0:\n",
    "            neighbors = np.random.choice(len(dist_row), size=actual_sample_size, p=probs, replace=False)\n",
    "            subgraph_nodes = [xi] + neighbors.tolist()\n",
    "        else:\n",
    "            subgraph_nodes = [xi]\n",
    "\n",
    "        bp_subgraphs.append(subgraph_nodes)\n",
    "\n",
    "    # 为每个中心节点构建Bu子图（均匀采样）\n",
    "    bu_subgraphs = []\n",
    "    for xi in batch_centers:\n",
    "        dist_row = dist_matrix[xi].cpu().numpy()\n",
    "\n",
    "        # 随机采样节点构建Bu子图\n",
    "        available_nodes = len(dist_row) - 1\n",
    "        actual_sample_size = min(sample_size, available_nodes)\n",
    "\n",
    "        if actual_sample_size > 0:\n",
    "            neighbors = np.random.choice(len(dist_row), size=actual_sample_size, replace=False)\n",
    "            subgraph_nodes = [xi] + neighbors.tolist()\n",
    "        else:\n",
    "            subgraph_nodes = [xi]\n",
    "\n",
    "        bu_subgraphs.append(subgraph_nodes)\n",
    "    # 计算Bp子图中所有节点的特征\n",
    "    bp_features_batch = []\n",
    "    for bp_nodes in bp_subgraphs:\n",
    "        bp_node_features = []\n",
    "\n",
    "        # 为子图中的每个节点计算特征\n",
    "        for node_idx in bp_nodes:\n",
    "            # 构建该节点的一阶邻居子图\n",
    "            node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "\n",
    "            # 获取节点特征\n",
    "            _, node_feature = bsne_model([node_subgraph])\n",
    "\n",
    "            bp_node_features.append(node_feature.squeeze(0))\n",
    "\n",
    "        # 将该Bp子图所有节点的特征收集起来\n",
    "        bp_features_batch.append(torch.stack(bp_node_features))  # [sample_size+1, d_model]\n",
    "\n",
    "    # 计算Bu子图中所有节点的特征\n",
    "    bu_features_batch = []\n",
    "    for bu_nodes in bu_subgraphs:\n",
    "        bu_node_features = []\n",
    "\n",
    "        # 为子图中的每个节点计算特征\n",
    "        for node_idx in bu_nodes:\n",
    "            # 构建该节点的一阶邻居子图\n",
    "            node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "\n",
    "            # 获取节点特征\n",
    "            _, node_feature = bsne_model([node_subgraph])\n",
    "\n",
    "            bu_node_features.append(node_feature.squeeze(0))\n",
    "\n",
    "        # 将该Bu子图所有节点的特征收集起来\n",
    "        bu_features_batch.append(torch.stack(bu_node_features))  # [sample_size+1, d_model]\n",
    "\n",
    "    # 针对每一对Bp和Bu子图计算loss\n",
    "    for i in range(batch_size):\n",
    "        # 获取中心节点索引和特征\n",
    "        center_idx = batch_centers[i]\n",
    "        center_feature = bp_features_batch[i][0]  # 中心节点是第一个\n",
    "\n",
    "        # 仅获取Bp子图中的其他节点（排除中心节点）\n",
    "        other_bp_indices = bp_subgraphs[i][1:]\n",
    "        other_bp_features = bp_features_batch[i][1:]\n",
    "        \n",
    "        with torch.no_grad():\n",
    "            # 计算中心节点到其他Bp节点的原始距离\n",
    "            orig_dists_bp = dist_matrix[center_idx][other_bp_indices].cpu().numpy()\n",
    "\n",
    "            # 计算P向量（基于节点路径距离）\n",
    "        #     P = (1.0 + orig_dists_bp ** 2) ** -1\n",
    "            P = np.exp(-orig_dists_bp)\n",
    "        #     P = np.power(10.0, -orig_dists_bp)\n",
    "            P_sum = P.sum()\n",
    "            P = P / (P_sum + eps)\n",
    "            P = torch.tensor(P, device=device)\n",
    "\n",
    "        # 计算Q向量（基于特征距离）\n",
    "    #     Q = (1.0 + feat_dists_bp ** 2 / temperature) ** -1\n",
    "    #     Q = np.exp(-feat_dists_bp.detach().numpy())\n",
    "#         Q = np.exp(-feat_dists_bp.detach().cpu().numpy())\n",
    "        # 计算中心节点到其他Bp节点的特征距离\n",
    "        feat_dists_bp = torch.cdist(center_feature.unsqueeze(0), other_bp_features).squeeze(0)\n",
    "        feat_dists_bp.requires_grad_(True)\n",
    "#         Q = np.exp(-feat_dists_bp.cpu().numpy())\n",
    "#         Q_sum = Q.sum()\n",
    "#         Q = Q / (Q_sum + eps)\n",
    "#         Q = torch.tensor(Q, device=device)\n",
    "        \n",
    "        Q = torch.softmax(-feat_dists_bp, dim=0)\n",
    "        \n",
    "        log_ratio = torch.log(P + eps) - torch.log(Q + eps)\n",
    "        loss_local = log_ratio.mean() \n",
    "\n",
    "        # 获取全局距离并计算概率\n",
    "        all_dists = dist_matrix[center_idx].cpu().numpy()\n",
    "        unnorm_probs_global = np.exp(-all_dists)\n",
    "        unnorm_probs_global[center_idx] = 0  # 排除自身\n",
    "\n",
    "        # 计算全局归一化常数\n",
    "        Z_global = unnorm_probs_global.sum() + eps\n",
    "\n",
    "        # 计算B_p节点的全局概率和\n",
    "        bp_global_prob_sum = unnorm_probs_global[other_bp_indices].sum() / Z_global\n",
    "\n",
    "        # 计算k_Bp (公式定义)\n",
    "        N = dist_matrix.shape[0]\n",
    "        k_Bp = bp_global_prob_sum * (N / len(other_bp_indices))\n",
    "\n",
    "        bu_features = bu_features_batch[i][1:]  # 排除中心节点\n",
    "        feat_dists_bu = torch.cdist(center_feature.unsqueeze(0), bu_features).squeeze(0)\n",
    "\n",
    "        sum_e_bu = torch.exp(-feat_dists_bu).sum()\n",
    "        sum_e_bp = torch.exp(-feat_dists_bp).sum()\n",
    "        global_ratio = k_Bp * (sum_e_bu / (sum_e_bp + eps))\n",
    "        loss_global = torch.log(global_ratio.clamp(min=eps, max=1e10))\n",
    "\n",
    "        # = 5. 损失合并 =\n",
    "        total_loss += loss_local + loss_global\n",
    "        \n",
    "        epoch_local_losses.append(loss_local.item())\n",
    "        epoch_global_losses.append(loss_global.item())\n",
    "        \n",
    "        print(f\"local_loss: {loss_local.item()},global_loss: {loss_global.item()}\")\n",
    "        '''\n",
    "        # 局部损失（KL散度）\n",
    "        loss_local = (torch.log(P + eps) - torch.log(Q + eps)).sum() / (len(bp_features_batch[i]-1))\n",
    "        \n",
    "\n",
    "        # 计算k_Bp\n",
    "        # 获取中心节点到图中所有节点的距离\n",
    "        all_dists = dist_matrix[center_idx]  # [N]\n",
    "\n",
    "    #     \n",
    "    #     unnorm_probs_all = (1.0 + all_dists ** 2) ** -1\n",
    "    # \n",
    "        unnorm_probs_all = np.exp(-all_dists.cpu().numpy())\n",
    "    #     \n",
    "    #     unnorm_probs_all = np.power(10.0, -all_dists.cpu().numpy())\n",
    "        # 排除中心节点自身（设置自身概率为0）\n",
    "    #     unnorm_probs_all = unnorm_probs_all.clone()  # 避免修改原始数据\n",
    "        unnorm_probs_all[center_idx] = 0\n",
    "        Z_i = unnorm_probs_all.sum()\n",
    "        sum_p_cond = P.sum()\n",
    "        N = dist_matrix.shape[0]\n",
    "        k_Bp = (sum_p_cond * N / len(other_bp_indices))/Z_i\n",
    "\n",
    "        # 计算中心节点到Bu子图节点的特征距离\n",
    "        bu_features = bu_features_batch[i]\n",
    "        feat_dists_bu = torch.cdist(center_feature.unsqueeze(0), bu_features).squeeze(0)\n",
    "\n",
    "        # 计算指数距离之和\n",
    "        sum_e_bu = torch.exp(-feat_dists_bu).sum()\n",
    "        sum_e_bp = torch.exp(-feat_dists_bp).sum()\n",
    "\n",
    "        # 全局损失\n",
    "        global_ratio = k_Bp * (sum_e_bu / (sum_e_bp + eps))\n",
    "        loss_global = torch.log(global_ratio.clamp(min=eps, max=1e10))  # 限制范围\n",
    "\n",
    "        # 合并损失\n",
    "        total_loss += loss_local + loss_global\n",
    "#         print('P:',P)\n",
    "#         print('Q:',Q)\n",
    "#         print(f\"local_loss: {loss_local.item()},global_loss: {loss_global.item()}\")\n",
    "        '''\n",
    "    bsne_loss = total_loss / batch_size\n",
    "    \n",
    "    \n",
    "    all_local_losses.append(epoch_local_losses)\n",
    "    all_global_losses.append(epoch_global_losses)\n",
    "    \n",
    "    avg_local = np.mean(epoch_local_losses)\n",
    "    avg_global = np.mean(epoch_global_losses)\n",
    "    epoch_avg_local.append(avg_local)\n",
    "    epoch_avg_global.append(avg_global)\n",
    "    \n",
    "    bsne_loss = total_loss / batch_size\n",
    "    \n",
    "    bsne_loss_item = bsne_loss.item()\n",
    "    all_bsne_losses.append(bsne_loss_item)\n",
    "\n",
    "    bsne_loss.backward()\n",
    "    optimizer.step()\n",
    "    \n",
    "#     for name, parms in bsne_model.named_parameters():\n",
    "#         print('-->name:', name, '-->grad_requirs:', parms.requires_grad, '--weight', torch.mean(parms.data), ' -->grad_value:', torch.mean(parms.grad))\n",
    "    \n",
    "    # 更新最佳损失和早停计数器\n",
    "#     if bsne_loss.item() < pretrain_best_loss:\n",
    "#         pretrain_best_loss = bsne_loss.item()\n",
    "#         pretrain_no_improve = 0\n",
    "#     else:\n",
    "#         pretrain_no_improve += 1\n",
    "\n",
    "#     if pretrain_no_improve >= args['pretrain_patience']:\n",
    "#         print(f\"Pretrain early stopping at epoch {epoch}\")\n",
    "#         pretrain_early_stop = True\n",
    "\n",
    "    writer.add_scalar('Pretrain/TSNE_Loss', bsne_loss.item(), epoch)\n",
    "    print(f'Pretrain Epoch: {epoch:03d}, BSNE Loss: {bsne_loss.item():.4f}')\n",
    "#     if epoch % 5 == 0:\n",
    "#         print(f'Pretrain Epoch: {epoch:03d}, BSNE Loss: {bsne_loss.item():.4f}')\n",
    "\n",
    "\n",
    "plot_losses(all_local_losses, 'Local Loss During Pretraining', 'Loss', 'local_loss.png')\n",
    "plot_losses(all_global_losses, 'Global Loss During Pretraining', 'Loss', 'global_loss.png')\n",
    "\n",
    "# 绘制BSNE总损失\n",
    "plt.figure(figsize=(12, 6))\n",
    "plt.plot(all_bsne_losses, 'g-', linewidth=2, label='BSNE Loss')\n",
    "plt.title('Total BSNE Loss During Pretraining')\n",
    "plt.xlabel('Epoch')\n",
    "plt.ylabel('Loss')\n",
    "plt.grid(True)\n",
    "plt.savefig('bsne_loss.png')\n",
    "plt.close()\n",
    "\n",
    "# 绘制局部和全局平均损失对比\n",
    "plt.figure(figsize=(12, 6))\n",
    "plt.plot(epoch_avg_local, 'b-', linewidth=2, label='Local Loss (Avg)')\n",
    "plt.plot(epoch_avg_global, 'r-', linewidth=2, label='Global Loss (Avg)')\n",
    "plt.title('Local vs Global Loss During Pretraining')\n",
    "plt.xlabel('Epoch')\n",
    "plt.ylabel('Loss')\n",
    "plt.legend()\n",
    "plt.grid(True)\n",
    "plt.savefig('local_vs_global.png')\n",
    "plt.close()\n",
    "\n",
    "print(\"\\nLoss plots saved to local_loss.png, global_loss.png, bsne_loss.png, local_vs_global.png\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "652e229e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "Loss plots saved to local_loss.png, global_loss.png, bsne_loss.png, local_vs_global.png\n"
     ]
    }
   ],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "dc3fa11e",
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_losses(all_local_losses, 'Local Loss During Pretraining', 'Loss', 'local_loss.png')\n",
    "plot_losses(all_global_losses, 'Global Loss During Pretraining', 'Loss', 'global_loss.png')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "3d591c99",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([17.6802, 17.6685, 17.6395, 18.7998, 17.3093, 18.0573, 18.1619, 16.6508,\n",
       "        17.5100, 18.1675, 17.6263, 17.3141, 16.0878, 18.0150, 18.2572, 18.2173,\n",
       "        17.7007, 17.6163, 17.5833, 17.8705], device='cuda:0',\n",
       "       grad_fn=<SqueezeBackward1>)"
      ]
     },
     "execution_count": 23,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "feat_dists_bp"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "id": "0be56a8b",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([0.0417, 0.0422, 0.0434, 0.0136, 0.0604, 0.0286, 0.0258, 0.1167, 0.0494,\n",
       "        0.0256, 0.0440, 0.0601, 0.2050, 0.0298, 0.0234, 0.0244, 0.0409, 0.0445,\n",
       "        0.0459, 0.0345], device='cuda:0', grad_fn=<SoftmaxBackward0>)"
      ]
     },
     "execution_count": 28,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.softmax(-feat_dists_bp, dim=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "id": "a6d95a88",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([2.0969662e-08, 2.1216358e-08, 2.1839419e-08, 6.8447910e-09,\n",
       "       3.0384253e-08, 1.4381880e-08, 1.2953725e-08, 5.8699364e-08,\n",
       "       2.4860469e-08, 1.2881486e-08, 2.2130978e-08, 3.0238908e-08,\n",
       "       1.0307503e-07, 1.5002987e-08, 1.1776406e-08, 1.2255058e-08,\n",
       "       2.0543842e-08, 2.2353788e-08, 2.3102293e-08, 1.7336182e-08],\n",
       "      dtype=float32)"
      ]
     },
     "execution_count": 34,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "np.exp(-feat_dists_bp.detach().cpu().numpy())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "95360e59",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "116f30d6",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6b90f5a9",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cuda\n",
      "loading data...\n",
      "\n",
      "=== Starting Fine-tuning ===\n",
      "Epoch: 000 | Loss: 0.0738 | Val AUC: 0.7377 | Val F1: 0.4813\n",
      "Epoch: 005 | Loss: 0.0327 | Val AUC: 0.7711 | Val F1: 0.4751\n",
      "Epoch: 010 | Loss: 0.0570 | Val AUC: 0.7901 | Val F1: 0.4751\n",
      "Epoch: 015 | Loss: 0.0130 | Val AUC: 0.7999 | Val F1: 0.4751\n",
      "Epoch: 020 | Loss: 0.0194 | Val AUC: 0.8048 | Val F1: 0.4751\n",
      "Epoch: 025 | Loss: 0.0133 | Val AUC: 0.8058 | Val F1: 0.4751\n",
      "Epoch: 030 | Loss: 0.0061 | Val AUC: 0.8073 | Val F1: 0.4751\n",
      "Epoch: 040 | Loss: 0.0052 | Val AUC: 0.8104 | Val F1: 0.4751\n",
      "Epoch: 045 | Loss: 0.0132 | Val AUC: 0.8145 | Val F1: 0.4751\n",
      "Epoch: 050 | Loss: 0.0106 | Val AUC: 0.8172 | Val F1: 0.4751\n",
      "Epoch: 055 | Loss: 0.0579 | Val AUC: 0.8230 | Val F1: 0.4751\n",
      "Epoch: 060 | Loss: 0.0575 | Val AUC: 0.8258 | Val F1: 0.4751\n",
      "Epoch: 065 | Loss: 0.0387 | Val AUC: 0.8291 | Val F1: 0.4751\n",
      "Epoch: 070 | Loss: 0.0201 | Val AUC: 0.8295 | Val F1: 0.4751\n",
      "Epoch: 075 | Loss: 0.0790 | Val AUC: 0.8334 | Val F1: 0.4751\n",
      "Epoch: 080 | Loss: 0.0061 | Val AUC: 0.8310 | Val F1: 0.4751\n",
      "Epoch: 085 | Loss: 0.1036 | Val AUC: 0.8356 | Val F1: 0.4751\n",
      "Epoch: 090 | Loss: 0.0074 | Val AUC: 0.8397 | Val F1: 0.4751\n",
      "Epoch: 095 | Loss: 0.0090 | Val AUC: 0.8435 | Val F1: 0.4751\n",
      "Epoch: 100 | Loss: 0.0235 | Val AUC: 0.8443 | Val F1: 0.4751\n",
      "Epoch: 105 | Loss: 0.0167 | Val AUC: 0.8424 | Val F1: 0.4813\n",
      "Epoch: 110 | Loss: 0.0604 | Val AUC: 0.8468 | Val F1: 0.4811\n",
      "Epoch: 115 | Loss: 0.0043 | Val AUC: 0.8485 | Val F1: 0.4811\n",
      "Epoch: 120 | Loss: 0.1123 | Val AUC: 0.8517 | Val F1: 0.4811\n",
      "Epoch: 125 | Loss: 0.0068 | Val AUC: 0.8527 | Val F1: 0.4811\n",
      "Epoch: 130 | Loss: 0.0340 | Val AUC: 0.8542 | Val F1: 0.4927\n",
      "Epoch: 135 | Loss: 0.0160 | Val AUC: 0.8595 | Val F1: 0.5102\n",
      "Epoch: 140 | Loss: 0.0374 | Val AUC: 0.8611 | Val F1: 0.5240\n"
     ]
    }
   ],
   "source": [
    "# 仅进行分类测试（新）\n",
    "\n",
    "# 参数设置\n",
    "args = {\n",
    "    \"dataset\": \"amazon\",\n",
    "    \"batch_size\": 8,\n",
    "    \"sample_size\": 20,\n",
    "    \"weight_decay\": 0.00005,\n",
    "    \"emb_size\": 32,\n",
    "    \"pretrain_epochs\": 100,\n",
    "    \"pretrain_lr\": 0.0001,\n",
    "    \"finetune_lr\": 0.0001,\n",
    "    \"num_epochs\": 200,\n",
    "    \"pretrain_patience\": 20,\n",
    "    \"patience\": 30,\n",
    "    \"tsne_weight\": 0.3,\n",
    "    \"weight\": 0.6,\n",
    "    \"layers\": 7,\n",
    "    \"test_size\": 0.6,\n",
    "    \"val_size\": 0.5,\n",
    "    \"layers_tree\": 7,\n",
    "    \"seed\": 76,\n",
    "    \"num_heads\": 4,\n",
    "    \"drop_rate\": 0.5\n",
    "}\n",
    "\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "print(device)\n",
    "\n",
    "timestamp = time.strftime(\"%Y%m%d-%H%M%S\")\n",
    "writer = SummaryWriter(f'runs/{args[\"dataset\"]}_{timestamp}')\n",
    "\n",
    "print('loading data...')\n",
    "prefix = \"../../data/\"\n",
    "\n",
    "edge_indexs, feat_data, labels = load_data(args['dataset'], args['layers_tree'], prefix)\n",
    "\n",
    "np.random.seed(args['seed'])\n",
    "rd.seed(args['seed'])\n",
    "\n",
    "if args['dataset'] == 'yelp':\n",
    "    index = list(range(len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels, stratify=labels,\n",
    "                                                                    test_size=args['test_size'], random_state=2,\n",
    "                                                                    shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"YelpChi_shortest_distance.pkl\")\n",
    "elif args['dataset'] == 'amazon':\n",
    "    index = list(range(3305, len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels[3305:],\n",
    "                                                                    stratify=labels[3305:],\n",
    "                                                                    test_size=args['test_size'],\n",
    "                                                                    random_state=2, shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"Amazon_shortest_distance.pkl\")\n",
    "\n",
    "with open(dist_path, 'rb') as f:\n",
    "    dist_data = pickle.load(f)\n",
    "    dist_matrix = torch.tensor(dist_data['dist_matrix']).to(device)\n",
    "\n",
    "# 准备特征数据\n",
    "feat_data = torch.tensor(feat_data).float()\n",
    "# 特征归一化\n",
    "scaler = MinMaxScaler()\n",
    "feat_data = torch.tensor(scaler.fit_transform(feat_data)).float().to(device)\n",
    "\n",
    "# 初始化模型\n",
    "bsne_model = BSNE_Transformer(\n",
    "    in_feat=feat_data.shape[1],\n",
    "    out_feat=2,\n",
    "    relation_nums=len(edge_indexs),\n",
    "    d_model=64,\n",
    "    nhead=args['num_heads'],\n",
    "    num_layers=3,\n",
    "    dim_feedforward=256,\n",
    "    drop_rate=args['drop_rate']\n",
    ").to(device)\n",
    "\n",
    "# 将边索引转移到设备\n",
    "for edge_index in edge_indexs:\n",
    "    edge_index[0] = edge_index[0].to(device)\n",
    "    edge_index[1] = [tensor.to(device) for tensor in edge_index[1]]\n",
    "\n",
    "print(\"\\n=== Starting Fine-tuning ===\")\n",
    "bsne_model.classifier.requires_grad_(True)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    bsne_model.parameters(),\n",
    "    lr=args['finetune_lr'],\n",
    "    weight_decay=5e-5\n",
    ")\n",
    "scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n",
    "    optimizer, mode='max', factor=0.5, patience=10, verbose=True\n",
    ")\n",
    "\n",
    "best_val_auc = 0.0\n",
    "best_model_state = None\n",
    "train_pos, train_neg = pos_neg_split(idx_train, y_train)\n",
    "\n",
    "no_improve_epochs = 0\n",
    "early_stop = False\n",
    "\n",
    "for epoch in range(args['num_epochs']):\n",
    "    if early_stop:\n",
    "        break\n",
    "\n",
    "    bsne_model.train()\n",
    "    total_loss = 0.0\n",
    "\n",
    "    # 采样中心节点\n",
    "    batch_centers = rd.sample(train_pos + train_neg, args['batch_size'])\n",
    "    subgraph_data = []\n",
    "\n",
    "    for xi in batch_centers:\n",
    "        subgraph = create_node_subgraph(xi, feat_data, edge_indexs, device)\n",
    "        #         print(len(subgraph['features']))\n",
    "        subgraph_data.append(subgraph)\n",
    "\n",
    "    optimizer.zero_grad()\n",
    "\n",
    "    center_logits, _ = bsne_model(subgraph_data)  # [B, 2]\n",
    "\n",
    "    # 提取中心节点标签\n",
    "    labels_center = torch.tensor([labels[xi] for xi in batch_centers]).to(device).long()\n",
    "\n",
    "    # 计算分类损失\n",
    "    cls_loss = F.nll_loss(center_logits, labels_center)\n",
    "\n",
    "    cls_loss.backward()\n",
    "    optimizer.step()\n",
    "\n",
    "    total_loss += cls_loss.item()\n",
    "\n",
    "    avg_loss = total_loss / args['batch_size']\n",
    "    writer.add_scalar('FineTune/Train_Loss', avg_loss, epoch)\n",
    "\n",
    "    if epoch % 5 == 0:\n",
    "        val_auc, val_ap, val_f1, val_g_mean = test(idx_val, y_val, bsne_model, feat_data, edge_indexs, device)\n",
    "\n",
    "        writer.add_scalar('Validation/AUC', val_auc, epoch)\n",
    "        writer.add_scalar('Validation/F1', val_f1, epoch)\n",
    "        writer.add_scalar('Validation/GMean', val_g_mean, epoch)\n",
    "\n",
    "        print(f'Epoch: {epoch:03d} | Loss: {avg_loss:.4f} | Val AUC: {val_auc:.4f} | Val F1: {val_f1:.4f}')\n",
    "\n",
    "        scheduler.step(val_auc)\n",
    "\n",
    "        if val_auc > best_val_auc:\n",
    "            best_val_auc = val_auc\n",
    "            no_improve_epochs = 0\n",
    "            best_model_state = copy.deepcopy(bsne_model.state_dict())\n",
    "        else:\n",
    "            no_improve_epochs += 1\n",
    "\n",
    "        if no_improve_epochs >= args['patience']:\n",
    "            print(f\"Early stopping at epoch {epoch}\")\n",
    "            early_stop = True\n",
    "\n",
    "# 加载最佳模型\n",
    "bsne_model.load_state_dict(best_model_state)\n",
    "\n",
    "# 最终测试\n",
    "test_auc, test_ap, test_f1, test_g_mean = test(idx_test, y_test, bsne_model, feat_data, edge_indexs, device)\n",
    "print(f'\\n=== Final Test Results ===')\n",
    "print(f'Test AUC: {test_auc:.4f} | Test AP: {test_ap:.4f} | Test F1: {test_f1:.4f} | G-mean: {test_g_mean:.4f}')\n",
    "writer.close()\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "67151168",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "237718be",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e77289c4",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c837fb15",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d5365c63",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7bb7c130",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0732cb94",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "db830b11",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(5.0090, device='cuda:0', grad_fn=<SumBackward0>)"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "Q_sum"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "5b739c47",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[2 1 1 1 2 2 2 2 2 2 2 1 2 1 2 2 2 2 2 2]\n",
      "P: tensor([0.0364, 0.0909, 0.0909, 0.0909, 0.0364, 0.0364, 0.0364, 0.0364, 0.0364,\n",
      "        0.0364, 0.0364, 0.0909, 0.0364, 0.0909, 0.0364, 0.0364, 0.0364, 0.0364,\n",
      "        0.0364, 0.0364], device='cuda:0', dtype=torch.float64)\n",
      "Q: tensor([0.0481, 0.0558, 0.0499, 0.0491, 0.0554, 0.0540, 0.0510, 0.0545, 0.0443,\n",
      "        0.0532, 0.0520, 0.0463, 0.0474, 0.0519, 0.0491, 0.0479, 0.0514, 0.0430,\n",
      "        0.0455, 0.0504], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "tensor(1.0000, device='cuda:0', dtype=torch.float64)\n",
      "tensor(1., device='cuda:0', grad_fn=<SumBackward0>)\n"
     ]
    }
   ],
   "source": [
    "print(orig_dists_bp)\n",
    "print(\"P:\",P)\n",
    "print(\"Q:\",Q)\n",
    "print(P.sum())\n",
    "print(Q.sum())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "d8b4d0d6",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[0, 2, 2,  ..., 2, 2, 2],\n",
       "        [2, 0, 2,  ..., 2, 2, 2],\n",
       "        [2, 2, 0,  ..., 2, 2, 2],\n",
       "        ...,\n",
       "        [2, 2, 2,  ..., 0, 1, 1],\n",
       "        [2, 2, 2,  ..., 1, 0, 1],\n",
       "        [2, 2, 2,  ..., 1, 1, 0]], device='cuda:0', dtype=torch.int32)"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "dist_matrix"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "3eac6f4e",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([0.0364, 0.0909, 0.0909, 0.0909, 0.0364, 0.0364, 0.0364, 0.0364, 0.0364,\n",
       "        0.0364, 0.0364, 0.0909, 0.0364, 0.0909, 0.0364, 0.0364, 0.0364, 0.0364,\n",
       "        0.0364, 0.0364], device='cuda:0', dtype=torch.float64)"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "P"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "ec3d1ca5",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([8.01726913e-05, 8.01726913e-05, 8.01726913e-05, ...,\n",
       "       8.01726913e-05, 8.01726913e-05, 8.01726913e-05])"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "probs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "2fdb02fe",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Center 5103: distance counts: {0: 1, 1: 459, 2: 11334, 3: 150}\n",
      "Center 4736: distance counts: {0: 1, 1: 1919, 2: 10005, 3: 19}\n",
      "Center 9863: distance counts: {0: 1, 1: 580, 2: 11188, 3: 175}\n",
      "Center 10791: distance counts: {0: 1, 1: 98, 2: 9597, 3: 2248}\n",
      "Center 3137: distance counts: {0: 1, 1: 237, 2: 11442, 3: 264}\n",
      "Center 1875: distance counts: {0: 1, 1: 865, 2: 10998, 3: 80}\n",
      "Center 2359: distance counts: {0: 1, 1: 87, 2: 10409, 3: 1447}\n",
      "Center 475: distance counts: {0: 1, 1: 1813, 2: 10098, 3: 32}\n"
     ]
    }
   ],
   "source": [
    "for xi in batch_centers:\n",
    "    dist_row = dist_matrix[xi].cpu().numpy()\n",
    "    unique, counts = np.unique(dist_row, return_counts=True)\n",
    "    print(f\"Center {xi}: distance counts: {dict(zip(unique, counts))}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "42b1d988",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([ 5103,  4736,  9863, 10791,  3137,  1875,  2359,   475])"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "batch_centers"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "20ee4022",
   "metadata": {},
   "outputs": [],
   "source": [
    "dist = dist_matrix[6071].cpu().numpy()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "527696e8",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([2, 2, 2, ..., 2, 2, 2], dtype=int32)"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "dist"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "d3f4a9f9",
   "metadata": {},
   "outputs": [],
   "source": [
    "pb = np.exp(-dist)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "82ad1aba",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([0.13533528, 0.13533528, 0.13533528, ..., 0.13533528, 0.13533528,\n",
       "       0.13533528])"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "pb"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "bcac101f",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([7.62116273e-05, 7.62116273e-05, 7.62116273e-05, ...,\n",
       "       7.62116273e-05, 7.62116273e-05, 7.62116273e-05])"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "pb /= pb.sum()\n",
    "pb"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "05d677ed",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Center 6071: probs: {2.803669087973917e-05: 102, 7.621162734851842e-05: 11122, 0.00020716468173877003: 719, 0.0005631319898689999: 1}\n"
     ]
    }
   ],
   "source": [
    "\n",
    "unique, counts = np.unique(pb, return_counts=True)\n",
    "print(f\"Center 6071: probs: {dict(zip(unique, counts))}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "fd098860",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.8476257193702219"
      ]
     },
     "execution_count": 20,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "11122*7.621162734851842e-05"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "7a20b676",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.03665072072437298"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "0.8476257193702219**20"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6739417b",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b837e37a",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1743808b",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "39aabd10",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "tsne01",
   "language": "python",
   "name": "tsne01"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
