{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "e7f126eb",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/data/home/sczc619/run/LML/anaconda3/envs/tsne/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n"
     ]
    }
   ],
   "source": [
    "import pickle\n",
    "import os\n",
    "import random as rd\n",
    "import numpy as np\n",
    "import copy\n",
    "import copy as cp\n",
    "import dgl\n",
    "from collections import defaultdict\n",
    "import matplotlib.pyplot as plt\n",
    "import time\n",
    "import scipy.sparse as sp\n",
    "from scipy.io import loadmat\n",
    "import torch\n",
    "import torch.nn.functional as F\n",
    "import torch.nn as nn\n",
    "from torch.nn import TransformerEncoder, TransformerEncoderLayer\n",
    "from torch_geometric.utils import to_dense_adj, subgraph\n",
    "from torch.utils.tensorboard import SummaryWriter\n",
    "from sklearn.preprocessing import MinMaxScaler\n",
    "from sklearn.manifold import TSNE\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.metrics import f1_score, accuracy_score, recall_score, roc_auc_score, average_precision_score, \\\n",
    "    confusion_matrix\n",
    "\n",
    "filelist = {\n",
    "    'amz_upu': 'amz_upu_adjlists.pickle',\n",
    "    'amz_usu': 'amz_usu_adjlists.pickle',\n",
    "    'amz_uvu': 'amz_uvu_adjlists.pickle',\n",
    "    'yelp_rsr': 'yelp_rsr_adjlists.pickle',\n",
    "    'yelp_rtr': 'yelp_rtr_adjlists.pickle',\n",
    "    'yelp_rur': 'yelp_rur_adjlists.pickle'\n",
    "}\n",
    "\n",
    "file_matrix_prefix = {\n",
    "    'amz_upu': 'amazon_upu_matrix_',\n",
    "    'amz_usu': 'amazon_usu_matrix_',\n",
    "    'amz_uvu': 'amazon_uvu_matrix_',\n",
    "    'yelp_rsr': 'yelpnet_rsr_matrix_decompision_',\n",
    "    'yelp_rtr': 'yelpnet_rtr_matrix_decompision_',\n",
    "    'yelp_rur': 'yelpnet_rur_matrix_decompision_'\n",
    "}\n",
    "\n",
    "\n",
    "def create_node_subgraph(node_idx, feat_data, edge_indexs, device):\n",
    "    \"\"\"\n",
    "    为单个节点创建一阶邻居子图（所有邻居）\n",
    "    \"\"\"\n",
    "    neighbors = set()\n",
    "    for rel_idx in range(len(edge_indexs)):\n",
    "        edge_index = edge_indexs[rel_idx][0].cpu().numpy()\n",
    "        # 找出以中心点为起点的边的终点\n",
    "        rel_neighbors = edge_index[1][edge_index[0] == node_idx].tolist()\n",
    "        neighbors.update(rel_neighbors)\n",
    "\n",
    "    # 移除中心节点自身\n",
    "    neighbors.discard(node_idx)\n",
    "    neighbors = list(neighbors)\n",
    "\n",
    "    # 如果邻居太多，进行随机采样截取\n",
    "    sample_size = 399\n",
    "    if len(neighbors) > sample_size:\n",
    "        neighbors = np.random.choice(neighbors, size=sample_size, replace=False).tolist()\n",
    "\n",
    "    # 构建子图节点列表，确保中心节点是第一个\n",
    "    sub_nodes = [node_idx] + [n for n in neighbors if n != node_idx]\n",
    "\n",
    "    # 构建子图边列表\n",
    "    sub_edge_index = []\n",
    "    for rel_idx in range(len(edge_indexs)):\n",
    "        edge_index = edge_indexs[rel_idx][0].cpu().numpy()\n",
    "        mask = np.isin(edge_index[0], sub_nodes) & np.isin(edge_index[1], sub_nodes)\n",
    "        local_edges = edge_index[:, mask]\n",
    "\n",
    "        # 创建节点映射\n",
    "        node_map = {n: i for i, n in enumerate(sub_nodes)}\n",
    "\n",
    "        # 将全局索引映射到局部索引\n",
    "        if len(local_edges) > 0 and local_edges.size > 0:\n",
    "            src_nodes = [node_map[src] for src in local_edges[0]]\n",
    "            dst_nodes = [node_map[dst] for dst in local_edges[1]]\n",
    "            edge_tensor = torch.tensor([src_nodes, dst_nodes], dtype=torch.long)\n",
    "        else:\n",
    "            # 添加自环确保图不为空\n",
    "            edge_tensor = torch.tensor([[0], [0]], dtype=torch.long)\n",
    "\n",
    "        sub_edge_index.append(edge_tensor.to(device))\n",
    "\n",
    "    # 创建子图数据\n",
    "    subgraph = {\n",
    "        'features': feat_data[sub_nodes].clone(),\n",
    "        'edges': sub_edge_index,\n",
    "        'global_idx': sub_nodes\n",
    "    }\n",
    "\n",
    "    return subgraph\n",
    "\n",
    "\n",
    "def dict_to_edge_index(edge_dict):\n",
    "    source_nodes = []\n",
    "    target_nodes = []\n",
    "    for src, targets in edge_dict.items():\n",
    "        for target in targets:\n",
    "            source_nodes.append(src)\n",
    "            target_nodes.append(target)\n",
    "    edge_index = [source_nodes, target_nodes]\n",
    "    return torch.LongTensor(edge_index)\n",
    "\n",
    "\n",
    "def numpy_array_to_edge_index(np_array):\n",
    "    assert np_array.ndim == 2 and np_array.shape[0] == np_array.shape[1], \"Input must be a square matrix.\"\n",
    "    rows, cols = np.nonzero(np_array)\n",
    "    edge_index = np.vstack((rows, cols))\n",
    "    edge_index_tensor = torch.from_numpy(edge_index).long()\n",
    "    return edge_index_tensor\n",
    "\n",
    "\n",
    "def load_data(data, k=2, prefix=''):\n",
    "    pickle_file = {}\n",
    "    matrix_prefix = {}\n",
    "    for key in filelist:\n",
    "        pickle_file[key] = os.path.join(prefix, filelist[key])\n",
    "        matrix_prefix[key] = os.path.join(prefix, file_matrix_prefix[key])\n",
    "\n",
    "    if data == 'yelp':\n",
    "        data_file = loadmat(os.path.join(prefix, 'YelpChi.mat'))\n",
    "        labels = data_file['label'].flatten()\n",
    "        feat_data = data_file['features'].todense().A\n",
    "        with open(pickle_file['yelp_rur'], 'rb') as file:\n",
    "            relation1 = pickle.load(file)\n",
    "        file.close()\n",
    "        relation1 = dict_to_edge_index(relation1)\n",
    "        relation1_tree = []\n",
    "        for i in range(1, k + 1):\n",
    "            file_name = '{}{}.pkl'.format(matrix_prefix['yelp_rur'], i)\n",
    "            with open(file_name, 'rb') as file:\n",
    "                tree = pickle.load(file)\n",
    "            file.close()\n",
    "            relation1_tree.append(numpy_array_to_edge_index(tree))\n",
    "        with open(pickle_file['yelp_rtr'], 'rb') as file:\n",
    "            relation2 = pickle.load(file)\n",
    "        file.close()\n",
    "        relation2 = dict_to_edge_index(relation2)\n",
    "        relation2_tree = []\n",
    "        for i in range(1, k + 1):\n",
    "            file_name = '{}{}.pkl'.format(matrix_prefix['yelp_rtr'], i)\n",
    "            with open(file_name, 'rb') as file:\n",
    "                tree = pickle.load(file)\n",
    "            file.close()\n",
    "            relation2_tree.append(numpy_array_to_edge_index(tree))\n",
    "        with open(pickle_file['yelp_rsr'], 'rb') as file:\n",
    "            relation3 = pickle.load(file)\n",
    "        file.close()\n",
    "        relation3 = dict_to_edge_index(relation3)\n",
    "        relation3_tree = []\n",
    "        for i in range(1, k + 1):\n",
    "            file_name = '{}{}.pkl'.format(matrix_prefix['yelp_rsr'], i)\n",
    "            with open(file_name, 'rb') as file:\n",
    "                tree = pickle.load(file)\n",
    "            file.close()\n",
    "            relation3_tree.append(numpy_array_to_edge_index(tree))\n",
    "        return [[relation1, relation1_tree], [relation2, relation2_tree],\n",
    "                [relation3, relation3_tree]], feat_data, labels\n",
    "    elif data == 'amazon':\n",
    "        data_file = loadmat(os.path.join(prefix, 'Amazon.mat'))\n",
    "        labels = data_file['label'].flatten()\n",
    "        feat_data = data_file['features'].todense().A\n",
    "        with open(pickle_file['amz_upu'], 'rb') as file:\n",
    "            relation1 = pickle.load(file)\n",
    "        file.close()\n",
    "        relation1 = dict_to_edge_index(relation1)\n",
    "        relation1_tree = []\n",
    "        for i in range(1, k + 1):\n",
    "            file_name = '{}{}.pkl'.format(matrix_prefix['amz_upu'], i)\n",
    "            with open(file_name, 'rb') as file:\n",
    "                tree = pickle.load(file)\n",
    "            file.close()\n",
    "            relation1_tree.append(numpy_array_to_edge_index(tree))\n",
    "        with open(pickle_file['amz_usu'], 'rb') as file:\n",
    "            relation2 = pickle.load(file)\n",
    "        file.close()\n",
    "        relation2 = dict_to_edge_index(relation2)\n",
    "        relation2_tree = []\n",
    "        for i in range(1, k + 1):\n",
    "            file_name = '{}{}.pkl'.format(matrix_prefix['amz_usu'], i)\n",
    "            with open(file_name, 'rb') as file:\n",
    "                tree = pickle.load(file)\n",
    "            file.close()\n",
    "            relation2_tree.append(numpy_array_to_edge_index(tree))\n",
    "        with open(pickle_file['amz_uvu'], 'rb') as file:\n",
    "            relation3 = pickle.load(file)\n",
    "        file.close()\n",
    "        relation3_tree = []\n",
    "        for i in range(1, k + 1):\n",
    "            file_name = '{}{}.pkl'.format(matrix_prefix['amz_uvu'], i)\n",
    "            with open(file_name, 'rb') as file:\n",
    "                tree = pickle.load(file)\n",
    "            file.close()\n",
    "            relation3_tree.append(numpy_array_to_edge_index(tree))\n",
    "        relation3 = dict_to_edge_index(relation3)\n",
    "\n",
    "        return [[relation1, relation1_tree], [relation2, relation2_tree],\n",
    "                [relation3, relation3_tree]], feat_data, labels\n",
    "\n",
    "\n",
    "def Visualization(labels, embedding, prefix):\n",
    "    train_pos, train_neg = pos_neg_split(list(range(len(labels))), labels)\n",
    "    sampled_idx_train = undersample(train_pos, train_neg, scale=1)\n",
    "    tsne = TSNE(n_components=2, random_state=43)\n",
    "    sampled_idx_train = np.array(sampled_idx_train)\n",
    "    sampled_idx_train = np.random.choice(sampled_idx_train, size=5000, replace=True)\n",
    "    ps = embedding[sampled_idx_train]\n",
    "    ls = labels[sampled_idx_train]\n",
    "\n",
    "    X_reduced = tsne.fit_transform(ps)\n",
    "\n",
    "    scaler = MinMaxScaler(feature_range=(0, 1))\n",
    "    X_scaled = scaler.fit_transform(X_reduced)\n",
    "    print(X_scaled.shape)\n",
    "\n",
    "    plt.figure(figsize=(8, 8))\n",
    "\n",
    "    plt.scatter(X_scaled[ls == 0, 0], X_scaled[ls == 0, 1], c='#14517C', label='Label 0', s=3)\n",
    "\n",
    "    plt.scatter(X_scaled[ls == 1, 0], X_scaled[ls == 1, 1], c='#FA7F6F', label='Label 1', s=3)\n",
    "\n",
    "    ax = plt.gca()\n",
    "    ax.spines['top'].set_visible(False)\n",
    "    ax.spines['right'].set_visible(False)\n",
    "    ax.spines['left'].set_visible(False)\n",
    "    ax.spines['bottom'].set_visible(False)\n",
    "\n",
    "    plt.xticks([])\n",
    "    plt.yticks([])\n",
    "\n",
    "    plt.xlim(0, 1)\n",
    "    plt.ylim(0, 1)\n",
    "    filepath = os.path.join(prefix, 'HOGRL.png')\n",
    "    plt.savefig(filepath)\n",
    "    plt.show()\n",
    "\n",
    "\n",
    "def normalize(mx):\n",
    "    rowsum = np.array(mx.sum(1)) + 0.01\n",
    "    r_inv = np.power(rowsum, -1).flatten()\n",
    "    r_inv[np.isinf(r_inv)] = 0.\n",
    "    r_mat_inv = sp.diags(r_inv)\n",
    "    mx = r_mat_inv.dot(mx)\n",
    "    return mx\n",
    "\n",
    "\n",
    "def pos_neg_split(nodes, labels):\n",
    "    # 正负样本分割\n",
    "    pos_nodes = []\n",
    "    neg_nodes = cp.deepcopy(nodes)\n",
    "    aux_nodes = cp.deepcopy(nodes)\n",
    "    for idx, label in enumerate(labels):\n",
    "        if label == 1:\n",
    "            pos_nodes.append(aux_nodes[idx])\n",
    "            neg_nodes.remove(aux_nodes[idx])\n",
    "\n",
    "    return pos_nodes, neg_nodes\n",
    "\n",
    "\n",
    "def undersample(pos_nodes, neg_nodes, scale=1):\n",
    "    # 对负样本进行下采样，平衡正负样本数量\n",
    "    aux_nodes = cp.deepcopy(neg_nodes)\n",
    "    aux_nodes = rd.sample(aux_nodes, k=int(len(pos_nodes) * scale))\n",
    "    batch_nodes = pos_nodes + aux_nodes\n",
    "\n",
    "    return batch_nodes\n",
    "\n",
    "\n",
    "def calculate_g_mean(y_true, y_pred):\n",
    "    cm = confusion_matrix(y_true, y_pred)\n",
    "    sensitivities = []\n",
    "    for i in range(len(cm)):\n",
    "        TP = cm[i, i]\n",
    "        FN = cm[i, :].sum() - TP\n",
    "        sensitivity = TP / (TP + FN) if (TP + FN) != 0 else 0\n",
    "        sensitivities.append(sensitivity)\n",
    "    g_mean = np.prod(sensitivities) ** (1 / len(sensitivities))\n",
    "    return g_mean\n",
    "\n",
    "\n",
    "def iterate_batches(indices, batch_size, shuffle=True):\n",
    "    \"\"\"\n",
    "        将索引列表划分为指定大小的批次\n",
    "        :param indices: 样本索引列表\n",
    "        :param batch_size: 每个批次的大小\n",
    "        :param shuffle: 是否打乱顺序\n",
    "        :return: 生成批次索引的迭代器\n",
    "    \"\"\"\n",
    "    if shuffle:\n",
    "        rd.shuffle(indices)\n",
    "    for i in range(0, len(indices), batch_size):\n",
    "        yield indices[i:i + batch_size]\n",
    "\n",
    "\n",
    "def test(idx_eval, y_eval, model, feat_data, edge_indexs, device, batch_size=64):\n",
    "    model.eval()\n",
    "    all_probs = []\n",
    "    all_labels = []\n",
    "\n",
    "    # 分批处理\n",
    "    for batch_centers in iterate_batches(idx_eval, batch_size, shuffle=False):\n",
    "        subgraph_data = []\n",
    "\n",
    "        # 为每个中心节点构建子图\n",
    "        for xi in batch_centers:\n",
    "            subgraph = create_node_subgraph(xi, feat_data, edge_indexs, device)\n",
    "            subgraph_data.append(subgraph)\n",
    "\n",
    "        # 获取中心节点预测\n",
    "        with torch.no_grad():\n",
    "            center_logits, _ = model(subgraph_data)  # [B, 2]\n",
    "            probs = torch.softmax(center_logits, dim=-1)[:, 1]  # 正类概率\n",
    "            all_probs.extend(probs.cpu().numpy())\n",
    "            all_labels.extend([labels[xi] for xi in batch_centers])\n",
    "\n",
    "    # 计算指标\n",
    "    auc_score = roc_auc_score(all_labels, all_probs)\n",
    "    ap_score = average_precision_score(all_labels, all_probs)\n",
    "    pred_labels = (np.array(all_probs) >= 0.5).astype(int)\n",
    "    f1 = f1_score(all_labels, pred_labels, average='macro')\n",
    "    g_mean = calculate_g_mean(all_labels, pred_labels)\n",
    "\n",
    "    return auc_score, ap_score, f1, g_mean\n",
    "\n",
    "\n",
    "class BSNE_Transformer(nn.Module):\n",
    "    def __init__(self, in_feat, out_feat, relation_nums=3, d_model=256,\n",
    "                 nhead=8, num_layers=3, dim_feedforward=256,\n",
    "                 drop_rate=0.5):\n",
    "        super().__init__()\n",
    "        self.relation_nums = relation_nums\n",
    "        self.d_model = d_model\n",
    "        self.nhead = nhead\n",
    "\n",
    "        self.feature_proj = nn.Sequential(\n",
    "            nn.Linear(in_feat, d_model),\n",
    "            nn.LayerNorm(d_model),\n",
    "            nn.ReLU()\n",
    "        )\n",
    "        self.norm = nn.LayerNorm(d_model)\n",
    "\n",
    "        self.relation_encoders = nn.ModuleList([\n",
    "            TransformerEncoder(\n",
    "                TransformerEncoderLayer(\n",
    "                    d_model=d_model,\n",
    "                    nhead=nhead,\n",
    "                    dim_feedforward=dim_feedforward,\n",
    "                    dropout=drop_rate,\n",
    "                    batch_first=True\n",
    "                ),\n",
    "                num_layers=num_layers\n",
    "            ) for _ in range(relation_nums)\n",
    "        ])\n",
    "\n",
    "        self.classifier = nn.Sequential(\n",
    "            nn.Linear(relation_nums * d_model, 512),\n",
    "            nn.ReLU(),\n",
    "            nn.Dropout(drop_rate),\n",
    "            nn.Linear(512, out_feat)\n",
    "        )\n",
    "        for p in self.parameters():\n",
    "            if p.dim() > 1:\n",
    "                nn.init.xavier_uniform_(p)\n",
    "\n",
    "    #         self.node_feature_extractor = nn.Sequential(\n",
    "    #             nn.Linear(relation_nums * d_model, d_model),\n",
    "    #             nn.ReLU(),\n",
    "    #             nn.LayerNorm(d_model)\n",
    "    #         )\n",
    "\n",
    "    def forward(self, subgraph_batch):\n",
    "        # 为每个子图单独处理\n",
    "        center_logits_list = []\n",
    "        center_features_list = []\n",
    "\n",
    "        for sg in subgraph_batch:\n",
    "            # 处理单个子图\n",
    "            features = self.feature_proj(sg['features'].unsqueeze(0))  # [1, num_nodes, d_model]\n",
    "            features = self.norm(features)\n",
    "\n",
    "            # 为每个关系类型单独处理\n",
    "            rel_outputs = []\n",
    "            num_nodes = features.size(1)\n",
    "\n",
    "            for rel_idx in range(self.relation_nums):\n",
    "                # 构建当前关系的邻接矩阵\n",
    "                edge_index = sg['edges'][rel_idx]\n",
    "                adj = torch.zeros(num_nodes, num_nodes,\n",
    "                                  dtype=torch.float, device=features.device)\n",
    "\n",
    "                if edge_index.size(1) > 0:\n",
    "                    src, dst = edge_index\n",
    "                    adj[src, dst] = 1.0\n",
    "\n",
    "                # 添加自环\n",
    "                adj[range(num_nodes), range(num_nodes)] = 1.0\n",
    "\n",
    "                # 创建注意力掩码\n",
    "                adj_mask = adj.masked_fill(adj == 0.0, float('-inf'))\n",
    "                adj_mask = adj_mask.masked_fill(adj == 1.0, 0.0)\n",
    "\n",
    "                # 扩展为多头注意力掩码\n",
    "                adj_mask = adj_mask.unsqueeze(0).unsqueeze(0)  # [1, 1, num_nodes, num_nodes]\n",
    "                adj_mask = adj_mask.expand(1, self.nhead, num_nodes, num_nodes)\n",
    "                adj_mask = adj_mask.reshape(-1, num_nodes, num_nodes)\n",
    "\n",
    "                # 关系编码\n",
    "                encoder_output = self.relation_encoders[rel_idx](\n",
    "                    src=features,\n",
    "                    mask=adj_mask\n",
    "                )\n",
    "                rel_outputs.append(encoder_output)\n",
    "\n",
    "            # 合并多关系特征\n",
    "            combined = torch.cat(rel_outputs, dim=-1)  # [1, num_nodes, rel*d_model]\n",
    "\n",
    "            # 提取中心节点特征（第一个节点）\n",
    "            center_features = combined[:, 0, :]  # [1, rel*d_model]\n",
    "            center_logits = self.classifier(center_features)  # [1, out_feat]\n",
    "            center_logits = F.log_softmax(center_logits, dim=-1)\n",
    "\n",
    "            center_logits_list.append(center_logits)\n",
    "            center_features_list.append(center_features)\n",
    "\n",
    "        # 将结果堆叠为批次\n",
    "        center_logits = torch.cat(center_logits_list, dim=0)\n",
    "        center_features = torch.cat(center_features_list, dim=0)\n",
    "\n",
    "        return center_logits, center_features\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "817224f9",
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cuda\n",
      "loading data...\n",
      "\n",
      "=== Starting Pretraining ===\n",
      "local_loss: 0.0016437826581571593,global_loss: -1.4995879101397158\n",
      "local_loss: -0.06081257776766403,global_loss: -0.8467811035176434\n",
      "local_loss: -0.0159433407145749,global_loss: -0.9718226732169523\n",
      "local_loss: -0.07453283943930285,global_loss: -1.1587293816081405\n",
      "local_loss: -0.04494411257001846,global_loss: -1.4559385778312046\n",
      "local_loss: -0.04378341145409235,global_loss: -1.5906977058770195\n",
      "local_loss: -0.024076403958424326,global_loss: -0.7379650959566844\n",
      "local_loss: 0.001808529896316339,global_loss: -1.4589749982572828\n",
      "Pretrain Epoch: 000, BSNE Loss: -1.2476\n",
      "local_loss: -0.07385012216141269,global_loss: -1.7537944138889343\n",
      "local_loss: -0.09356777583915062,global_loss: -2.0346644520246393\n",
      "local_loss: -0.006612906164699064,global_loss: -1.5758488954546643\n",
      "local_loss: -0.09544890978152762,global_loss: -1.311986692521368\n",
      "local_loss: -0.052154280631391374,global_loss: -2.0821388638575864\n",
      "local_loss: -0.023301668462266632,global_loss: -1.3507880495655984\n",
      "local_loss: -0.09485202620138886,global_loss: -1.9101305165119677\n",
      "local_loss: -0.09008115687484734,global_loss: -1.3668432760384501\n",
      "local_loss: -0.07375713770470754,global_loss: -1.2242473739572701\n",
      "local_loss: -0.06001398906033093,global_loss: -1.3235085181027881\n",
      "local_loss: -0.08603412556013437,global_loss: -1.5983361399516889\n",
      "local_loss: -0.007593601253403491,global_loss: -1.6697180783486787\n",
      "local_loss: 0.0014166151653360562,global_loss: -1.093280529173974\n",
      "local_loss: -0.0236278931980716,global_loss: -1.336892208392504\n",
      "local_loss: -0.09502995458099266,global_loss: -1.4587137215405024\n",
      "local_loss: -0.04509358966584946,global_loss: -1.3693985743197645\n",
      "local_loss: -0.02334937488461829,global_loss: -1.4538425025461439\n",
      "local_loss: -0.09497687806397838,global_loss: -1.7184865465471322\n",
      "local_loss: -0.07384546732248737,global_loss: -1.6539757486654156\n",
      "local_loss: -0.08646915999413501,global_loss: -1.4144751779544689\n",
      "local_loss: -0.0594714505536864,global_loss: -1.0667636861836218\n",
      "local_loss: -0.06012970154405489,global_loss: -1.129729211902577\n",
      "local_loss: -0.007140095056155304,global_loss: -0.8841785480959904\n",
      "local_loss: -0.023644559792077368,global_loss: -1.2797291285938253\n",
      "local_loss: -0.0600395679678884,global_loss: -1.6575943884573203\n",
      "local_loss: 0.001878318420399068,global_loss: -0.897350126035288\n",
      "local_loss: -0.03472399376828294,global_loss: -0.6491839191946899\n",
      "local_loss: 0.0012418657001202825,global_loss: -1.5724495650615293\n",
      "local_loss: -0.022695846852769562,global_loss: -1.2528747152836182\n",
      "local_loss: -0.022796868210714596,global_loss: -1.5393546720092268\n",
      "local_loss: -0.08352445101701497,global_loss: -1.5065063252162074\n",
      "local_loss: -0.09490165966962984,global_loss: -1.4210833355445651\n",
      "local_loss: -0.056182195492472095,global_loss: -1.4421613533592126\n",
      "local_loss: -0.08370990661175898,global_loss: -1.433939550977634\n",
      "local_loss: -0.060057710486431465,global_loss: -1.4870078358862002\n",
      "local_loss: 0.0018527168063098287,global_loss: -0.9771732770628935\n",
      "local_loss: -0.0611413115751824,global_loss: -0.7295338474389977\n",
      "local_loss: -0.008172515623259099,global_loss: -1.1948807372571442\n",
      "local_loss: 0.0007830348212721612,global_loss: -1.0951841534975426\n",
      "local_loss: 0.0025734675241995235,global_loss: -1.5162145361529369\n",
      "Pretrain Epoch: 005, BSNE Loss: -1.2675\n",
      "local_loss: -0.04456326727306789,global_loss: -0.8701624916994318\n",
      "local_loss: -0.05935840608739566,global_loss: -1.4298426664248038\n",
      "local_loss: -0.08793685549562212,global_loss: -0.9697729044699812\n",
      "local_loss: 0.00201732780528095,global_loss: -1.3572686192097552\n",
      "local_loss: -0.007101357713684136,global_loss: -1.031976316717601\n",
      "local_loss: -0.04415140485430278,global_loss: -1.2885090938125225\n",
      "local_loss: 0.0014770145387084522,global_loss: -1.1241950519860915\n",
      "local_loss: -0.044238325456283804,global_loss: -1.2421870057168034\n",
      "local_loss: -0.007537357175266865,global_loss: -1.2617143231883683\n",
      "local_loss: -0.02364081321440577,global_loss: -1.4908638963471237\n",
      "local_loss: -0.09755727750640224,global_loss: -1.3133111155010917\n",
      "local_loss: -0.015364142513077505,global_loss: -1.7298238278083382\n",
      "local_loss: -0.022933402583680092,global_loss: -1.007855711660151\n",
      "local_loss: -0.060410218032266386,global_loss: -1.257495439199751\n",
      "local_loss: -0.02360539102551068,global_loss: -1.6618477474689206\n",
      "local_loss: -0.09104018993265055,global_loss: -1.1709203741635612\n",
      "local_loss: 0.0016180788646768765,global_loss: -1.114501027628469\n",
      "local_loss: -0.022597482158991662,global_loss: -1.5142160399773459\n",
      "local_loss: -0.08304488907505068,global_loss: -1.5077010585270656\n",
      "local_loss: 0.001964194521938316,global_loss: -1.222315003033664\n",
      "local_loss: -0.015233182593556717,global_loss: -1.4547332546895637\n",
      "local_loss: -0.09081848336061743,global_loss: -1.3475055088304642\n",
      "local_loss: -0.023613485903934462,global_loss: -0.9845442824883405\n",
      "local_loss: -0.05562848537240723,global_loss: -1.35741399937561\n",
      "local_loss: 0.0012936479448480075,global_loss: -0.9875452634992636\n",
      "local_loss: -0.04323106372590806,global_loss: -1.4016976432835488\n",
      "local_loss: -0.09657536761827097,global_loss: -1.378447675694928\n",
      "local_loss: -0.006763189342392749,global_loss: -1.1824985052850052\n",
      "local_loss: -0.0074403321668382,global_loss: -1.4822674883215095\n",
      "local_loss: -0.09862683374537194,global_loss: -1.2476470493047405\n",
      "local_loss: -0.08902516692820951,global_loss: -1.6392262040237575\n",
      "local_loss: -0.030145580448420292,global_loss: -1.4493414781097207\n",
      "local_loss: -0.044620033601425406,global_loss: -0.933040794158693\n",
      "local_loss: -0.0728313470087905,global_loss: -1.7121412152514677\n",
      "local_loss: -0.024468341395935952,global_loss: -0.7543714046962638\n",
      "local_loss: 0.00112355331855755,global_loss: -1.4846592027602763\n",
      "local_loss: -0.04374375449710179,global_loss: -1.5231266545131155\n",
      "local_loss: -0.07377579233106135,global_loss: -1.2119222427113931\n",
      "local_loss: -0.007363334319054068,global_loss: -1.0043102563063573\n",
      "local_loss: 0.002870979851121259,global_loss: -1.8362892915416285\n",
      "Pretrain Epoch: 010, BSNE Loss: -1.3403\n",
      "local_loss: 0.0024601505795322524,global_loss: -1.6580197589422905\n",
      "local_loss: -0.044583986982918385,global_loss: -1.264834236389326\n",
      "local_loss: -0.08366688908812965,global_loss: -1.3424876314445633\n",
      "local_loss: -0.02235431791483941,global_loss: -1.6553680031830262\n",
      "local_loss: -0.020923646894113145,global_loss: -1.1805934660293091\n",
      "local_loss: -0.006481957598443949,global_loss: -1.0424892393182579\n",
      "local_loss: -0.02363748670756402,global_loss: -1.3398128624515413\n",
      "local_loss: 0.0016776267431239096,global_loss: -0.9739794968719406\n",
      "local_loss: -0.10516696602680309,global_loss: -1.633189805684597\n",
      "local_loss: -0.09623697218166614,global_loss: -1.4194967935880676\n",
      "local_loss: -0.09728846031743338,global_loss: -0.7421535400144208\n",
      "local_loss: -0.04442317932794722,global_loss: -1.1889752562958513\n",
      "local_loss: -0.05640995335510812,global_loss: -1.1721903815307446\n",
      "local_loss: -0.04405794477129497,global_loss: -1.0761119598546156\n",
      "local_loss: -0.07401650305897302,global_loss: -1.5133889641011598\n",
      "local_loss: -0.044025917208835655,global_loss: -1.5130728636570132\n",
      "local_loss: -0.034120987768672784,global_loss: -1.3864374026710895\n",
      "local_loss: 0.001867510111479797,global_loss: -1.46057868984759\n",
      "local_loss: 0.001092445370617632,global_loss: -0.8973248532483665\n",
      "local_loss: -0.023480413676600864,global_loss: -1.4817218642925531\n",
      "local_loss: 0.0014452481013595868,global_loss: -0.8357957373564373\n",
      "local_loss: -0.09662261399233107,global_loss: -2.000386724720088\n",
      "local_loss: -0.023253258137443344,global_loss: -1.4177053990640665\n",
      "local_loss: 0.0019679751594069267,global_loss: -1.7139671699522108\n",
      "local_loss: -0.060441984469615254,global_loss: -1.428371766725335\n",
      "local_loss: -0.06046956155193091,global_loss: -1.3954320000010727\n",
      "local_loss: -0.062051096993218494,global_loss: -1.9422847035978366\n",
      "local_loss: -0.059944552487484024,global_loss: -1.6267038166275307\n",
      "local_loss: -0.09407849114211751,global_loss: -1.4912524867762276\n",
      "local_loss: -0.04435666054437788,global_loss: -1.47906361325186\n",
      "local_loss: -0.07390583263606275,global_loss: -1.1590162674081383\n",
      "local_loss: -0.04485802075643145,global_loss: -1.7412327758295418\n"
     ]
    }
   ],
   "source": [
    "# 进行完整测试（修改前）\n",
    "\n",
    "# 参数设置\n",
    "args = {\n",
    "    \"dataset\": \"amazon\",\n",
    "    \"batch_size\": 8,\n",
    "    \"sample_size\": 20,\n",
    "    \"weight_decay\": 0.00005,\n",
    "    \"emb_size\": 32,\n",
    "    \"pretrain_epochs\": 100,\n",
    "    \"pretrain_lr\": 0.0001,\n",
    "    \"finetune_lr\": 0.0005,\n",
    "    \"num_epochs\": 100,\n",
    "    \"pretrain_patience\": 20,\n",
    "    \"patience\": 30,\n",
    "    \"tsne_weight\": 0.3,\n",
    "    \"weight\": 0.6,\n",
    "    \"layers\": 7,\n",
    "    \"test_size\": 0.6,\n",
    "    \"val_size\": 0.5,\n",
    "    \"layers_tree\": 7,\n",
    "    \"seed\": 76,\n",
    "    \"num_heads\": 2,\n",
    "    \"drop_rate\": 0.5\n",
    "}\n",
    "\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "print(device)\n",
    "\n",
    "timestamp = time.strftime(\"%Y%m%d-%H%M%S\")\n",
    "writer = SummaryWriter(f'runs/{args[\"dataset\"]}_{timestamp}')\n",
    "\n",
    "print('loading data...')\n",
    "prefix = \"../../data/\"\n",
    "\n",
    "edge_indexs, feat_data, labels = load_data(args['dataset'], args['layers_tree'], prefix)\n",
    "\n",
    "np.random.seed(args['seed'])\n",
    "rd.seed(args['seed'])\n",
    "\n",
    "if args['dataset'] == 'yelp':\n",
    "    index = list(range(len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels, stratify=labels,\n",
    "                                                                    test_size=args['test_size'], random_state=2,\n",
    "                                                                    shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"YelpChi_shortest_distance.pkl\")\n",
    "elif args['dataset'] == 'amazon':\n",
    "    index = list(range(3305, len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels[3305:],\n",
    "                                                                    stratify=labels[3305:],\n",
    "                                                                    test_size=args['test_size'],\n",
    "                                                                    random_state=2, shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"Amazon_shortest_distance.pkl\")\n",
    "\n",
    "with open(dist_path, 'rb') as f:\n",
    "    dist_data = pickle.load(f)\n",
    "    dist_matrix = torch.tensor(dist_data['dist_matrix']).to(device)\n",
    "\n",
    "# 准备特征数据\n",
    "feat_data = torch.tensor(feat_data).float()\n",
    "# 特征归一化\n",
    "scaler = MinMaxScaler()\n",
    "feat_data = torch.tensor(scaler.fit_transform(feat_data)).float().to(device)\n",
    "\n",
    "# 初始化模型\n",
    "bsne_model = BSNE_Transformer(\n",
    "    in_feat=feat_data.shape[1],\n",
    "    out_feat=2,\n",
    "    relation_nums=len(edge_indexs),\n",
    "    d_model=64,\n",
    "    nhead=args['num_heads'],\n",
    "    num_layers=3,\n",
    "    dim_feedforward=256,\n",
    "    drop_rate=args['drop_rate']\n",
    ").to(device)\n",
    "\n",
    "# 将边索引转移到设备\n",
    "for edge_index in edge_indexs:\n",
    "    edge_index[0] = edge_index[0].to(device)\n",
    "    edge_index[1] = [tensor.to(device) for tensor in edge_index[1]]\n",
    "\n",
    "print(\"\\n=== Starting Pretraining ===\")\n",
    "\n",
    "bsne_model.classifier.requires_grad_(False)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    filter(lambda p: p.requires_grad, bsne_model.parameters()),\n",
    "    lr=args['pretrain_lr'],\n",
    "    weight_decay=5e-5\n",
    ")\n",
    "pretrain_best_loss = float('inf')\n",
    "pretrain_no_improve = 0\n",
    "pretrain_early_stop = False\n",
    "\n",
    "for epoch in range(args['pretrain_epochs']):\n",
    "    if pretrain_early_stop:\n",
    "        break\n",
    "\n",
    "    bsne_model.train()\n",
    "    optimizer.zero_grad()\n",
    "\n",
    "\n",
    "\n",
    "    # 计算TSNE损失\n",
    "    #     tsne_loss = calculate_tsne_loss(\n",
    "    #         model=bsne_model,\n",
    "    #         feat_data=feat_data,\n",
    "    #         dist_matrix=dist_matrix,\n",
    "    #         batch_centers=batch_centers,\n",
    "    #         sample_size=args['sample_size'],\n",
    "    #         temperature=100,\n",
    "    #         eps=1e-10,\n",
    "    #         edge_indexs=edge_indexs,\n",
    "    #         device=device\n",
    "    #     )\n",
    "    # 采样中心节点\n",
    "    batch_centers = rd.sample(range(feat_data.shape[0]), args['batch_size'])\n",
    "\n",
    "\n",
    "    sample_size = args['sample_size']\n",
    "    temperature = 100\n",
    "    eps = 1e-10\n",
    "\n",
    "    total_loss = 0.0\n",
    "    batch_size = len(batch_centers)\n",
    "\n",
    "    # 为每个中心节点构建Bp子图\n",
    "    bp_subgraphs = []\n",
    "    for xi in batch_centers:\n",
    "        # 根据距离矩阵计算采样概率\n",
    "        dist_row = dist_matrix[xi].cpu().numpy()\n",
    "        probs = np.exp(-dist_row)\n",
    "        probs[xi] = 0  # 排除自身\n",
    "        probs /= probs.sum()\n",
    "\n",
    "        # 采样邻居节点\n",
    "        available_nodes = len(dist_row) - 1\n",
    "        actual_sample_size = min(sample_size, available_nodes)\n",
    "\n",
    "        if actual_sample_size > 0:\n",
    "            neighbors = np.random.choice(len(dist_row), size=actual_sample_size, p=probs, replace=False)\n",
    "            # 将中心节点和采样的邻居合并\n",
    "            subgraph_nodes = [xi] + neighbors.tolist()\n",
    "        else:\n",
    "            # 如果没有可用邻居，只使用中心节点\n",
    "            subgraph_nodes = [xi]\n",
    "\n",
    "        bp_subgraphs.append(subgraph_nodes)\n",
    "\n",
    "    # 为每个中心节点构建Bu子图（均匀采样）\n",
    "    bu_subgraphs = []\n",
    "    for xi in batch_centers:\n",
    "        dist_row = dist_matrix[xi].cpu().numpy()\n",
    "\n",
    "        # Uniformly sample nodes to build the Bu subgraph.\n",
    "        # BUGFIX: the original sampled from ALL nodes (including the center xi),\n",
    "        # so [xi] + neighbors could contain the center twice, contradicting\n",
    "        # available_nodes = len(dist_row) - 1. Exclude xi from the pool.\n",
    "        available_nodes = len(dist_row) - 1\n",
    "        actual_sample_size = min(sample_size, available_nodes)\n",
    "\n",
    "        if actual_sample_size > 0:\n",
    "            candidates = np.delete(np.arange(len(dist_row)), xi)\n",
    "            neighbors = np.random.choice(candidates, size=actual_sample_size, replace=False)\n",
    "            # Merge the center node with the sampled neighbors\n",
    "            subgraph_nodes = [xi] + neighbors.tolist()\n",
    "        else:\n",
    "            # 如果没有可用邻居，只使用中心节点\n",
    "            subgraph_nodes = [xi]\n",
    "\n",
    "        bu_subgraphs.append(subgraph_nodes)\n",
    "    # 计算Bp子图中所有节点的特征\n",
    "    bp_features_batch = []\n",
    "    for bp_nodes in bp_subgraphs:\n",
    "        bp_node_features = []\n",
    "\n",
    "        # 为子图中的每个节点计算特征\n",
    "        for node_idx in bp_nodes:\n",
    "            # 构建该节点的一阶邻居子图\n",
    "            node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "\n",
    "            # 获取节点特征\n",
    "            _, node_feature = bsne_model([node_subgraph])\n",
    "\n",
    "            bp_node_features.append(node_feature.squeeze(0))\n",
    "\n",
    "        # 将该Bp子图所有节点的特征收集起来\n",
    "        bp_features_batch.append(torch.stack(bp_node_features))  # [sample_size+1, d_model]\n",
    "\n",
    "    # 计算Bu子图中所有节点的特征\n",
    "    bu_features_batch = []\n",
    "    for bu_nodes in bu_subgraphs:\n",
    "        bu_node_features = []\n",
    "\n",
    "        # 为子图中的每个节点计算特征\n",
    "        for node_idx in bu_nodes:\n",
    "            # 构建该节点的一阶邻居子图\n",
    "            node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "\n",
    "            # 获取节点特征\n",
    "            _, node_feature = bsne_model([node_subgraph])\n",
    "\n",
    "            bu_node_features.append(node_feature.squeeze(0))\n",
    "\n",
    "        # 将该Bu子图所有节点的特征收集起来\n",
    "        bu_features_batch.append(torch.stack(bu_node_features))  # [sample_size+1, d_model]\n",
    "\n",
    "    # 针对每一对Bp和Bu子图计算loss\n",
    "    for i in range(batch_size):\n",
    "        # 获取中心节点索引和特征\n",
    "        center_idx = batch_centers[i]\n",
    "        center_feature = bp_features_batch[i][0]  # 中心节点是第一个\n",
    "\n",
    "        # 仅获取Bp子图中的其他节点（排除中心节点）\n",
    "        other_bp_indices = bp_subgraphs[i][1:]\n",
    "        other_bp_features = bp_features_batch[i][1:]\n",
    "\n",
    "        # 计算中心节点到其他Bp节点的原始距离\n",
    "        orig_dists_bp = dist_matrix[center_idx][other_bp_indices].cpu().numpy()\n",
    "\n",
    "        # 计算中心节点到其他Bp节点的特征距离\n",
    "        feat_dists_bp = torch.cdist(center_feature.unsqueeze(0), other_bp_features).squeeze(0)\n",
    "\n",
    "        # 计算P向量（基于节点路径距离）\n",
    "        P = (1.0 + orig_dists_bp ** 2) ** -1\n",
    "        P_sum = P.sum()\n",
    "        P = P / (P_sum + eps)\n",
    "        P = torch.tensor(P, device=device)\n",
    "\n",
    "        # 计算Q向量（基于特征距离）\n",
    "        Q = (1.0 + feat_dists_bp ** 2 / temperature) ** -1\n",
    "        Q_sum = Q.sum()\n",
    "        Q = Q / (Q_sum + eps)\n",
    "\n",
    "        # Local loss (KL-divergence-style term between P and Q).\n",
    "        # BUGFIX: the original wrote len(bp_features_batch[i]-1), i.e. the length\n",
    "        # of the tensor (sample_size + 1), not the intended neighbor count\n",
    "        # len(...) - 1. Also guard against a center-only subgraph (0 neighbors).\n",
    "        # NOTE(review): a true KL divergence would be (P * (log P - log Q)).sum();\n",
    "        # confirm whether the unweighted sum of log-ratios is intentional.\n",
    "        n_neighbors = max(len(bp_features_batch[i]) - 1, 1)\n",
    "        loss_local = (torch.log(P + eps) - torch.log(Q + eps)).sum() / n_neighbors\n",
    "\n",
    "        # 计算k_Bp\n",
    "        # 获取中心节点到图中所有节点的距离\n",
    "        all_dists = dist_matrix[center_idx]  # [N]\n",
    "        # 计算所有节点的未归一化概率值 (1 + d^2)^{-1}\n",
    "        unnorm_probs_all = (1.0 + all_dists ** 2) ** -1\n",
    "        # 排除中心节点自身（设置自身概率为0）\n",
    "        unnorm_probs_all = unnorm_probs_all.clone()  # 避免修改原始数据\n",
    "        unnorm_probs_all[center_idx] = 0\n",
    "        Z_i = unnorm_probs_all.sum()  # 标量值\n",
    "        sum_p_cond = P.sum()\n",
    "        N = dist_matrix.shape[0]\n",
    "        k_Bp = (sum_p_cond * N / len(other_bp_indices))/Z_i\n",
    "\n",
    "        # 计算中心节点到Bu子图节点的特征距离\n",
    "        bu_features = bu_features_batch[i]\n",
    "        feat_dists_bu = torch.cdist(center_feature.unsqueeze(0), bu_features).squeeze(0)\n",
    "\n",
    "        # 计算指数距离之和\n",
    "        sum_e_bu = torch.exp(-feat_dists_bu).sum()\n",
    "        sum_e_bp = torch.exp(-feat_dists_bp).sum()\n",
    "\n",
    "        # 全局损失\n",
    "        global_ratio = k_Bp * (sum_e_bu / (sum_e_bp + eps))\n",
    "        loss_global = torch.log(global_ratio.clamp(min=eps, max=1e10))  # 限制范围\n",
    "\n",
    "        # 合并损失\n",
    "        total_loss += loss_local + loss_global\n",
    "#         print(f\"local_loss: {loss_local.item()},global_loss: {loss_global.item()}\")\n",
    "\n",
    "    bsne_loss = total_loss / batch_size\n",
    "\n",
    "    bsne_loss.backward()\n",
    "    optimizer.step()\n",
    "\n",
    "    # 更新最佳损失和早停计数器\n",
    "    if bsne_loss.item() < pretrain_best_loss:\n",
    "        pretrain_best_loss = bsne_loss.item()\n",
    "        pretrain_no_improve = 0\n",
    "    else:\n",
    "        pretrain_no_improve += 1\n",
    "\n",
    "    if pretrain_no_improve >= args['pretrain_patience']:\n",
    "        print(f\"Pretrain early stopping at epoch {epoch}\")\n",
    "        pretrain_early_stop = True\n",
    "\n",
    "    writer.add_scalar('Pretrain/TSNE_Loss', bsne_loss.item(), epoch)\n",
    "\n",
    "    if epoch % 5 == 0:\n",
    "        print(f'Pretrain Epoch: {epoch:03d}, BSNE Loss: {bsne_loss.item():.4f}')\n",
    "\n",
    "print(\"\\n=== Starting Fine-tuning ===\")\n",
    "bsne_model.classifier.requires_grad_(True)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    bsne_model.parameters(),\n",
    "    lr=args['finetune_lr'],\n",
    "    weight_decay=5e-5\n",
    ")\n",
    "# Halve the LR when validation AUC plateaus for 10 eval steps.\n",
    "# `verbose=True` is deprecated (and removed in recent PyTorch releases);\n",
    "# LR changes remain visible through the periodic epoch logs.\n",
    "scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n",
    "    optimizer, mode='max', factor=0.5, patience=10\n",
    ")\n",
    "\n",
    "best_val_auc = 0.0\n",
    "best_model_state = None\n",
    "train_pos, train_neg = pos_neg_split(idx_train, y_train)\n",
    "\n",
    "no_improve_epochs = 0\n",
    "early_stop = False\n",
    "\n",
    "for epoch in range(args['num_epochs']):\n",
    "    if early_stop:\n",
    "        break\n",
    "\n",
    "    bsne_model.train()\n",
    "    total_loss = 0.0\n",
    "\n",
    "    # 采样中心节点\n",
    "    batch_centers = rd.sample(train_pos + train_neg, args['batch_size'])\n",
    "    subgraph_data = []\n",
    "\n",
    "    for xi in batch_centers:\n",
    "        subgraph = create_node_subgraph(xi, feat_data, edge_indexs, device)\n",
    "        #         print(len(subgraph['features']))\n",
    "        subgraph_data.append(subgraph)\n",
    "\n",
    "    optimizer.zero_grad()\n",
    "\n",
    "    center_logits, _ = bsne_model(subgraph_data)  # [B, 2]\n",
    "\n",
    "    # 提取中心节点标签\n",
    "    labels_center = torch.tensor([labels[xi] for xi in batch_centers]).to(device).long()\n",
    "\n",
    "    # 计算分类损失\n",
    "    cls_loss = F.nll_loss(center_logits, labels_center)\n",
    "\n",
    "    cls_loss.backward()\n",
    "    optimizer.step()\n",
    "\n",
    "    total_loss += cls_loss.item()\n",
    "\n",
    "    avg_loss = total_loss / args['batch_size']\n",
    "    writer.add_scalar('FineTune/Train_Loss', avg_loss, epoch)\n",
    "\n",
    "    if epoch % 5 == 0:\n",
    "        val_auc, val_ap, val_f1, val_g_mean = test(idx_val, y_val, bsne_model, feat_data, edge_indexs, device)\n",
    "\n",
    "        writer.add_scalar('Validation/AUC', val_auc, epoch)\n",
    "        writer.add_scalar('Validation/F1', val_f1, epoch)\n",
    "        writer.add_scalar('Validation/GMean', val_g_mean, epoch)\n",
    "\n",
    "        print(f'Epoch: {epoch:03d} | Loss: {avg_loss:.4f} | Val AUC: {val_auc:.4f} | Val F1: {val_f1:.4f}')\n",
    "\n",
    "        scheduler.step(val_auc)\n",
    "\n",
    "        if val_auc > best_val_auc:\n",
    "            best_val_auc = val_auc\n",
    "            no_improve_epochs = 0\n",
    "            best_model_state = copy.deepcopy(bsne_model.state_dict())\n",
    "        else:\n",
    "            no_improve_epochs += 1\n",
    "\n",
    "        if no_improve_epochs >= args['patience']:\n",
    "            print(f\"Early stopping at epoch {epoch}\")\n",
    "            early_stop = True\n",
    "\n",
    "# Load the best model found during validation.\n",
    "# Guard: best_model_state remains None if no eval epoch ever improved on the\n",
    "# initial best_val_auc; load_state_dict(None) would crash in that case, so we\n",
    "# keep the current weights instead.\n",
    "if best_model_state is not None:\n",
    "    bsne_model.load_state_dict(best_model_state)\n",
    "\n",
    "# 最终测试\n",
    "test_auc, test_ap, test_f1, test_g_mean = test(idx_test, y_test, bsne_model, feat_data, edge_indexs, device)\n",
    "print(f'\\n=== Final Test Results ===')\n",
    "print(f'Test AUC: {test_auc:.4f} | Test AP: {test_ap:.4f} | Test F1: {test_f1:.4f} | G-mean: {test_g_mean:.4f}')\n",
    "writer.close()\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "8098a231",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cuda\n",
      "loading data...\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
      "\u001b[31mKeyboardInterrupt\u001b[39m                         Traceback (most recent call last)",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[2]\u001b[39m\u001b[32m, line 36\u001b[39m\n\u001b[32m     33\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33m'\u001b[39m\u001b[33mloading data...\u001b[39m\u001b[33m'\u001b[39m)\n\u001b[32m     34\u001b[39m prefix = \u001b[33m\"\u001b[39m\u001b[33m../../data/\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m---> \u001b[39m\u001b[32m36\u001b[39m edge_indexs, feat_data, labels = \u001b[43mload_data\u001b[49m\u001b[43m(\u001b[49m\u001b[43margs\u001b[49m\u001b[43m[\u001b[49m\u001b[33;43m'\u001b[39;49m\u001b[33;43mdataset\u001b[39;49m\u001b[33;43m'\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43margs\u001b[49m\u001b[43m[\u001b[49m\u001b[33;43m'\u001b[39;49m\u001b[33;43mlayers_tree\u001b[39;49m\u001b[33;43m'\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mprefix\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m     38\u001b[39m np.random.seed(args[\u001b[33m'\u001b[39m\u001b[33mseed\u001b[39m\u001b[33m'\u001b[39m])\n\u001b[32m     39\u001b[39m rd.seed(args[\u001b[33m'\u001b[39m\u001b[33mseed\u001b[39m\u001b[33m'\u001b[39m])\n",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[1]\u001b[39m\u001b[32m, line 177\u001b[39m, in \u001b[36mload_data\u001b[39m\u001b[34m(data, k, prefix)\u001b[39m\n\u001b[32m    175\u001b[39m         tree = pickle.load(file)\n\u001b[32m    176\u001b[39m     file.close()\n\u001b[32m--> \u001b[39m\u001b[32m177\u001b[39m     relation1_tree.append(\u001b[43mnumpy_array_to_edge_index\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtree\u001b[49m\u001b[43m)\u001b[49m)\n\u001b[32m    178\u001b[39m \u001b[38;5;28;01mwith\u001b[39;00m \u001b[38;5;28mopen\u001b[39m(pickle_file[\u001b[33m'\u001b[39m\u001b[33mamz_usu\u001b[39m\u001b[33m'\u001b[39m], \u001b[33m'\u001b[39m\u001b[33mrb\u001b[39m\u001b[33m'\u001b[39m) \u001b[38;5;28;01mas\u001b[39;00m file:\n\u001b[32m    179\u001b[39m     relation2 = pickle.load(file)\n",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[1]\u001b[39m\u001b[32m, line 111\u001b[39m, in \u001b[36mnumpy_array_to_edge_index\u001b[39m\u001b[34m(np_array)\u001b[39m\n\u001b[32m    109\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34mnumpy_array_to_edge_index\u001b[39m(np_array):\n\u001b[32m    110\u001b[39m     \u001b[38;5;28;01massert\u001b[39;00m np_array.ndim == \u001b[32m2\u001b[39m \u001b[38;5;129;01mand\u001b[39;00m np_array.shape[\u001b[32m0\u001b[39m] == np_array.shape[\u001b[32m1\u001b[39m], \u001b[33m\"\u001b[39m\u001b[33mInput must be a square matrix.\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m--> \u001b[39m\u001b[32m111\u001b[39m     rows, cols = \u001b[43mnp\u001b[49m\u001b[43m.\u001b[49m\u001b[43mnonzero\u001b[49m\u001b[43m(\u001b[49m\u001b[43mnp_array\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m    112\u001b[39m     edge_index = np.vstack((rows, cols))\n\u001b[32m    113\u001b[39m     edge_index_tensor = torch.from_numpy(edge_index).long()\n",
      "\u001b[36mFile \u001b[39m\u001b[32m~/run/LML/anaconda3/envs/tsne/lib/python3.11/site-packages/numpy/core/fromnumeric.py:1973\u001b[39m, in \u001b[36mnonzero\u001b[39m\u001b[34m(a)\u001b[39m\n\u001b[32m   1881\u001b[39m \u001b[38;5;129m@array_function_dispatch\u001b[39m(_nonzero_dispatcher)\n\u001b[32m   1882\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34mnonzero\u001b[39m(a):\n\u001b[32m   1883\u001b[39m \u001b[38;5;250m    \u001b[39m\u001b[33;03m\"\"\"\u001b[39;00m\n\u001b[32m   1884\u001b[39m \u001b[33;03m    Return the indices of the elements that are non-zero.\u001b[39;00m\n\u001b[32m   1885\u001b[39m \n\u001b[32m   (...)\u001b[39m\u001b[32m   1971\u001b[39m \n\u001b[32m   1972\u001b[39m \u001b[33;03m    \"\"\"\u001b[39;00m\n\u001b[32m-> \u001b[39m\u001b[32m1973\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43m_wrapfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[43ma\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[33;43m'\u001b[39;49m\u001b[33;43mnonzero\u001b[39;49m\u001b[33;43m'\u001b[39;49m\u001b[43m)\u001b[49m\n",
      "\u001b[36mFile \u001b[39m\u001b[32m~/run/LML/anaconda3/envs/tsne/lib/python3.11/site-packages/numpy/core/fromnumeric.py:59\u001b[39m, in \u001b[36m_wrapfunc\u001b[39m\u001b[34m(obj, method, *args, **kwds)\u001b[39m\n\u001b[32m     56\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m _wrapit(obj, method, *args, **kwds)\n\u001b[32m     58\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m---> \u001b[39m\u001b[32m59\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mbound\u001b[49m\u001b[43m(\u001b[49m\u001b[43m*\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwds\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m     60\u001b[39m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mTypeError\u001b[39;00m:\n\u001b[32m     61\u001b[39m     \u001b[38;5;66;03m# A TypeError occurs if the object does have such a method in its\u001b[39;00m\n\u001b[32m     62\u001b[39m     \u001b[38;5;66;03m# class, but its signature is not identical to that of NumPy's. This\u001b[39;00m\n\u001b[32m   (...)\u001b[39m\u001b[32m     66\u001b[39m     \u001b[38;5;66;03m# Call _wrapit from within the except clause to ensure a potential\u001b[39;00m\n\u001b[32m     67\u001b[39m     \u001b[38;5;66;03m# exception has a traceback chain.\u001b[39;00m\n\u001b[32m     68\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m _wrapit(obj, method, *args, **kwds)\n",
      "\u001b[31mKeyboardInterrupt\u001b[39m: "
     ]
    }
   ],
   "source": [
    "# 原预训练部分调试\n",
    "\n",
    "# 参数设置\n",
    "args = {\n",
    "    \"dataset\": \"amazon\",\n",
    "    \"batch_size\": 8,\n",
    "    \"sample_size\": 20,\n",
    "    \"weight_decay\": 0.00005,\n",
    "    \"emb_size\": 32,\n",
    "    \"pretrain_epochs\": 100,\n",
    "    \"pretrain_lr\": 0.0001,\n",
    "    \"finetune_lr\": 0.0005,\n",
    "    \"num_epochs\": 100,\n",
    "    \"pretrain_patience\": 20,\n",
    "    \"patience\": 30,\n",
    "    \"tsne_weight\": 0.3,\n",
    "    \"weight\": 0.6, \n",
    "    \"layers\": 7,\n",
    "    \"test_size\": 0.6,\n",
    "    \"val_size\": 0.5,\n",
    "    \"layers_tree\": 7,\n",
    "    \"seed\": 76,\n",
    "    \"num_heads\": 2,\n",
    "    \"drop_rate\": 0.5\n",
    "}\n",
    "\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "print(device)\n",
    "\n",
    "timestamp = time.strftime(\"%Y%m%d-%H%M%S\")\n",
    "writer = SummaryWriter(f'runs/{args[\"dataset\"]}_{timestamp}')\n",
    "\n",
    "print('loading data...')\n",
    "prefix = \"../../data/\"\n",
    "\n",
    "edge_indexs, feat_data, labels = load_data(args['dataset'], args['layers_tree'], prefix)\n",
    "\n",
    "np.random.seed(args['seed'])\n",
    "rd.seed(args['seed'])\n",
    "\n",
    "if args['dataset'] == 'yelp':\n",
    "    index = list(range(len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels, stratify=labels,\n",
    "                                                                    test_size=args['test_size'], random_state=2,\n",
    "                                                                    shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"YelpChi_shortest_distance.pkl\")\n",
    "elif args['dataset'] == 'amazon':\n",
    "    index = list(range(3305, len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels[3305:],\n",
    "                                                                    stratify=labels[3305:],\n",
    "                                                                    test_size=args['test_size'],\n",
    "                                                                    random_state=2, shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"Amazon_shortest_distance.pkl\")\n",
    "\n",
    "with open(dist_path, 'rb') as f:\n",
    "    dist_data = pickle.load(f)\n",
    "    dist_matrix = torch.tensor(dist_data['dist_matrix']).to(device)\n",
    "\n",
    "# 准备特征数据\n",
    "feat_data = torch.tensor(feat_data).float()\n",
    "# 特征归一化\n",
    "scaler = MinMaxScaler()\n",
    "feat_data = torch.tensor(scaler.fit_transform(feat_data)).float().to(device)\n",
    "\n",
    "# 初始化模型\n",
    "bsne_model = BSNE_Transformer(\n",
    "    in_feat=feat_data.shape[1],\n",
    "    out_feat=2,\n",
    "    relation_nums=len(edge_indexs),\n",
    "    d_model=64,\n",
    "    nhead=args['num_heads'],\n",
    "    num_layers=3,\n",
    "    dim_feedforward=256,\n",
    "    drop_rate=args['drop_rate']\n",
    ").to(device)\n",
    "\n",
    "# 将边索引转移到设备\n",
    "for edge_index in edge_indexs:\n",
    "    edge_index[0] = edge_index[0].to(device)\n",
    "    edge_index[1] = [tensor.to(device) for tensor in edge_index[1]]\n",
    "\n",
    "print(\"\\n=== Starting Pretraining ===\")\n",
    "\n",
    "bsne_model.classifier.requires_grad_(False)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    filter(lambda p: p.requires_grad, bsne_model.parameters()),\n",
    "    lr=args['pretrain_lr'],\n",
    "    weight_decay=5e-5\n",
    ")\n",
    "pretrain_best_loss = float('inf')\n",
    "pretrain_no_improve = 0\n",
    "pretrain_early_stop = False\n",
    "\n",
    "\n",
    "bsne_model.train()\n",
    "optimizer.zero_grad()\n",
    "\n",
    "# batch_centers = rd.sample(range(feat_data.shape[0]), args['batch_size'])\n",
    "batch_centers = np.random.choice(range(feat_data.shape[0]), size=args['batch_size'], replace=False)\n",
    "\n",
    "\n",
    "sample_size = args['sample_size']\n",
    "temperature = 100\n",
    "eps = 1e-10\n",
    "\n",
    "total_loss = 0.0\n",
    "batch_size = len(batch_centers)\n",
    "\n",
    "# 为每个中心节点构建Bp子图\n",
    "bp_subgraphs = []\n",
    "for xi in batch_centers:\n",
    "    # 根据距离矩阵计算采样概率\n",
    "    dist_row = dist_matrix[xi].cpu().numpy()\n",
    "    probs = np.exp(-dist_row)\n",
    "    probs[xi] = 0  # 排除自身\n",
    "    probs /= probs.sum()\n",
    "\n",
    "    # 采样邻居节点\n",
    "    available_nodes = len(dist_row) - 1\n",
    "    actual_sample_size = min(sample_size, available_nodes)\n",
    "\n",
    "    if actual_sample_size > 0:\n",
    "        neighbors = np.random.choice(len(dist_row), size=actual_sample_size, p=probs, replace=False)\n",
    "        subgraph_nodes = [xi] + neighbors.tolist()\n",
    "    else:\n",
    "        subgraph_nodes = [xi]\n",
    "\n",
    "    bp_subgraphs.append(subgraph_nodes)\n",
    "\n",
    "# 为每个中心节点构建Bu子图（均匀采样）\n",
    "bu_subgraphs = []\n",
    "for xi in batch_centers:\n",
    "    dist_row = dist_matrix[xi].cpu().numpy()\n",
    "\n",
    "    # Uniformly sample nodes to build the Bu subgraph.\n",
    "    # BUGFIX: the original sampled from ALL nodes (including the center xi),\n",
    "    # so [xi] + neighbors could contain the center twice; exclude xi.\n",
    "    available_nodes = len(dist_row) - 1\n",
    "    actual_sample_size = min(sample_size, available_nodes)\n",
    "\n",
    "    if actual_sample_size > 0:\n",
    "        candidates = np.delete(np.arange(len(dist_row)), xi)\n",
    "        neighbors = np.random.choice(candidates, size=actual_sample_size, replace=False)\n",
    "        subgraph_nodes = [xi] + neighbors.tolist()\n",
    "    else:\n",
    "        subgraph_nodes = [xi]\n",
    "\n",
    "    bu_subgraphs.append(subgraph_nodes)\n",
    "# 计算Bp子图中所有节点的特征\n",
    "bp_features_batch = []\n",
    "for bp_nodes in bp_subgraphs:\n",
    "    bp_node_features = []\n",
    "\n",
    "    # 为子图中的每个节点计算特征\n",
    "    for node_idx in bp_nodes:\n",
    "        # 构建该节点的一阶邻居子图\n",
    "        node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "\n",
    "        # 获取节点特征\n",
    "        _, node_feature = bsne_model([node_subgraph])\n",
    "\n",
    "        bp_node_features.append(node_feature.squeeze(0))\n",
    "\n",
    "    # 将该Bp子图所有节点的特征收集起来\n",
    "    bp_features_batch.append(torch.stack(bp_node_features))  # [sample_size+1, d_model]\n",
    "\n",
    "# 计算Bu子图中所有节点的特征\n",
    "bu_features_batch = []\n",
    "for bu_nodes in bu_subgraphs:\n",
    "    bu_node_features = []\n",
    "\n",
    "    # 为子图中的每个节点计算特征\n",
    "    for node_idx in bu_nodes:\n",
    "        # 构建该节点的一阶邻居子图\n",
    "        node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "\n",
    "        # 获取节点特征\n",
    "        _, node_feature = bsne_model([node_subgraph])\n",
    "\n",
    "        bu_node_features.append(node_feature.squeeze(0))\n",
    "\n",
    "    # 将该Bu子图所有节点的特征收集起来\n",
    "    bu_features_batch.append(torch.stack(bu_node_features))  # [sample_size+1, d_model]\n",
    "\n",
    "# 针对每一对Bp和Bu子图计算loss\n",
    "for i in range(batch_size):\n",
    "    # 获取中心节点索引和特征\n",
    "    center_idx = batch_centers[i]\n",
    "    center_feature = bp_features_batch[i][0]  # 中心节点是第一个\n",
    "\n",
    "    # 仅获取Bp子图中的其他节点（排除中心节点）\n",
    "    other_bp_indices = bp_subgraphs[i][1:]\n",
    "    other_bp_features = bp_features_batch[i][1:]\n",
    "\n",
    "    # 计算中心节点到其他Bp节点的原始距离\n",
    "    orig_dists_bp = dist_matrix[center_idx][other_bp_indices].cpu().numpy()\n",
    "\n",
    "    # 计算中心节点到其他Bp节点的特征距离\n",
    "    feat_dists_bp = torch.cdist(center_feature.unsqueeze(0), other_bp_features).squeeze(0)\n",
    "\n",
    "    # 计算P向量（基于节点路径距离）\n",
    "    P = (1.0 + orig_dists_bp ** 2) ** -1\n",
    "    P_sum = P.sum()\n",
    "    P = P / (P_sum + eps)\n",
    "    P = torch.tensor(P, device=device)\n",
    "\n",
    "    # 计算Q向量（基于特征距离）\n",
    "    Q = (1.0 + feat_dists_bp ** 2 / temperature) ** -1\n",
    "    Q_sum = Q.sum()\n",
    "    Q = Q / (Q_sum + eps)\n",
    "    \n",
    "    # Local loss (KL-divergence-style term between P and Q).\n",
    "    # BUGFIX: len(bp_features_batch[i]-1) == len(tensor) == sample_size + 1,\n",
    "    # not the intended neighbor count; also guard a center-only subgraph.\n",
    "    # NOTE(review): a true KL would be (P * (log P - log Q)).sum() -- confirm intent.\n",
    "    n_neighbors = max(len(bp_features_batch[i]) - 1, 1)\n",
    "    loss_local = (torch.log(P + eps) - torch.log(Q + eps)).sum() / n_neighbors\n",
    "\n",
    "    # 计算k_Bp\n",
    "    # 获取中心节点到图中所有节点的距离\n",
    "    all_dists = dist_matrix[center_idx]  # [N]\n",
    "    # 计算所有节点的未归一化概率值 (1 + d^2)^{-1}\n",
    "    unnorm_probs_all = (1.0 + all_dists ** 2) ** -1\n",
    "    # 排除中心节点自身（设置自身概率为0）\n",
    "    unnorm_probs_all = unnorm_probs_all.clone()  # 避免修改原始数据\n",
    "    unnorm_probs_all[center_idx] = 0\n",
    "    Z_i = unnorm_probs_all.sum()\n",
    "    sum_p_cond = P.sum()\n",
    "    N = dist_matrix.shape[0]\n",
    "    k_Bp = (sum_p_cond * N / len(other_bp_indices))/Z_i\n",
    "\n",
    "    # 计算中心节点到Bu子图节点的特征距离\n",
    "    bu_features = bu_features_batch[i]\n",
    "    feat_dists_bu = torch.cdist(center_feature.unsqueeze(0), bu_features).squeeze(0)\n",
    "\n",
    "    # 计算指数距离之和\n",
    "    sum_e_bu = torch.exp(-feat_dists_bu).sum()\n",
    "    sum_e_bp = torch.exp(-feat_dists_bp).sum()\n",
    "\n",
    "    # 全局损失\n",
    "    global_ratio = k_Bp * (sum_e_bu / (sum_e_bp + eps))\n",
    "    loss_global = torch.log(global_ratio.clamp(min=eps, max=1e10))  # 限制范围\n",
    "\n",
    "    # 合并损失\n",
    "    total_loss += loss_local + loss_global\n",
    "    print('P:',P)\n",
    "    print('Q:',Q)\n",
    "    print(f\"local_loss: {loss_local.item()},global_loss: {loss_global.item()}\")\n",
    "\n",
    "bsne_loss = total_loss / batch_size\n",
    "\n",
    "bsne_loss.backward()\n",
    "optimizer.step()\n",
    "\n",
    "# 更新最佳损失和早停计数器\n",
    "if bsne_loss.item() < pretrain_best_loss:\n",
    "    pretrain_best_loss = bsne_loss.item()\n",
    "    pretrain_no_improve = 0\n",
    "else:\n",
    "    pretrain_no_improve += 1\n",
    "\n",
    "if pretrain_no_improve >= args['pretrain_patience']:\n",
    "    print(f\"Pretrain early stopping at epoch {epoch}\")\n",
    "    pretrain_early_stop = True\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "491cb31d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cuda\n",
      "loading data...\n",
      "\n",
      "=== Starting Pretraining ===\n",
      "P: tensor([0.0951, 0.0350, 0.0350, 0.0350, 0.0350, 0.0951, 0.0350, 0.0350, 0.0350,\n",
      "        0.0350, 0.0350, 0.0951, 0.0350, 0.0350, 0.0951, 0.0350, 0.0350, 0.0350,\n",
      "        0.0350, 0.0951], device='cuda:0', dtype=torch.float64)\n",
      "Q: tensor([0.0870, 0.0252, 0.0122, 0.1046, 0.0083, 0.0110, 0.0215, 0.0485, 0.0126,\n",
      "        0.0360, 0.0280, 0.0155, 0.2044, 0.0891, 0.0310, 0.0265, 0.0356, 0.0953,\n",
      "        0.0233, 0.0841], device='cuda:0')\n",
      "local_loss: 0.26816328109984583,global_loss: -1.3172113920079427\n",
      "P: tensor([0.0257, 0.0699, 0.0699, 0.0699, 0.0257, 0.0699, 0.0257, 0.0257, 0.0699,\n",
      "        0.0257, 0.0257, 0.0699, 0.0699, 0.0257, 0.0699, 0.0699, 0.0699, 0.0257,\n",
      "        0.0699, 0.0257], device='cuda:0', dtype=torch.float64)\n",
      "Q: tensor([0.0927, 0.0139, 0.0355, 0.0535, 0.1191, 0.0420, 0.0436, 0.0338, 0.0220,\n",
      "        0.0069, 0.0500, 0.0317, 0.0456, 0.0341, 0.0445, 0.0325, 0.0221, 0.1271,\n",
      "        0.1273, 0.0220], device='cuda:0')\n",
      "local_loss: 0.12610869781579812,global_loss: -1.2795556945524054\n",
      "P: tensor([0.0282, 0.0766, 0.0766, 0.0282, 0.0282, 0.0282, 0.0766, 0.0766, 0.0766,\n",
      "        0.0282, 0.0282, 0.0282, 0.0282, 0.0766, 0.0282, 0.0282, 0.0766, 0.0282,\n",
      "        0.0766, 0.0766], device='cuda:0', dtype=torch.float64)\n",
      "Q: tensor([0.0778, 0.0217, 0.0508, 0.0126, 0.0271, 0.0288, 0.0303, 0.0155, 0.1186,\n",
      "        0.0333, 0.0214, 0.0643, 0.0740, 0.0620, 0.1310, 0.0430, 0.0501, 0.0613,\n",
      "        0.0386, 0.0372], device='cuda:0')\n",
      "local_loss: 0.0598002396929537,global_loss: -0.6018635864099575\n",
      "P: tensor([0.0330, 0.0897, 0.0330, 0.0897, 0.0330, 0.0897, 0.0330, 0.0330, 0.0897,\n",
      "        0.0330, 0.0330, 0.0330, 0.0330, 0.0330, 0.0330, 0.0330, 0.0897, 0.0330,\n",
      "        0.0330, 0.0897], device='cuda:0', dtype=torch.float64)\n",
      "Q: tensor([0.0291, 0.0405, 0.0209, 0.0351, 0.0089, 0.0814, 0.0300, 0.0299, 0.0365,\n",
      "        0.0577, 0.1201, 0.0882, 0.0072, 0.0759, 0.0214, 0.0556, 0.0394, 0.0308,\n",
      "        0.0909, 0.1003], device='cuda:0')\n",
      "local_loss: 0.1172614689787698,global_loss: -1.084382291651389\n",
      "P: tensor([0.0372, 0.0372, 0.0372, 0.0372, 0.0372, 0.0372, 0.1012, 0.1012, 0.0372,\n",
      "        0.0372, 0.0372, 0.1012, 0.0372, 0.1012, 0.0372, 0.0372, 0.0372, 0.0372,\n",
      "        0.0372, 0.0372], device='cuda:0', dtype=torch.float64)\n",
      "Q: tensor([0.0872, 0.0453, 0.0809, 0.1055, 0.0554, 0.0238, 0.0239, 0.0378, 0.0436,\n",
      "        0.0243, 0.1010, 0.0449, 0.0240, 0.0264, 0.0314, 0.0597, 0.0442, 0.0753,\n",
      "        0.0354, 0.0294], device='cuda:0')\n",
      "local_loss: 0.028813683215835707,global_loss: -0.38870563368974426\n",
      "P: tensor([0.0246, 0.0669, 0.0669, 0.0246, 0.0246, 0.0246, 0.0669, 0.0246, 0.0669,\n",
      "        0.0669, 0.0246, 0.0669, 0.0669, 0.0669, 0.0669, 0.0246, 0.0669, 0.0669,\n",
      "        0.0246, 0.0669], device='cuda:0', dtype=torch.float64)\n",
      "Q: tensor([0.0080, 0.0318, 0.0351, 0.0204, 0.0175, 0.0562, 0.0229, 0.1497, 0.0181,\n",
      "        0.1351, 0.0531, 0.1020, 0.0127, 0.1036, 0.0443, 0.0512, 0.0216, 0.0278,\n",
      "        0.0538, 0.0347], device='cuda:0')\n",
      "local_loss: 0.18434071831931248,global_loss: -1.1167904900255634\n",
      "P: tensor([0.0439, 0.0439, 0.0439, 0.0439, 0.0439, 0.0161, 0.0439, 0.0439, 0.0439,\n",
      "        0.1192, 0.0439, 0.0439, 0.1192, 0.0439, 0.0439, 0.0439, 0.0439, 0.0439,\n",
      "        0.0439, 0.0439], device='cuda:0', dtype=torch.float64)\n",
      "Q: tensor([0.0540, 0.0524, 0.0245, 0.0414, 0.0417, 0.0415, 0.0303, 0.0240, 0.0442,\n",
      "        0.0415, 0.0373, 0.0667, 0.0714, 0.0286, 0.0132, 0.0288, 0.0156, 0.2517,\n",
      "        0.0197, 0.0715], device='cuda:0')\n",
      "local_loss: 0.16411689181627467,global_loss: -0.9271910526311694\n",
      "P: tensor([0.0236, 0.0642, 0.0642, 0.0236, 0.0642, 0.0642, 0.0642, 0.0642, 0.0642,\n",
      "        0.0642, 0.0236, 0.0236, 0.0236, 0.0642, 0.0642, 0.0236, 0.0642, 0.0236,\n",
      "        0.0642, 0.0642], device='cuda:0', dtype=torch.float64)\n",
      "Q: tensor([0.0559, 0.0233, 0.0885, 0.0729, 0.0203, 0.0878, 0.0111, 0.0448, 0.0266,\n",
      "        0.0496, 0.0455, 0.0770, 0.1196, 0.0465, 0.0395, 0.0250, 0.0488, 0.0340,\n",
      "        0.0451, 0.0378], device='cuda:0')\n",
      "local_loss: 0.04376085387566986,global_loss: -0.9340696621677361\n"
     ]
    }
   ],
   "source": [
    "# 新预训练部分调试\n",
    "\n",
    "# 参数设置\n",
    "args = {\n",
    "    \"dataset\": \"amazon\",\n",
    "    \"batch_size\": 8,\n",
    "    \"sample_size\": 20,\n",
    "    \"weight_decay\": 0.00005,\n",
    "    \"emb_size\": 32,\n",
    "    \"pretrain_epochs\": 100,\n",
    "    \"pretrain_lr\": 0.0001,\n",
    "    \"finetune_lr\": 0.0005,\n",
    "    \"num_epochs\": 100,\n",
    "    \"pretrain_patience\": 20,\n",
    "    \"patience\": 30,\n",
    "    \"tsne_weight\": 0.3,\n",
    "    \"weight\": 0.6, \n",
    "    \"layers\": 7,\n",
    "    \"test_size\": 0.6,\n",
    "    \"val_size\": 0.5,\n",
    "    \"layers_tree\": 7,\n",
    "    \"seed\": 76,\n",
    "    \"num_heads\": 2,\n",
    "    \"drop_rate\": 0.5\n",
    "}\n",
    "\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "print(device)\n",
    "\n",
    "timestamp = time.strftime(\"%Y%m%d-%H%M%S\")\n",
    "writer = SummaryWriter(f'runs/{args[\"dataset\"]}_{timestamp}')\n",
    "\n",
    "print('loading data...')\n",
    "prefix = \"../../data/\"\n",
    "\n",
    "edge_indexs, feat_data, labels = load_data(args['dataset'], args['layers_tree'], prefix)\n",
    "\n",
    "np.random.seed(args['seed'])\n",
    "rd.seed(args['seed'])\n",
    "\n",
    "if args['dataset'] == 'yelp':\n",
    "    index = list(range(len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels, stratify=labels,\n",
    "                                                                    test_size=args['test_size'], random_state=2,\n",
    "                                                                    shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"YelpChi_shortest_distance.pkl\")\n",
    "elif args['dataset'] == 'amazon':\n",
    "    index = list(range(3305, len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels[3305:],\n",
    "                                                                    stratify=labels[3305:],\n",
    "                                                                    test_size=args['test_size'],\n",
    "                                                                    random_state=2, shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"Amazon_shortest_distance.pkl\")\n",
    "\n",
    "with open(dist_path, 'rb') as f:\n",
    "    dist_data = pickle.load(f)\n",
    "    dist_matrix = torch.tensor(dist_data['dist_matrix']).to(device)\n",
    "\n",
    "# 准备特征数据\n",
    "feat_data = torch.tensor(feat_data).float()\n",
    "# 特征归一化\n",
    "scaler = MinMaxScaler()\n",
    "feat_data = torch.tensor(scaler.fit_transform(feat_data)).float().to(device)\n",
    "\n",
    "# 初始化模型\n",
    "bsne_model = BSNE_Transformer(\n",
    "    in_feat=feat_data.shape[1],\n",
    "    out_feat=2,\n",
    "    relation_nums=len(edge_indexs),\n",
    "    d_model=64,\n",
    "    nhead=args['num_heads'],\n",
    "    num_layers=3,\n",
    "    dim_feedforward=256,\n",
    "    drop_rate=args['drop_rate']\n",
    ").to(device)\n",
    "\n",
    "# 将边索引转移到设备\n",
    "for edge_index in edge_indexs:\n",
    "    edge_index[0] = edge_index[0].to(device)\n",
    "    edge_index[1] = [tensor.to(device) for tensor in edge_index[1]]\n",
    "\n",
    "print(\"\\n=== Starting Pretraining ===\")\n",
    "\n",
    "bsne_model.classifier.requires_grad_(False)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    filter(lambda p: p.requires_grad, bsne_model.parameters()),\n",
    "    lr=args['pretrain_lr'],\n",
    "    weight_decay=5e-5\n",
    ")\n",
    "pretrain_best_loss = float('inf')\n",
    "pretrain_no_improve = 0\n",
    "pretrain_early_stop = False\n",
    "\n",
    "\n",
    "bsne_model.train()\n",
    "optimizer.zero_grad()\n",
    "\n",
    "# batch_centers = rd.sample(range(feat_data.shape[0]), args['batch_size'])\n",
    "batch_centers = np.random.choice(range(feat_data.shape[0]), size=args['batch_size'], replace=False)\n",
    "\n",
    "\n",
    "sample_size = args['sample_size']\n",
    "temperature = 100\n",
    "eps = 1e-10\n",
    "\n",
    "total_loss = 0.0\n",
    "batch_size = len(batch_centers)\n",
    "\n",
    "# 为每个中心节点构建Bp子图\n",
    "bp_subgraphs = []\n",
    "for xi in batch_centers:\n",
    "    # 根据距离矩阵计算采样概率\n",
    "    dist_row = dist_matrix[xi].cpu().numpy()\n",
    "    probs = np.power(10.0, -dist_row)\n",
    "    probs[xi] = 0  # 排除自身\n",
    "    probs /= probs.sum()\n",
    "    \n",
    "    # 采样邻居节点\n",
    "    available_nodes = len(dist_row) - 1\n",
    "    actual_sample_size = min(sample_size, available_nodes)\n",
    "\n",
    "    if actual_sample_size > 0:\n",
    "        neighbors = np.random.choice(len(dist_row), size=actual_sample_size, p=probs, replace=False)\n",
    "        subgraph_nodes = [xi] + neighbors.tolist()\n",
    "    else:\n",
    "        subgraph_nodes = [xi]\n",
    "\n",
    "    bp_subgraphs.append(subgraph_nodes)\n",
    "\n",
    "# 为每个中心节点构建Bu子图（均匀采样）\n",
    "bu_subgraphs = []\n",
    "for xi in batch_centers:\n",
    "    dist_row = dist_matrix[xi].cpu().numpy()\n",
    "\n",
    "    # 随机采样节点构建Bu子图\n",
    "    available_nodes = len(dist_row) - 1\n",
    "    actual_sample_size = min(sample_size, available_nodes)\n",
    "\n",
    "    if actual_sample_size > 0:\n",
    "        neighbors = np.random.choice(len(dist_row), size=actual_sample_size, replace=False)\n",
    "        subgraph_nodes = [xi] + neighbors.tolist()\n",
    "    else:\n",
    "        subgraph_nodes = [xi]\n",
    "\n",
    "    bu_subgraphs.append(subgraph_nodes)\n",
    "# 计算Bp子图中所有节点的特征\n",
    "bp_features_batch = []\n",
    "for bp_nodes in bp_subgraphs:\n",
    "    bp_node_features = []\n",
    "\n",
    "    # 为子图中的每个节点计算特征\n",
    "    for node_idx in bp_nodes:\n",
    "        # 构建该节点的一阶邻居子图\n",
    "        node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "\n",
    "        # 获取节点特征\n",
    "        _, node_feature = bsne_model([node_subgraph])\n",
    "\n",
    "        bp_node_features.append(node_feature.squeeze(0))\n",
    "\n",
    "    # 将该Bp子图所有节点的特征收集起来\n",
    "    bp_features_batch.append(torch.stack(bp_node_features))  # [sample_size+1, d_model]\n",
    "\n",
    "# 计算Bu子图中所有节点的特征\n",
    "bu_features_batch = []\n",
    "for bu_nodes in bu_subgraphs:\n",
    "    bu_node_features = []\n",
    "\n",
    "    # 为子图中的每个节点计算特征\n",
    "    for node_idx in bu_nodes:\n",
    "        # 构建该节点的一阶邻居子图\n",
    "        node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "\n",
    "        # 获取节点特征\n",
    "        _, node_feature = bsne_model([node_subgraph])\n",
    "\n",
    "        bu_node_features.append(node_feature.squeeze(0))\n",
    "\n",
    "    # 将该Bu子图所有节点的特征收集起来\n",
    "    bu_features_batch.append(torch.stack(bu_node_features))  # [sample_size+1, d_model]\n",
    "\n",
    "# 针对每一对Bp和Bu子图计算loss\n",
    "for i in range(batch_size):\n",
    "    # 获取中心节点索引和特征\n",
    "    center_idx = batch_centers[i]\n",
    "    center_feature = bp_features_batch[i][0]  # 中心节点是第一个\n",
    "\n",
    "    # 仅获取Bp子图中的其他节点（排除中心节点）\n",
    "    other_bp_indices = bp_subgraphs[i][1:]\n",
    "    other_bp_features = bp_features_batch[i][1:]\n",
    "\n",
    "    # 计算中心节点到其他Bp节点的原始距离\n",
    "    orig_dists_bp = dist_matrix[center_idx][other_bp_indices].cpu().numpy()\n",
    "\n",
    "    # 计算中心节点到其他Bp节点的特征距离\n",
    "    feat_dists_bp = torch.cdist(center_feature.unsqueeze(0), other_bp_features).squeeze(0)\n",
    "\n",
    "    # 计算P向量（基于节点路径距离）\n",
    "#     P = (1.0 + orig_dists_bp ** 2) ** -1\n",
    "    P = np.exp(-orig_dists_bp)\n",
    "#     P = np.power(10.0, -orig_dists_bp)\n",
    "    P_sum = P.sum()\n",
    "    P = P / (P_sum + eps)\n",
    "    P = torch.tensor(P, device=device)\n",
    "\n",
    "    # 计算Q向量（基于特征距离）\n",
    "#     Q = (1.0 + feat_dists_bp ** 2 / temperature) ** -1\n",
    "#     Q = np.exp(-feat_dists_bp.detach().numpy())\n",
    "    Q = np.exp(-feat_dists_bp.detach().cpu().numpy())\n",
    "    Q_sum = Q.sum()\n",
    "    Q = Q / (Q_sum + eps)\n",
    "    Q = torch.tensor(Q, device=device)\n",
    "    \n",
    "    # 局部损失（KL散度）\n",
    "#     loss_local = (torch.log(P + eps) - torch.log(Q + eps)).sum() / (len(bp_features_batch[i]-1))\n",
    "#     loss_local = (P * (torch.log(P + eps) - torch.log(Q + eps))).sum()\n",
    "    loss_local = (torch.log(P + eps) - torch.log(Q + eps)).mean()\n",
    "\n",
    "    # 计算k_Bp\n",
    "    # 获取中心节点到图中所有节点的距离\n",
    "    all_dists = dist_matrix[center_idx]  # [N]\n",
    "    \n",
    "#     \n",
    "#     unnorm_probs_all = (1.0 + all_dists ** 2) ** -1\n",
    "# \n",
    "    unnorm_probs_all = np.exp(-all_dists.cpu().numpy())\n",
    "    unnorm_probs_all[center_idx] = 0\n",
    "    Z_i = unnorm_probs_all.sum()\n",
    "    sum_p_cond = P.sum()\n",
    "    N = dist_matrix.shape[0]\n",
    "    k_Bp = (sum_p_cond * N / len(other_bp_indices))/Z_i\n",
    "\n",
    "    # 计算中心节点到Bu子图节点的特征距离\n",
    "    bu_features = bu_features_batch[i]\n",
    "    feat_dists_bu = torch.cdist(center_feature.unsqueeze(0), bu_features).squeeze(0)\n",
    "\n",
    "    # 计算指数距离之和\n",
    "    sum_e_bu = torch.exp(-feat_dists_bu).sum()\n",
    "    sum_e_bp = torch.exp(-feat_dists_bp).sum()\n",
    "\n",
    "    # 全局损失\n",
    "    global_ratio = k_Bp * (sum_e_bu / (sum_e_bp + eps))\n",
    "    loss_global = torch.log(global_ratio.clamp(min=eps, max=1e10))  # 限制范围\n",
    "\n",
    "    # 合并损失\n",
    "    total_loss += loss_local + loss_global\n",
    "    print('P:',P)\n",
    "    print('Q:',Q)\n",
    "    print(f\"local_loss: {loss_local.item()},global_loss: {loss_global.item()}\")\n",
    "\n",
    "bsne_loss = total_loss / batch_size\n",
    "\n",
    "bsne_loss.backward()\n",
    "optimizer.step()\n",
    "\n",
    "# 更新最佳损失和早停计数器\n",
    "if bsne_loss.item() < pretrain_best_loss:\n",
    "    pretrain_best_loss = bsne_loss.item()\n",
    "    pretrain_no_improve = 0\n",
    "else:\n",
    "    pretrain_no_improve += 1\n",
    "\n",
    "if pretrain_no_improve >= args['pretrain_patience']:\n",
    "    print(f\"Pretrain early stopping at epoch {epoch}\")\n",
    "    pretrain_early_stop = True\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "2227bbc5",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Center 5103: distance counts: {0: 1, 1: 459, 2: 11334, 3: 150}\n",
      "Center 4736: distance counts: {0: 1, 1: 1919, 2: 10005, 3: 19}\n",
      "Center 9863: distance counts: {0: 1, 1: 580, 2: 11188, 3: 175}\n",
      "Center 10791: distance counts: {0: 1, 1: 98, 2: 9597, 3: 2248}\n",
      "Center 3137: distance counts: {0: 1, 1: 237, 2: 11442, 3: 264}\n",
      "Center 1875: distance counts: {0: 1, 1: 865, 2: 10998, 3: 80}\n",
      "Center 2359: distance counts: {0: 1, 1: 87, 2: 10409, 3: 1447}\n",
      "Center 475: distance counts: {0: 1, 1: 1813, 2: 10098, 3: 32}\n"
     ]
    }
   ],
   "source": [
    "for xi in batch_centers:\n",
    "    dist_row = dist_matrix[xi].cpu().numpy()\n",
    "    unique, counts = np.unique(dist_row, return_counts=True)\n",
    "    print(f\"Center {xi}: distance counts: {dict(zip(unique, counts))}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "a177c51b",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(-0.7993, device='cuda:0', dtype=torch.float64, grad_fn=<DivBackward0>)"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "bsne_loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2e686876",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3016331f",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7fcf7bbe",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1d60670e",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "04de2062",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cuda\n",
      "loading data...\n",
      "\n",
      "=== Starting Pretraining ===\n",
      "Pretrain Epoch: 000, BSNE Loss: 0.5530\n",
      "Pretrain Epoch: 001, BSNE Loss: 0.5389\n",
      "Pretrain Epoch: 003, BSNE Loss: 0.4252\n",
      "Pretrain Epoch: 004, BSNE Loss: 0.5117\n",
      "Pretrain Epoch: 005, BSNE Loss: 0.4837\n",
      "Pretrain Epoch: 006, BSNE Loss: 0.5010\n",
      "Pretrain Epoch: 007, BSNE Loss: 0.3906\n",
      "Pretrain Epoch: 008, BSNE Loss: 0.4722\n",
      "Pretrain Epoch: 009, BSNE Loss: 0.5459\n",
      "Pretrain Epoch: 010, BSNE Loss: 0.5214\n",
      "Pretrain Epoch: 011, BSNE Loss: 0.4308\n",
      "Pretrain Epoch: 012, BSNE Loss: 0.5095\n",
      "Pretrain Epoch: 013, BSNE Loss: 0.4855\n",
      "Pretrain Epoch: 014, BSNE Loss: 0.3638\n",
      "Pretrain Epoch: 015, BSNE Loss: 0.5098\n",
      "Pretrain Epoch: 016, BSNE Loss: 0.4818\n",
      "Pretrain Epoch: 017, BSNE Loss: 0.2936\n",
      "Pretrain Epoch: 018, BSNE Loss: 0.2271\n",
      "Pretrain Epoch: 019, BSNE Loss: 0.5671\n",
      "Pretrain Epoch: 020, BSNE Loss: 0.3680\n",
      "Pretrain Epoch: 021, BSNE Loss: 0.4483\n",
      "Pretrain Epoch: 022, BSNE Loss: 0.5006\n",
      "Pretrain Epoch: 023, BSNE Loss: 0.6639\n",
      "Pretrain Epoch: 024, BSNE Loss: 0.4998\n",
      "Pretrain Epoch: 025, BSNE Loss: 0.5709\n",
      "Pretrain Epoch: 026, BSNE Loss: 0.4582\n",
      "Pretrain Epoch: 027, BSNE Loss: 0.7230\n",
      "Pretrain Epoch: 028, BSNE Loss: 0.4208\n",
      "Pretrain Epoch: 029, BSNE Loss: 0.5092\n",
      "Pretrain Epoch: 030, BSNE Loss: 0.4064\n",
      "Pretrain Epoch: 031, BSNE Loss: 0.3964\n",
      "Pretrain Epoch: 032, BSNE Loss: 0.3198\n",
      "Pretrain Epoch: 033, BSNE Loss: 0.4348\n",
      "Pretrain Epoch: 034, BSNE Loss: 0.5296\n",
      "Pretrain Epoch: 035, BSNE Loss: 0.4899\n",
      "Pretrain Epoch: 036, BSNE Loss: 0.5637\n",
      "Pretrain Epoch: 037, BSNE Loss: 0.4085\n",
      "Pretrain early stopping at epoch 38\n",
      "Pretrain Epoch: 038, BSNE Loss: 0.3647\n",
      "\n",
      "=== Starting Fine-tuning ===\n",
      "Epoch: 000 | Loss: 0.1162 | Val AUC: 0.7106 | Val F1: 0.4751\n",
      "Epoch: 005 | Loss: 0.0117 | Val AUC: 0.8122 | Val F1: 0.4751\n",
      "Epoch: 010 | Loss: 0.0054 | Val AUC: 0.8577 | Val F1: 0.4751\n",
      "Epoch: 015 | Loss: 0.0096 | Val AUC: 0.8853 | Val F1: 0.4751\n",
      "Epoch: 020 | Loss: 0.0059 | Val AUC: 0.8979 | Val F1: 0.4751\n",
      "Epoch: 025 | Loss: 0.0107 | Val AUC: 0.8946 | Val F1: 0.4751\n",
      "Epoch: 030 | Loss: 0.0038 | Val AUC: 0.8958 | Val F1: 0.4751\n",
      "Epoch: 035 | Loss: 0.0062 | Val AUC: 0.8906 | Val F1: 0.5316\n",
      "Epoch: 040 | Loss: 0.0097 | Val AUC: 0.8905 | Val F1: 0.5582\n",
      "Epoch: 045 | Loss: 0.0029 | Val AUC: 0.8934 | Val F1: 0.7005\n",
      "Epoch: 050 | Loss: 0.0140 | Val AUC: 0.8979 | Val F1: 0.7428\n",
      "Epoch: 055 | Loss: 0.0155 | Val AUC: 0.8997 | Val F1: 0.7249\n",
      "Epoch: 060 | Loss: 0.0133 | Val AUC: 0.8986 | Val F1: 0.6561\n",
      "Epoch: 065 | Loss: 0.0366 | Val AUC: 0.8980 | Val F1: 0.6516\n",
      "Epoch: 070 | Loss: 0.0501 | Val AUC: 0.8986 | Val F1: 0.7403\n",
      "Epoch: 075 | Loss: 0.0088 | Val AUC: 0.8993 | Val F1: 0.7470\n",
      "Epoch: 080 | Loss: 0.0096 | Val AUC: 0.8990 | Val F1: 0.7531\n",
      "Epoch: 085 | Loss: 0.0274 | Val AUC: 0.8982 | Val F1: 0.7476\n",
      "Epoch: 090 | Loss: 0.0080 | Val AUC: 0.8997 | Val F1: 0.7645\n",
      "Epoch: 095 | Loss: 0.0101 | Val AUC: 0.8971 | Val F1: 0.7576\n",
      "\n",
      "=== Final Test Results ===\n",
      "Test AUC: 0.8904 | Test AP: 0.4918 | Test F1: 0.7563 | G-mean: 0.7516\n"
     ]
    }
   ],
   "source": [
    "# 进行完整测试（新）\n",
    "\n",
    "# 参数设置\n",
    "args = {\n",
    "    \"dataset\": \"amazon\",\n",
    "    \"batch_size\": 8,\n",
    "    \"sample_size\": 20,\n",
    "    \"weight_decay\": 0.00005,\n",
    "    \"emb_size\": 32,\n",
    "    \"pretrain_epochs\": 100,\n",
    "    \"pretrain_lr\": 0.0001,\n",
    "    \"finetune_lr\": 0.0005,\n",
    "    \"num_epochs\": 100,\n",
    "    \"pretrain_patience\": 20,\n",
    "    \"patience\": 30,\n",
    "    \"tsne_weight\": 0.3,\n",
    "    \"weight\": 0.6,\n",
    "    \"layers\": 7,\n",
    "    \"test_size\": 0.6,\n",
    "    \"val_size\": 0.5,\n",
    "    \"layers_tree\": 7,\n",
    "    \"seed\": 76,\n",
    "    \"num_heads\": 2,\n",
    "    \"drop_rate\": 0.5\n",
    "}\n",
    "\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "print(device)\n",
    "\n",
    "timestamp = time.strftime(\"%Y%m%d-%H%M%S\")\n",
    "writer = SummaryWriter(f'runs/{args[\"dataset\"]}_{timestamp}')\n",
    "\n",
    "print('loading data...')\n",
    "prefix = \"../../data/\"\n",
    "\n",
    "edge_indexs, feat_data, labels = load_data(args['dataset'], args['layers_tree'], prefix)\n",
    "\n",
    "np.random.seed(args['seed'])\n",
    "rd.seed(args['seed'])\n",
    "\n",
    "if args['dataset'] == 'yelp':\n",
    "    index = list(range(len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels, stratify=labels,\n",
    "                                                                    test_size=args['test_size'], random_state=2,\n",
    "                                                                    shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"YelpChi_shortest_distance.pkl\")\n",
    "elif args['dataset'] == 'amazon':\n",
    "    index = list(range(3305, len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels[3305:],\n",
    "                                                                    stratify=labels[3305:],\n",
    "                                                                    test_size=args['test_size'],\n",
    "                                                                    random_state=2, shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"Amazon_shortest_distance.pkl\")\n",
    "\n",
    "with open(dist_path, 'rb') as f:\n",
    "    dist_data = pickle.load(f)\n",
    "    dist_matrix = torch.tensor(dist_data['dist_matrix']).to(device)\n",
    "\n",
    "# 准备特征数据\n",
    "feat_data = torch.tensor(feat_data).float()\n",
    "# 特征归一化\n",
    "scaler = MinMaxScaler()\n",
    "feat_data = torch.tensor(scaler.fit_transform(feat_data)).float().to(device)\n",
    "\n",
    "# 初始化模型\n",
    "bsne_model = BSNE_Transformer(\n",
    "    in_feat=feat_data.shape[1],\n",
    "    out_feat=2,\n",
    "    relation_nums=len(edge_indexs),\n",
    "    d_model=64,\n",
    "    nhead=args['num_heads'],\n",
    "    num_layers=3,\n",
    "    dim_feedforward=256,\n",
    "    drop_rate=args['drop_rate']\n",
    ").to(device)\n",
    "\n",
    "# 将边索引转移到设备\n",
    "for edge_index in edge_indexs:\n",
    "    edge_index[0] = edge_index[0].to(device)\n",
    "    edge_index[1] = [tensor.to(device) for tensor in edge_index[1]]\n",
    "\n",
    "    \n",
    "    \n",
    "all_local_losses = []      # 每个epoch的batch本地损失列表\n",
    "all_global_losses = []     # 每个epoch的batch全局损失列表\n",
    "all_bsne_losses = []       # 每个epoch的总BSNE损失\n",
    "epoch_avg_local = []       # 每个epoch的平均本地损失\n",
    "epoch_avg_global = []      # 每个epoch的平均全局损失\n",
    "    \n",
    "print(\"\\n=== Starting Pretraining ===\")\n",
    "\n",
    "bsne_model.classifier.requires_grad_(False)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    filter(lambda p: p.requires_grad, bsne_model.parameters()),\n",
    "    lr=args['pretrain_lr'],\n",
    "    weight_decay=5e-5\n",
    ")\n",
    "pretrain_best_loss = float('inf')\n",
    "pretrain_no_improve = 0\n",
    "pretrain_early_stop = False\n",
    "\n",
    "for epoch in range(args['pretrain_epochs']):\n",
    "    if pretrain_early_stop:\n",
    "        break\n",
    "\n",
    "    bsne_model.train()\n",
    "    optimizer.zero_grad()\n",
    "\n",
    "    epoch_local_losses = []\n",
    "    epoch_global_losses = []\n",
    "    \n",
    "    # batch_centers = rd.sample(range(feat_data.shape[0]), args['batch_size'])\n",
    "    batch_centers = np.random.choice(range(feat_data.shape[0]), size=args['batch_size'], replace=False)\n",
    "\n",
    "\n",
    "    sample_size = args['sample_size']\n",
    "    temperature = 100\n",
    "    eps = 1e-10\n",
    "\n",
    "    total_loss = 0.0\n",
    "    batch_size = len(batch_centers)\n",
    "\n",
    "    # 为每个中心节点构建Bp子图\n",
    "    bp_subgraphs = []\n",
    "    for xi in batch_centers:\n",
    "        # 根据距离矩阵计算采样概率\n",
    "        dist_row = dist_matrix[xi].cpu().numpy()\n",
    "        probs = np.power(10.0, -dist_row)\n",
    "        probs[xi] = 0  # 排除自身\n",
    "        probs /= probs.sum()\n",
    "\n",
    "        # 采样邻居节点\n",
    "        available_nodes = len(dist_row) - 1\n",
    "        actual_sample_size = min(sample_size, available_nodes)\n",
    "\n",
    "        if actual_sample_size > 0:\n",
    "            neighbors = np.random.choice(len(dist_row), size=actual_sample_size, p=probs, replace=False)\n",
    "            subgraph_nodes = [xi] + neighbors.tolist()\n",
    "        else:\n",
    "            subgraph_nodes = [xi]\n",
    "\n",
    "        bp_subgraphs.append(subgraph_nodes)\n",
    "\n",
    "    # 为每个中心节点构建Bu子图（均匀采样）\n",
    "    bu_subgraphs = []\n",
    "    for xi in batch_centers:\n",
    "        dist_row = dist_matrix[xi].cpu().numpy()\n",
    "\n",
    "        # 随机采样节点构建Bu子图\n",
    "        available_nodes = len(dist_row) - 1\n",
    "        actual_sample_size = min(sample_size, available_nodes)\n",
    "\n",
    "        if actual_sample_size > 0:\n",
    "            neighbors = np.random.choice(len(dist_row), size=actual_sample_size, replace=False)\n",
    "            subgraph_nodes = [xi] + neighbors.tolist()\n",
    "        else:\n",
    "            subgraph_nodes = [xi]\n",
    "\n",
    "        bu_subgraphs.append(subgraph_nodes)\n",
    "    # 计算Bp子图中所有节点的特征\n",
    "    bp_features_batch = []\n",
    "    for bp_nodes in bp_subgraphs:\n",
    "        bp_node_features = []\n",
    "\n",
    "        # 为子图中的每个节点计算特征\n",
    "        for node_idx in bp_nodes:\n",
    "            # 构建该节点的一阶邻居子图\n",
    "            node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "\n",
    "            # 获取节点特征\n",
    "            _, node_feature = bsne_model([node_subgraph])\n",
    "\n",
    "            bp_node_features.append(node_feature.squeeze(0))\n",
    "\n",
    "        # 将该Bp子图所有节点的特征收集起来\n",
    "        bp_features_batch.append(torch.stack(bp_node_features))  # [sample_size+1, d_model]\n",
    "\n",
    "    # 计算Bu子图中所有节点的特征\n",
    "    bu_features_batch = []\n",
    "    for bu_nodes in bu_subgraphs:\n",
    "        bu_node_features = []\n",
    "\n",
    "        # 为子图中的每个节点计算特征\n",
    "        for node_idx in bu_nodes:\n",
    "            # 构建该节点的一阶邻居子图\n",
    "            node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "\n",
    "            # 获取节点特征\n",
    "            _, node_feature = bsne_model([node_subgraph])\n",
    "\n",
    "            bu_node_features.append(node_feature.squeeze(0))\n",
    "\n",
    "        # 将该Bu子图所有节点的特征收集起来\n",
    "        bu_features_batch.append(torch.stack(bu_node_features))  # [sample_size+1, d_model]\n",
    "\n",
    "    # 针对每一对Bp和Bu子图计算loss\n",
    "    for i in range(batch_size):\n",
    "        # 获取中心节点索引和特征\n",
    "        center_idx = batch_centers[i]\n",
    "        center_feature = bp_features_batch[i][0]  # 中心节点是第一个\n",
    "\n",
    "        # 仅获取Bp子图中的其他节点（排除中心节点）\n",
    "        other_bp_indices = bp_subgraphs[i][1:]\n",
    "        other_bp_features = bp_features_batch[i][1:]\n",
    "\n",
    "        # 计算中心节点到其他Bp节点的原始距离\n",
    "        orig_dists_bp = dist_matrix[center_idx][other_bp_indices].cpu().numpy()\n",
    "\n",
    "        # 计算中心节点到其他Bp节点的特征距离\n",
    "        feat_dists_bp = torch.cdist(center_feature.unsqueeze(0), other_bp_features).squeeze(0)\n",
    "\n",
    "        # 计算P向量（基于节点路径距离）\n",
    "    #     P = (1.0 + orig_dists_bp ** 2) ** -1\n",
    "        P = np.exp(-orig_dists_bp)\n",
    "    #     P = np.power(10.0, -orig_dists_bp)\n",
    "        P_sum = P.sum()\n",
    "        P = P / (P_sum + eps)\n",
    "        P = torch.tensor(P, device=device)\n",
    "\n",
    "        # 计算Q向量（基于特征距离）\n",
    "    #     Q = (1.0 + feat_dists_bp ** 2 / temperature) ** -1\n",
    "    #     Q = np.exp(-feat_dists_bp.detach().numpy())\n",
    "        Q = np.exp(-feat_dists_bp.detach().cpu().numpy())\n",
    "        Q_sum = Q.sum()\n",
    "        Q = Q / (Q_sum + eps)\n",
    "        Q = torch.tensor(Q, device=device)\n",
    "        \n",
    "        \n",
    "        log_ratio = torch.log(P + eps) - torch.log(Q + eps)\n",
    "        loss_local = log_ratio.mean()  # 公式(8)第一项\n",
    "\n",
    "        # 获取全局距离并计算概率\n",
    "        all_dists = dist_matrix[center_idx].cpu().numpy()\n",
    "        unnorm_probs_global = np.exp(-all_dists)\n",
    "        unnorm_probs_global[center_idx] = 0  # 排除自身\n",
    "\n",
    "        # 计算全局归一化常数\n",
    "        Z_global = unnorm_probs_global.sum() + eps\n",
    "\n",
    "        # 计算B_p节点的全局概率和\n",
    "        bp_global_prob_sum = unnorm_probs_global[other_bp_indices].sum() / Z_global\n",
    "\n",
    "        # 计算k_Bp (公式定义)\n",
    "        N = dist_matrix.shape[0]\n",
    "        k_Bp = bp_global_prob_sum * (N / len(other_bp_indices))\n",
    "\n",
    "        bu_features = bu_features_batch[i][1:]  # 排除中心节点\n",
    "        feat_dists_bu = torch.cdist(center_feature.unsqueeze(0), bu_features).squeeze(0)\n",
    "\n",
    "        sum_e_bu = torch.exp(-feat_dists_bu).sum()\n",
    "        sum_e_bp = torch.exp(-feat_dists_bp).sum()\n",
    "        global_ratio = k_Bp * (sum_e_bu / (sum_e_bp + eps))\n",
    "        loss_global = torch.log(global_ratio.clamp(min=eps, max=1e10))\n",
    "\n",
    "        # = 5. 损失合并 =\n",
    "        total_loss += loss_local + loss_global\n",
    "        \n",
    "        epoch_local_losses.append(loss_local.item())\n",
    "        epoch_global_losses.append(loss_global.item())\n",
    "#         print(f\"local_loss: {loss_local.item()},global_loss: {loss_global.item()}\")\n",
    "    all_local_losses.append(epoch_local_losses)\n",
    "    all_global_losses.append(epoch_global_losses)\n",
    "    \n",
    "    avg_local = np.mean(epoch_local_losses)\n",
    "    avg_global = np.mean(epoch_global_losses)\n",
    "    epoch_avg_local.append(avg_local)\n",
    "    epoch_avg_global.append(avg_global)\n",
    "    \n",
    "    bsne_loss = total_loss / batch_size\n",
    "    \n",
    "    bsne_loss_item = bsne_loss.item()\n",
    "    all_bsne_losses.append(bsne_loss_item)\n",
    "\n",
    "\n",
    "    bsne_loss.backward()\n",
    "    optimizer.step()\n",
    "\n",
    "    # 更新最佳损失和早停计数器\n",
    "    if bsne_loss.item() < pretrain_best_loss:\n",
    "        pretrain_best_loss = bsne_loss.item()\n",
    "        pretrain_no_improve = 0\n",
    "    else:\n",
    "        pretrain_no_improve += 1\n",
    "\n",
    "    if pretrain_no_improve >= args['pretrain_patience']:\n",
    "        print(f\"Pretrain early stopping at epoch {epoch}\")\n",
    "        pretrain_early_stop = True\n",
    "\n",
    "    writer.add_scalar('Pretrain/TSNE_Loss', bsne_loss.item(), epoch)\n",
    "    print(f'Pretrain Epoch: {epoch:03d}, BSNE Loss: {bsne_loss.item():.4f}')\n",
    "#     if epoch % 5 == 0:\n",
    "#         print(f'Pretrain Epoch: {epoch:03d}, BSNE Loss: {bsne_loss.item():.4f}')\n",
    "\n",
    "print(\"\\n=== Starting Fine-tuning ===\")\n",
    "bsne_model.classifier.requires_grad_(True)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    bsne_model.parameters(),\n",
    "    lr=args['finetune_lr'],\n",
    "    weight_decay=5e-5\n",
    ")\n",
    "scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n",
    "    optimizer, mode='max', factor=0.5, patience=10, verbose=True\n",
    ")\n",
    "\n",
    "best_val_auc = 0.0\n",
    "best_model_state = None\n",
    "train_pos, train_neg = pos_neg_split(idx_train, y_train)\n",
    "\n",
    "no_improve_epochs = 0\n",
    "early_stop = False\n",
    "\n",
    "for epoch in range(args['num_epochs']):\n",
    "    if early_stop:\n",
    "        break\n",
    "\n",
    "    bsne_model.train()\n",
    "    total_loss = 0.0\n",
    "\n",
    "    # 采样中心节点\n",
    "    batch_centers = rd.sample(train_pos + train_neg, args['batch_size'])\n",
    "    subgraph_data = []\n",
    "\n",
    "    for xi in batch_centers:\n",
    "        subgraph = create_node_subgraph(xi, feat_data, edge_indexs, device)\n",
    "        #         print(len(subgraph['features']))\n",
    "        subgraph_data.append(subgraph)\n",
    "\n",
    "    optimizer.zero_grad()\n",
    "\n",
    "    center_logits, _ = bsne_model(subgraph_data)  # [B, 2]\n",
    "\n",
    "    # 提取中心节点标签\n",
    "    labels_center = torch.tensor([labels[xi] for xi in batch_centers]).to(device).long()\n",
    "\n",
    "    # 计算分类损失\n",
    "    cls_loss = F.nll_loss(center_logits, labels_center)\n",
    "\n",
    "    cls_loss.backward()\n",
    "    optimizer.step()\n",
    "\n",
    "    total_loss += cls_loss.item()\n",
    "\n",
    "    avg_loss = total_loss / args['batch_size']\n",
    "    writer.add_scalar('FineTune/Train_Loss', avg_loss, epoch)\n",
    "\n",
    "    if epoch % 5 == 0:\n",
    "        val_auc, val_ap, val_f1, val_g_mean = test(idx_val, y_val, bsne_model, feat_data, edge_indexs, device)\n",
    "\n",
    "        writer.add_scalar('Validation/AUC', val_auc, epoch)\n",
    "        writer.add_scalar('Validation/F1', val_f1, epoch)\n",
    "        writer.add_scalar('Validation/GMean', val_g_mean, epoch)\n",
    "\n",
    "        print(f'Epoch: {epoch:03d} | Loss: {avg_loss:.4f} | Val AUC: {val_auc:.4f} | Val F1: {val_f1:.4f}')\n",
    "\n",
    "        scheduler.step(val_auc)\n",
    "\n",
    "        if val_auc > best_val_auc:\n",
    "            best_val_auc = val_auc\n",
    "            no_improve_epochs = 0\n",
    "            best_model_state = copy.deepcopy(bsne_model.state_dict())\n",
    "        else:\n",
    "            no_improve_epochs += 1\n",
    "\n",
    "        if no_improve_epochs >= args['patience']:\n",
    "            print(f\"Early stopping at epoch {epoch}\")\n",
    "            early_stop = True\n",
    "\n",
    "# 加载最佳模型\n",
    "bsne_model.load_state_dict(best_model_state)\n",
    "\n",
    "# 最终测试\n",
    "test_auc, test_ap, test_f1, test_g_mean = test(idx_test, y_test, bsne_model, feat_data, edge_indexs, device)\n",
    "print(f'\\n=== Final Test Results ===')\n",
    "print(f'Test AUC: {test_auc:.4f} | Test AP: {test_ap:.4f} | Test F1: {test_f1:.4f} | G-mean: {test_g_mean:.4f}')\n",
    "writer.close()\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "7931b474",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(0.1246, device='cuda:0', dtype=torch.float64)"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "loss_local"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "bc03aa93",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(0.0442, device='cuda:0', grad_fn=<LogBackward0>)"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "loss_global"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "68bbd875",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 仅进行预训练测试（新）\n",
    "\n",
    "# 参数设置\n",
    "args = {\n",
    "    \"dataset\": \"amazon\",\n",
    "    \"batch_size\": 8,\n",
    "    \"sample_size\": 20,\n",
    "    \"weight_decay\": 0.00005,\n",
    "    \"emb_size\": 32,\n",
    "    \"pretrain_epochs\": 100,\n",
    "    \"pretrain_lr\": 0.0001,\n",
    "    \"finetune_lr\": 0.0005,\n",
    "    \"num_epochs\": 100,\n",
    "    \"pretrain_patience\": 20,\n",
    "    \"patience\": 30,\n",
    "    \"tsne_weight\": 0.3,\n",
    "    \"weight\": 0.6,\n",
    "    \"layers\": 7,\n",
    "    \"test_size\": 0.6,\n",
    "    \"val_size\": 0.5,\n",
    "    \"layers_tree\": 7,\n",
    "    \"seed\": 76,\n",
    "    \"num_heads\": 2,\n",
    "    \"drop_rate\": 0.5\n",
    "}\n",
    "\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "print(device)\n",
    "\n",
    "timestamp = time.strftime(\"%Y%m%d-%H%M%S\")\n",
    "writer = SummaryWriter(f'runs/{args[\"dataset\"]}_{timestamp}')\n",
    "\n",
    "print('loading data...')\n",
    "prefix = \"../../data/\"\n",
    "\n",
    "edge_indexs, feat_data, labels = load_data(args['dataset'], args['layers_tree'], prefix)\n",
    "\n",
    "np.random.seed(args['seed'])\n",
    "rd.seed(args['seed'])\n",
    "\n",
    "if args['dataset'] == 'yelp':\n",
    "    index = list(range(len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels, stratify=labels,\n",
    "                                                                    test_size=args['test_size'], random_state=2,\n",
    "                                                                    shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"YelpChi_shortest_distance.pkl\")\n",
    "elif args['dataset'] == 'amazon':\n",
    "    index = list(range(3305, len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels[3305:],\n",
    "                                                                    stratify=labels[3305:],\n",
    "                                                                    test_size=args['test_size'],\n",
    "                                                                    random_state=2, shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"Amazon_shortest_distance.pkl\")\n",
    "\n",
    "with open(dist_path, 'rb') as f:\n",
    "    dist_data = pickle.load(f)\n",
    "    dist_matrix = torch.tensor(dist_data['dist_matrix']).to(device)\n",
    "\n",
    "# 准备特征数据\n",
    "feat_data = torch.tensor(feat_data).float()\n",
    "# 特征归一化\n",
    "scaler = MinMaxScaler()\n",
    "feat_data = torch.tensor(scaler.fit_transform(feat_data)).float().to(device)\n",
    "\n",
    "# 初始化模型\n",
    "bsne_model = BSNE_Transformer(\n",
    "    in_feat=feat_data.shape[1],\n",
    "    out_feat=2,\n",
    "    relation_nums=len(edge_indexs),\n",
    "    d_model=64,\n",
    "    nhead=args['num_heads'],\n",
    "    num_layers=3,\n",
    "    dim_feedforward=256,\n",
    "    drop_rate=args['drop_rate']\n",
    ").to(device)\n",
    "\n",
    "# 将边索引转移到设备\n",
    "for edge_index in edge_indexs:\n",
    "    edge_index[0] = edge_index[0].to(device)\n",
    "    edge_index[1] = [tensor.to(device) for tensor in edge_index[1]]\n",
    "\n",
    "print(\"\\n=== Starting Pretraining ===\")\n",
    "\n",
    "bsne_model.classifier.requires_grad_(False)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    filter(lambda p: p.requires_grad, bsne_model.parameters()),\n",
    "    lr=args['pretrain_lr'],\n",
    "    weight_decay=5e-5\n",
    ")\n",
    "pretrain_best_loss = float('inf')\n",
    "pretrain_no_improve = 0\n",
    "pretrain_early_stop = False\n",
    "\n",
    "for epoch in range(args['pretrain_epochs']):\n",
    "    if pretrain_early_stop:\n",
    "        break\n",
    "\n",
    "    bsne_model.train()\n",
    "    optimizer.zero_grad()\n",
    "\n",
    "    # batch_centers = rd.sample(range(feat_data.shape[0]), args['batch_size'])\n",
    "    batch_centers = np.random.choice(range(feat_data.shape[0]), size=args['batch_size'], replace=False)\n",
    "\n",
    "\n",
    "    sample_size = args['sample_size']\n",
    "    temperature = 100\n",
    "    eps = 1e-10\n",
    "\n",
    "    total_loss = 0.0\n",
    "    batch_size = len(batch_centers)\n",
    "\n",
    "    # 为每个中心节点构建Bp子图\n",
    "    bp_subgraphs = []\n",
    "    for xi in batch_centers:\n",
    "        # 根据距离矩阵计算采样概率\n",
    "        dist_row = dist_matrix[xi].cpu().numpy()\n",
    "        probs = np.power(10.0, -dist_row)\n",
    "        probs[xi] = 0  # 排除自身\n",
    "        probs /= probs.sum()\n",
    "\n",
    "        # 采样邻居节点\n",
    "        available_nodes = len(dist_row) - 1\n",
    "        actual_sample_size = min(sample_size, available_nodes)\n",
    "\n",
    "        if actual_sample_size > 0:\n",
    "            neighbors = np.random.choice(len(dist_row), size=actual_sample_size, p=probs, replace=False)\n",
    "            subgraph_nodes = [xi] + neighbors.tolist()\n",
    "        else:\n",
    "            subgraph_nodes = [xi]\n",
    "\n",
    "        bp_subgraphs.append(subgraph_nodes)\n",
    "\n",
    "    # 为每个中心节点构建Bu子图（均匀采样）\n",
    "    bu_subgraphs = []\n",
    "    for xi in batch_centers:\n",
    "        dist_row = dist_matrix[xi].cpu().numpy()\n",
    "\n",
    "        # 随机采样节点构建Bu子图\n",
    "        available_nodes = len(dist_row) - 1\n",
    "        actual_sample_size = min(sample_size, available_nodes)\n",
    "\n",
    "        if actual_sample_size > 0:\n",
    "            neighbors = np.random.choice(len(dist_row), size=actual_sample_size, replace=False)\n",
    "            subgraph_nodes = [xi] + neighbors.tolist()\n",
    "        else:\n",
    "            subgraph_nodes = [xi]\n",
    "\n",
    "        bu_subgraphs.append(subgraph_nodes)\n",
    "    # 计算Bp子图中所有节点的特征\n",
    "    bp_features_batch = []\n",
    "    for bp_nodes in bp_subgraphs:\n",
    "        bp_node_features = []\n",
    "\n",
    "        # 为子图中的每个节点计算特征\n",
    "        for node_idx in bp_nodes:\n",
    "            # 构建该节点的一阶邻居子图\n",
    "            node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "\n",
    "            # 获取节点特征\n",
    "            _, node_feature = bsne_model([node_subgraph])\n",
    "\n",
    "            bp_node_features.append(node_feature.squeeze(0))\n",
    "\n",
    "        # 将该Bp子图所有节点的特征收集起来\n",
    "        bp_features_batch.append(torch.stack(bp_node_features))  # [sample_size+1, d_model]\n",
    "\n",
    "    # 计算Bu子图中所有节点的特征\n",
    "    bu_features_batch = []\n",
    "    for bu_nodes in bu_subgraphs:\n",
    "        bu_node_features = []\n",
    "\n",
    "        # 为子图中的每个节点计算特征\n",
    "        for node_idx in bu_nodes:\n",
    "            # 构建该节点的一阶邻居子图\n",
    "            node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)\n",
    "\n",
    "            # 获取节点特征\n",
    "            _, node_feature = bsne_model([node_subgraph])\n",
    "\n",
    "            bu_node_features.append(node_feature.squeeze(0))\n",
    "\n",
    "        # 将该Bu子图所有节点的特征收集起来\n",
    "        bu_features_batch.append(torch.stack(bu_node_features))  # [sample_size+1, d_model]\n",
    "\n",
    "    # 针对每一对Bp和Bu子图计算loss\n",
    "    for i in range(batch_size):\n",
    "        # 获取中心节点索引和特征\n",
    "        center_idx = batch_centers[i]\n",
    "        center_feature = bp_features_batch[i][0]  # 中心节点是第一个\n",
    "\n",
    "        # 仅获取Bp子图中的其他节点（排除中心节点）\n",
    "        other_bp_indices = bp_subgraphs[i][1:]\n",
    "        other_bp_features = bp_features_batch[i][1:]\n",
    "\n",
    "        # 计算中心节点到其他Bp节点的原始距离\n",
    "        orig_dists_bp = dist_matrix[center_idx][other_bp_indices].cpu().numpy()\n",
    "\n",
    "        # 计算中心节点到其他Bp节点的特征距离\n",
    "        feat_dists_bp = torch.cdist(center_feature.unsqueeze(0), other_bp_features).squeeze(0)\n",
    "\n",
    "        # 计算P向量（基于节点路径距离）\n",
    "    #     P = (1.0 + orig_dists_bp ** 2) ** -1\n",
    "        P = np.exp(-orig_dists_bp)\n",
    "    #     P = np.power(10.0, -orig_dists_bp)\n",
    "        P_sum = P.sum()\n",
    "        P = P / (P_sum + eps)\n",
    "        P = torch.tensor(P, device=device)\n",
    "\n",
    "        # 计算Q向量（基于特征距离）\n",
    "    #     Q = (1.0 + feat_dists_bp ** 2 / temperature) ** -1\n",
    "    #     Q = np.exp(-feat_dists_bp.detach().numpy())\n",
    "        Q = np.exp(-feat_dists_bp.detach().cpu().numpy())\n",
    "        Q_sum = Q.sum()\n",
    "        Q = Q / (Q_sum + eps)\n",
    "        Q = torch.tensor(Q, device=device)\n",
    "        \n",
    "        \n",
    "        log_ratio = torch.log(P + eps) - torch.log(Q + eps)\n",
    "        loss_local = log_ratio.mean()  # 公式(8)第一项\n",
    "\n",
    "        # 获取全局距离并计算概率\n",
    "        all_dists = dist_matrix[center_idx].cpu().numpy()\n",
    "        unnorm_probs_global = np.exp(-all_dists)\n",
    "        unnorm_probs_global[center_idx] = 0  # 排除自身\n",
    "\n",
    "        # 计算全局归一化常数\n",
    "        Z_global = unnorm_probs_global.sum() + eps\n",
    "\n",
    "        # 计算B_p节点的全局概率和\n",
    "        bp_global_prob_sum = unnorm_probs_global[other_bp_indices].sum() / Z_global\n",
    "\n",
    "        # 计算k_Bp (公式定义)\n",
    "        N = dist_matrix.shape[0]\n",
    "        k_Bp = bp_global_prob_sum * (N / len(other_bp_indices))\n",
    "\n",
    "        bu_features = bu_features_batch[i][1:]  # 排除中心节点\n",
    "        feat_dists_bu = torch.cdist(center_feature.unsqueeze(0), bu_features).squeeze(0)\n",
    "\n",
    "        sum_e_bu = torch.exp(-feat_dists_bu).sum()\n",
    "        sum_e_bp = torch.exp(-feat_dists_bp).sum()\n",
    "        global_ratio = k_Bp * (sum_e_bu / (sum_e_bp + eps))\n",
    "        loss_global = torch.log(global_ratio.clamp(min=eps, max=1e10))\n",
    "\n",
    "        # = 5. 损失合并 =\n",
    "        total_loss += loss_local + loss_global\n",
    "        \n",
    "        print(f\"local_loss: {loss_local.item()},global_loss: {loss_global.item()}\")\n",
    "        '''\n",
    "        # 局部损失（KL散度）\n",
    "        loss_local = (torch.log(P + eps) - torch.log(Q + eps)).sum() / (len(bp_features_batch[i]-1))\n",
    "        \n",
    "\n",
    "        # 计算k_Bp\n",
    "        # 获取中心节点到图中所有节点的距离\n",
    "        all_dists = dist_matrix[center_idx]  # [N]\n",
    "\n",
    "    #     \n",
    "    #     unnorm_probs_all = (1.0 + all_dists ** 2) ** -1\n",
    "    # \n",
    "        unnorm_probs_all = np.exp(-all_dists.cpu().numpy())\n",
    "    #     \n",
    "    #     unnorm_probs_all = np.power(10.0, -all_dists.cpu().numpy())\n",
    "        # 排除中心节点自身（设置自身概率为0）\n",
    "    #     unnorm_probs_all = unnorm_probs_all.clone()  # 避免修改原始数据\n",
    "        unnorm_probs_all[center_idx] = 0\n",
    "        Z_i = unnorm_probs_all.sum()\n",
    "        sum_p_cond = P.sum()\n",
    "        N = dist_matrix.shape[0]\n",
    "        k_Bp = (sum_p_cond * N / len(other_bp_indices))/Z_i\n",
    "\n",
    "        # 计算中心节点到Bu子图节点的特征距离\n",
    "        bu_features = bu_features_batch[i]\n",
    "        feat_dists_bu = torch.cdist(center_feature.unsqueeze(0), bu_features).squeeze(0)\n",
    "\n",
    "        # 计算指数距离之和\n",
    "        sum_e_bu = torch.exp(-feat_dists_bu).sum()\n",
    "        sum_e_bp = torch.exp(-feat_dists_bp).sum()\n",
    "\n",
    "        # 全局损失\n",
    "        global_ratio = k_Bp * (sum_e_bu / (sum_e_bp + eps))\n",
    "        loss_global = torch.log(global_ratio.clamp(min=eps, max=1e10))  # 限制范围\n",
    "\n",
    "        # 合并损失\n",
    "        total_loss += loss_local + loss_global\n",
    "#         print('P:',P)\n",
    "#         print('Q:',Q)\n",
    "#         print(f\"local_loss: {loss_local.item()},global_loss: {loss_global.item()}\")\n",
    "        '''\n",
    "    bsne_loss = total_loss / batch_size\n",
    "\n",
    "    bsne_loss.backward()\n",
    "    optimizer.step()\n",
    "\n",
    "    # 更新最佳损失和早停计数器\n",
    "    if bsne_loss.item() < pretrain_best_loss:\n",
    "        pretrain_best_loss = bsne_loss.item()\n",
    "        pretrain_no_improve = 0\n",
    "    else:\n",
    "        pretrain_no_improve += 1\n",
    "\n",
    "    if pretrain_no_improve >= args['pretrain_patience']:\n",
    "        print(f\"Pretrain early stopping at epoch {epoch}\")\n",
    "        pretrain_early_stop = True\n",
    "\n",
    "    writer.add_scalar('Pretrain/TSNE_Loss', bsne_loss.item(), epoch)\n",
    "    print(f'Pretrain Epoch: {epoch:03d}, BSNE Loss: {bsne_loss.item():.4f}')\n",
    "#     if epoch % 5 == 0:\n",
    "#         print(f'Pretrain Epoch: {epoch:03d}, BSNE Loss: {bsne_loss.item():.4f}')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "021f65f9",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8c154254",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "35856941",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6b90f5a9",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cuda\n",
      "loading data...\n",
      "\n",
      "=== Starting Fine-tuning ===\n",
      "Epoch: 000 | Loss: 0.0738 | Val AUC: 0.7377 | Val F1: 0.4813\n",
      "Epoch: 005 | Loss: 0.0327 | Val AUC: 0.7711 | Val F1: 0.4751\n",
      "Epoch: 010 | Loss: 0.0570 | Val AUC: 0.7901 | Val F1: 0.4751\n",
      "Epoch: 015 | Loss: 0.0130 | Val AUC: 0.7999 | Val F1: 0.4751\n",
      "Epoch: 020 | Loss: 0.0194 | Val AUC: 0.8048 | Val F1: 0.4751\n",
      "Epoch: 025 | Loss: 0.0133 | Val AUC: 0.8058 | Val F1: 0.4751\n",
      "Epoch: 030 | Loss: 0.0061 | Val AUC: 0.8073 | Val F1: 0.4751\n",
      "Epoch: 040 | Loss: 0.0052 | Val AUC: 0.8104 | Val F1: 0.4751\n",
      "Epoch: 045 | Loss: 0.0132 | Val AUC: 0.8145 | Val F1: 0.4751\n",
      "Epoch: 050 | Loss: 0.0106 | Val AUC: 0.8172 | Val F1: 0.4751\n",
      "Epoch: 055 | Loss: 0.0579 | Val AUC: 0.8230 | Val F1: 0.4751\n",
      "Epoch: 060 | Loss: 0.0575 | Val AUC: 0.8258 | Val F1: 0.4751\n",
      "Epoch: 065 | Loss: 0.0387 | Val AUC: 0.8291 | Val F1: 0.4751\n",
      "Epoch: 070 | Loss: 0.0201 | Val AUC: 0.8295 | Val F1: 0.4751\n",
      "Epoch: 075 | Loss: 0.0790 | Val AUC: 0.8334 | Val F1: 0.4751\n",
      "Epoch: 080 | Loss: 0.0061 | Val AUC: 0.8310 | Val F1: 0.4751\n",
      "Epoch: 085 | Loss: 0.1036 | Val AUC: 0.8356 | Val F1: 0.4751\n",
      "Epoch: 090 | Loss: 0.0074 | Val AUC: 0.8397 | Val F1: 0.4751\n",
      "Epoch: 095 | Loss: 0.0090 | Val AUC: 0.8435 | Val F1: 0.4751\n",
      "Epoch: 100 | Loss: 0.0235 | Val AUC: 0.8443 | Val F1: 0.4751\n",
      "Epoch: 105 | Loss: 0.0167 | Val AUC: 0.8424 | Val F1: 0.4813\n",
      "Epoch: 110 | Loss: 0.0604 | Val AUC: 0.8468 | Val F1: 0.4811\n",
      "Epoch: 115 | Loss: 0.0043 | Val AUC: 0.8485 | Val F1: 0.4811\n",
      "Epoch: 120 | Loss: 0.1123 | Val AUC: 0.8517 | Val F1: 0.4811\n",
      "Epoch: 125 | Loss: 0.0068 | Val AUC: 0.8527 | Val F1: 0.4811\n",
      "Epoch: 130 | Loss: 0.0340 | Val AUC: 0.8542 | Val F1: 0.4927\n",
      "Epoch: 135 | Loss: 0.0160 | Val AUC: 0.8595 | Val F1: 0.5102\n",
      "Epoch: 140 | Loss: 0.0374 | Val AUC: 0.8611 | Val F1: 0.5240\n"
     ]
    }
   ],
   "source": [
    "# 仅进行分类测试（新）\n",
    "\n",
    "# 参数设置\n",
    "args = {\n",
    "    \"dataset\": \"amazon\",\n",
    "    \"batch_size\": 8,\n",
    "    \"sample_size\": 20,\n",
    "    \"weight_decay\": 0.00005,\n",
    "    \"emb_size\": 32,\n",
    "    \"pretrain_epochs\": 100,\n",
    "    \"pretrain_lr\": 0.0001,\n",
    "    \"finetune_lr\": 0.0001,\n",
    "    \"num_epochs\": 200,\n",
    "    \"pretrain_patience\": 20,\n",
    "    \"patience\": 30,\n",
    "    \"tsne_weight\": 0.3,\n",
    "    \"weight\": 0.6,\n",
    "    \"layers\": 7,\n",
    "    \"test_size\": 0.6,\n",
    "    \"val_size\": 0.5,\n",
    "    \"layers_tree\": 7,\n",
    "    \"seed\": 76,\n",
    "    \"num_heads\": 4,\n",
    "    \"drop_rate\": 0.5\n",
    "}\n",
    "\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "print(device)\n",
    "\n",
    "timestamp = time.strftime(\"%Y%m%d-%H%M%S\")\n",
    "writer = SummaryWriter(f'runs/{args[\"dataset\"]}_{timestamp}')\n",
    "\n",
    "print('loading data...')\n",
    "prefix = \"../../data/\"\n",
    "\n",
    "edge_indexs, feat_data, labels = load_data(args['dataset'], args['layers_tree'], prefix)\n",
    "\n",
    "np.random.seed(args['seed'])\n",
    "rd.seed(args['seed'])\n",
    "\n",
    "if args['dataset'] == 'yelp':\n",
    "    index = list(range(len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels, stratify=labels,\n",
    "                                                                    test_size=args['test_size'], random_state=2,\n",
    "                                                                    shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"YelpChi_shortest_distance.pkl\")\n",
    "elif args['dataset'] == 'amazon':\n",
    "    index = list(range(3305, len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels[3305:],\n",
    "                                                                    stratify=labels[3305:],\n",
    "                                                                    test_size=args['test_size'],\n",
    "                                                                    random_state=2, shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"Amazon_shortest_distance.pkl\")\n",
    "\n",
    "with open(dist_path, 'rb') as f:\n",
    "    dist_data = pickle.load(f)\n",
    "    dist_matrix = torch.tensor(dist_data['dist_matrix']).to(device)\n",
    "\n",
    "# 准备特征数据\n",
    "feat_data = torch.tensor(feat_data).float()\n",
    "# 特征归一化\n",
    "scaler = MinMaxScaler()\n",
    "feat_data = torch.tensor(scaler.fit_transform(feat_data)).float().to(device)\n",
    "\n",
    "# 初始化模型\n",
    "bsne_model = BSNE_Transformer(\n",
    "    in_feat=feat_data.shape[1],\n",
    "    out_feat=2,\n",
    "    relation_nums=len(edge_indexs),\n",
    "    d_model=64,\n",
    "    nhead=args['num_heads'],\n",
    "    num_layers=3,\n",
    "    dim_feedforward=256,\n",
    "    drop_rate=args['drop_rate']\n",
    ").to(device)\n",
    "\n",
    "# 将边索引转移到设备\n",
    "for edge_index in edge_indexs:\n",
    "    edge_index[0] = edge_index[0].to(device)\n",
    "    edge_index[1] = [tensor.to(device) for tensor in edge_index[1]]\n",
    "\n",
    "print(\"\\n=== Starting Fine-tuning ===\")\n",
    "bsne_model.classifier.requires_grad_(True)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    bsne_model.parameters(),\n",
    "    lr=args['finetune_lr'],\n",
    "    weight_decay=5e-5\n",
    ")\n",
    "scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n",
    "    optimizer, mode='max', factor=0.5, patience=10, verbose=True\n",
    ")\n",
    "\n",
    "best_val_auc = 0.0\n",
    "best_model_state = None\n",
    "train_pos, train_neg = pos_neg_split(idx_train, y_train)\n",
    "\n",
    "no_improve_epochs = 0\n",
    "early_stop = False\n",
    "\n",
    "for epoch in range(args['num_epochs']):\n",
    "    if early_stop:\n",
    "        break\n",
    "\n",
    "    bsne_model.train()\n",
    "    total_loss = 0.0\n",
    "\n",
    "    # 采样中心节点\n",
    "    batch_centers = rd.sample(train_pos + train_neg, args['batch_size'])\n",
    "    subgraph_data = []\n",
    "\n",
    "    for xi in batch_centers:\n",
    "        subgraph = create_node_subgraph(xi, feat_data, edge_indexs, device)\n",
    "        #         print(len(subgraph['features']))\n",
    "        subgraph_data.append(subgraph)\n",
    "\n",
    "    optimizer.zero_grad()\n",
    "\n",
    "    center_logits, _ = bsne_model(subgraph_data)  # [B, 2]\n",
    "\n",
    "    # 提取中心节点标签\n",
    "    labels_center = torch.tensor([labels[xi] for xi in batch_centers]).to(device).long()\n",
    "\n",
    "    # 计算分类损失\n",
    "    cls_loss = F.nll_loss(center_logits, labels_center)\n",
    "\n",
    "    cls_loss.backward()\n",
    "    optimizer.step()\n",
    "\n",
    "    total_loss += cls_loss.item()\n",
    "\n",
    "    avg_loss = total_loss / args['batch_size']\n",
    "    writer.add_scalar('FineTune/Train_Loss', avg_loss, epoch)\n",
    "\n",
    "    if epoch % 5 == 0:\n",
    "        val_auc, val_ap, val_f1, val_g_mean = test(idx_val, y_val, bsne_model, feat_data, edge_indexs, device)\n",
    "\n",
    "        writer.add_scalar('Validation/AUC', val_auc, epoch)\n",
    "        writer.add_scalar('Validation/F1', val_f1, epoch)\n",
    "        writer.add_scalar('Validation/GMean', val_g_mean, epoch)\n",
    "\n",
    "        print(f'Epoch: {epoch:03d} | Loss: {avg_loss:.4f} | Val AUC: {val_auc:.4f} | Val F1: {val_f1:.4f}')\n",
    "\n",
    "        scheduler.step(val_auc)\n",
    "\n",
    "        if val_auc > best_val_auc:\n",
    "            best_val_auc = val_auc\n",
    "            no_improve_epochs = 0\n",
    "            best_model_state = copy.deepcopy(bsne_model.state_dict())\n",
    "        else:\n",
    "            no_improve_epochs += 1\n",
    "\n",
    "        if no_improve_epochs >= args['patience']:\n",
    "            print(f\"Early stopping at epoch {epoch}\")\n",
    "            early_stop = True\n",
    "\n",
    "# 加载最佳模型\n",
    "bsne_model.load_state_dict(best_model_state)\n",
    "\n",
    "# 最终测试\n",
    "test_auc, test_ap, test_f1, test_g_mean = test(idx_test, y_test, bsne_model, feat_data, edge_indexs, device)\n",
    "print(f'\\n=== Final Test Results ===')\n",
    "print(f'Test AUC: {test_auc:.4f} | Test AP: {test_ap:.4f} | Test F1: {test_f1:.4f} | G-mean: {test_g_mean:.4f}')\n",
    "writer.close()\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "67151168",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "237718be",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e77289c4",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c837fb15",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d5365c63",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7bb7c130",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0732cb94",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "db830b11",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor(5.0090, device='cuda:0', grad_fn=<SumBackward0>)"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "Q_sum"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "5b739c47",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[2 1 1 1 2 2 2 2 2 2 2 1 2 1 2 2 2 2 2 2]\n",
      "P: tensor([0.0364, 0.0909, 0.0909, 0.0909, 0.0364, 0.0364, 0.0364, 0.0364, 0.0364,\n",
      "        0.0364, 0.0364, 0.0909, 0.0364, 0.0909, 0.0364, 0.0364, 0.0364, 0.0364,\n",
      "        0.0364, 0.0364], device='cuda:0', dtype=torch.float64)\n",
      "Q: tensor([0.0481, 0.0558, 0.0499, 0.0491, 0.0554, 0.0540, 0.0510, 0.0545, 0.0443,\n",
      "        0.0532, 0.0520, 0.0463, 0.0474, 0.0519, 0.0491, 0.0479, 0.0514, 0.0430,\n",
      "        0.0455, 0.0504], device='cuda:0', grad_fn=<DivBackward0>)\n",
      "tensor(1.0000, device='cuda:0', dtype=torch.float64)\n",
      "tensor(1., device='cuda:0', grad_fn=<SumBackward0>)\n"
     ]
    }
   ],
   "source": [
    "print(orig_dists_bp)\n",
    "print(\"P:\",P)\n",
    "print(\"Q:\",Q)\n",
    "print(P.sum())\n",
    "print(Q.sum())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "d8b4d0d6",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[0, 2, 2,  ..., 2, 2, 2],\n",
       "        [2, 0, 2,  ..., 2, 2, 2],\n",
       "        [2, 2, 0,  ..., 2, 2, 2],\n",
       "        ...,\n",
       "        [2, 2, 2,  ..., 0, 1, 1],\n",
       "        [2, 2, 2,  ..., 1, 0, 1],\n",
       "        [2, 2, 2,  ..., 1, 1, 0]], device='cuda:0', dtype=torch.int32)"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "dist_matrix"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "3eac6f4e",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([0.0364, 0.0909, 0.0909, 0.0909, 0.0364, 0.0364, 0.0364, 0.0364, 0.0364,\n",
       "        0.0364, 0.0364, 0.0909, 0.0364, 0.0909, 0.0364, 0.0364, 0.0364, 0.0364,\n",
       "        0.0364, 0.0364], device='cuda:0', dtype=torch.float64)"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "P"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "ec3d1ca5",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([8.01726913e-05, 8.01726913e-05, 8.01726913e-05, ...,\n",
       "       8.01726913e-05, 8.01726913e-05, 8.01726913e-05])"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "probs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "2fdb02fe",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Center 5103: distance counts: {0: 1, 1: 459, 2: 11334, 3: 150}\n",
      "Center 4736: distance counts: {0: 1, 1: 1919, 2: 10005, 3: 19}\n",
      "Center 9863: distance counts: {0: 1, 1: 580, 2: 11188, 3: 175}\n",
      "Center 10791: distance counts: {0: 1, 1: 98, 2: 9597, 3: 2248}\n",
      "Center 3137: distance counts: {0: 1, 1: 237, 2: 11442, 3: 264}\n",
      "Center 1875: distance counts: {0: 1, 1: 865, 2: 10998, 3: 80}\n",
      "Center 2359: distance counts: {0: 1, 1: 87, 2: 10409, 3: 1447}\n",
      "Center 475: distance counts: {0: 1, 1: 1813, 2: 10098, 3: 32}\n"
     ]
    }
   ],
   "source": [
    "for xi in batch_centers:\n",
    "    dist_row = dist_matrix[xi].cpu().numpy()\n",
    "    unique, counts = np.unique(dist_row, return_counts=True)\n",
    "    print(f\"Center {xi}: distance counts: {dict(zip(unique, counts))}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "42b1d988",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([ 5103,  4736,  9863, 10791,  3137,  1875,  2359,   475])"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# The eight sampled center node ids iterated in the distance-count loop.\n",
    "batch_centers"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "20ee4022",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Pull node 6071's distance row into host memory as a numpy array.\n",
    "dist = dist_matrix[6071].cpu().numpy()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "527696e8",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([2, 2, 2, ..., 2, 2, 2], dtype=int32)"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# int32 distance values from node 6071 to every node (mostly 2, per the output).\n",
    "dist"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "d3f4a9f9",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Unnormalized weights: exponentially down-weight nodes by their distance value.\n",
    "pb = np.exp(-dist)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "82ad1aba",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([0.13533528, 0.13533528, 0.13533528, ..., 0.13533528, 0.13533528,\n",
       "       0.13533528])"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Raw exp(-distance) weights, before normalization.\n",
    "pb"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "bcac101f",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([7.62116273e-05, 7.62116273e-05, 7.62116273e-05, ...,\n",
       "       7.62116273e-05, 7.62116273e-05, 7.62116273e-05])"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Normalize the weights into a probability distribution (sums to 1).\n",
    "pb /= pb.sum()\n",
    "pb"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "05d677ed",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Center 6071: probs: {2.803669087973917e-05: 102, 7.621162734851842e-05: 11122, 0.00020716468173877003: 719, 0.0005631319898689999: 1}\n"
     ]
    }
   ],
   "source": [
    "# Tabulate the distinct probability values and how many nodes carry each one.\n",
    "uvals, ucounts = np.unique(pb, return_counts=True)\n",
    "prob_hist = dict(zip(uvals, ucounts))\n",
    "print(f\"Center 6071: probs: {prob_hist}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "fd098860",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.8476257193702219"
      ]
     },
     "execution_count": 20,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Sanity check: total probability mass on the 11122 distance-2 nodes\n",
    "# (count x per-node probability from the tally above) ~= 0.848.\n",
    "11122*7.621162734851842e-05"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "7a20b676",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.03665072072437298"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Chance that 20 draws all land on distance-2 nodes, assuming independent\n",
    "# sampling — ~3.7%; TODO confirm the draws are actually independent.\n",
    "0.8476257193702219**20"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6739417b",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b837e37a",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1743808b",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "39aabd10",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "tsne01",
   "language": "python",
   "name": "tsne01"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
