{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "855d4fe2",
   "metadata": {},
   "outputs": [],
   "source": [
    "import pickle\n",
    "import random as rd\n",
    "import numpy as np\n",
    "import scipy.sparse as sp\n",
    "from scipy.io import loadmat\n",
    "import copy as cp\n",
    "from sklearn.metrics import f1_score, accuracy_score, recall_score, roc_auc_score, average_precision_score, confusion_matrix\n",
    "import matplotlib.pyplot as plt\n",
    "from sklearn.preprocessing import MinMaxScaler\n",
    "from sklearn.manifold import TSNE\n",
    "import torch\n",
    "import os\n",
    "from sklearn.metrics import confusion_matrix\n",
    "from torch_geometric.utils import to_dense_adj, subgraph\n",
    "import torch.nn.functional as F\n",
    "import torch.nn as nn\n",
    "import time\n",
    "import copy\n",
    "from torch.utils.tensorboard import SummaryWriter\n",
    "from sklearn.model_selection import train_test_split\n",
    "import dgl\n",
    "from collections import defaultdict\n",
    "\n",
    "from transformers.models.bert.modeling_bert import BertConfig,BertEncoder\n",
    "from torch.nn import init\n",
    "\n",
    "\n",
    "# Pickled adjacency-list files for each dataset relation, keyed '<dataset>_<relation>'.\n",
    "filelist = {\n",
    "    'amz_upu': 'amz_upu_adjlists.pickle',\n",
    "    'amz_usu': 'amz_usu_adjlists.pickle',\n",
    "    'amz_uvu': 'amz_uvu_adjlists.pickle',\n",
    "    'yelp_rsr': 'yelp_rsr_adjlists.pickle',\n",
    "    'yelp_rtr': 'yelp_rtr_adjlists.pickle',\n",
    "    'yelp_rur': 'yelp_rur_adjlists.pickle'\n",
    "}\n",
    "\n",
    "# Filename prefixes for the pickled decomposed relation matrices;\n",
    "# the i-th matrix is stored as '<prefix><i>.pkl' (consumed by load_data).\n",
    "file_matrix_prefix = {\n",
    "    'amz_upu': 'amazon_upu_matrix_',\n",
    "    'amz_usu': 'amazon_usu_matrix_',\n",
    "    'amz_uvu': 'amazon_uvu_matrix_',\n",
    "    'yelp_rsr': 'yelpnet_rsr_matrix_decompision_',\n",
    "    'yelp_rtr': 'yelpnet_rtr_matrix_decompision_',\n",
    "    'yelp_rur': 'yelpnet_rur_matrix_decompision_'\n",
    "}\n",
    "# NOTE: the triple-quoted block below is an old draft of the t-SNE loss kept\n",
    "# as a dead module-level string literal; it is never executed.\n",
    "'''\n",
    "def calculate_tsne_loss(emb_p, emb_u, dist_sub_p, dist_matrix, batch_p_global, batch_u_global, temperature=1,\n",
    "                        eps=1e-12):\n",
    "    \"\"\"\n",
    "    :param emb_p: B_p子图嵌入 [batch_size, d_model]\n",
    "    :param emb_u: B_u全局嵌入 [batch_size, d_model]\n",
    "    :param dist_sub_p: B_p子图距离矩阵 [subgraph_size, subgraph_size]\n",
    "    :param dist_matrix: 全图距离矩阵 [num_nodes, num_nodes]\n",
    "    :param batch_p_global: B_p节点的全局索引\n",
    "    :param batch_u_global: B_u节点的全局索引\n",
    "    \"\"\"\n",
    "    device = emb_p.device\n",
    "    batch_size = emb_p.size(0)\n",
    "\n",
    "    # --- 局部项：基于B_p子图 ---\n",
    "    # 生成子图局部索引映射表\n",
    "    subnode_to_local = {node: i for i, node in enumerate(batch_p_global)}\n",
    "    local_indices = [subnode_to_local[node] for node in batch_p_global]\n",
    "\n",
    "    # 提取局部距离矩阵\n",
    "    dist_p = dist_sub_p[local_indices][:, local_indices]\n",
    "\n",
    "    # 计算P\n",
    "    # P = torch.exp(-dist_p ** 2)\n",
    "    P = (1.0 + dist_p ** 2) ** -1\n",
    "    P.fill_diagonal_(0)\n",
    "    P = (P + P.T) / 2  # 对称化\n",
    "    P = P / (P.sum(dim=1, keepdim=True) + eps)\n",
    "    # P = P / P.sum()\n",
    "    P = torch.clamp(P, min=eps)\n",
    "\n",
    "    # 计算Q\n",
    "    # pairwise_dist = torch.cdist(emb_p, emb_p)\n",
    "    pairwise_dist = torch.cdist(emb_p, emb_p, p=2)\n",
    "    Q = (1.0 + pairwise_dist ** 2 / temperature) ** -1\n",
    "    Q.fill_diagonal_(0)\n",
    "    Q = (Q + Q.T) / 2\n",
    "    Q = Q / (Q.sum(dim=1, keepdim=True) + eps)\n",
    "    # Q = Q / Q.sum()\n",
    "    Q = torch.clamp(Q, min=eps)\n",
    "\n",
    "    # print(len(local_indices))\n",
    "    # print(len(dist_p))\n",
    "    # print(len(emb_p))\n",
    "    # print(P.sum(),Q.sum())\n",
    "\n",
    "    loss_local = (torch.log(P) - torch.log(Q)).mean()\n",
    "\n",
    "\n",
    "    # --- 全局项 ---\n",
    "    # 计算emb_p到emb_u的距离（平方欧氏距离）\n",
    "    dist_pu_sq = torch.cdist(emb_p, emb_u, p=2) ** 2\n",
    "    d_bu = (1.0 + dist_pu_sq / temperature) ** -1\n",
    "    d_bu = d_bu.sum(dim=1)\n",
    "\n",
    "    # 计算emb_p内部的距离（平方欧氏距离）\n",
    "    pairwise_dist_sq = pairwise_dist ** 2\n",
    "    d_bp = (1.0 + pairwise_dist_sq / temperature) ** -1\n",
    "    d_bp = d_bp.sum(dim=1) + eps\n",
    "\n",
    "    # 计算k_Bp（保持原逻辑）\n",
    "    p_xi_full = (1.0 + dist_matrix ** 2) ** -1  # 原始空间的学生t核\n",
    "    sum_p_xi = p_xi_full[batch_p_global][:, batch_p_global].sum(dim=1)\n",
    "    k_Bp = (sum_p_xi / p_xi_full[batch_p_global].sum(dim=1)) * (dist_matrix.shape[0] / batch_size)\n",
    "\n",
    "    ratio = (k_Bp.unsqueeze(1) * d_bu) / d_bp.unsqueeze(1)\n",
    "    loss_global = torch.log(ratio.clamp(min=eps)).mean()\n",
    "\n",
    "    # print(len(k_Bp),f\"k_Bp: {k_Bp}\")\n",
    "    # print(f\"d_bu mean: {d_bu.mean().item()}, d_bu max: {d_bu.max().item()}, d_bu min: {d_bu.min().item()}\")\n",
    "    # print(f\"d_bp mean: {d_bp.mean().item()}, d_bp max: {d_bp.max().item()}, d_bp min: {d_bp.min().item()}\")\n",
    "    #\n",
    "    #\n",
    "    # print(f\"P mean: {P.mean().item()}, P max: {P.max().item()}, P min: {P.min().item()}\")\n",
    "    # print(f\"Q mean: {Q.mean().item()}, Q max: {Q.max().item()}, Q min: {Q.min().item()}\")\n",
    "    # print(f\"local_loss: {loss_local.item()},global_loss: {loss_global.item()}\")\n",
    "\n",
    "    return loss_local + loss_global\n",
    "'''\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "def sample_uniform_batch(num_nodes, sample_size, device):\n",
    "    \"\"\"从全图均匀采样节点（用于全局项）\"\"\"\n",
    "    return torch.randint(0, num_nodes, (sample_size,), device=device).tolist()\n",
    "\n",
    "# def find_sigma(distances, target_perplexity, tol=1e-5, max_iter=50):\n",
    "#     \"\"\"\n",
    "#         通过二分查找法寻找合适的 Sigma 值，使得高斯分布的困惑度接近目标困惑度\n",
    "#         :param distances: 节点间距离矩阵\n",
    "#         :param target_perplexity: 目标困惑度值\n",
    "#         :param tol: 容忍误差\n",
    "#         :param max_iter: 最大迭代次数\n",
    "#         :return: 计算得到的 Sigma 值\n",
    "#     \"\"\"\n",
    "#     distances = distances.detach()\n",
    "#     sigmas = []\n",
    "#     for row in distances:\n",
    "#         sigma_min, sigma_max = 1e-5, 100.0\n",
    "#         best_sigma = sigma_max\n",
    "#         for _ in range(max_iter):\n",
    "#             sigma = (sigma_min + sigma_max) / 2\n",
    "#             p = torch.exp(-row ** 2 / (2 * sigma ** 2))\n",
    "#             p = p / (p.sum() + 1e-8)\n",
    "#             entropy = -torch.sum(p * torch.log2(p + 1e-12))\n",
    "#             current_perplexity = 2 ** entropy\n",
    "\n",
    "#             if abs(current_perplexity - target_perplexity) < tol:\n",
    "#                 best_sigma = sigma\n",
    "#                 break\n",
    "\n",
    "#             if current_perplexity > target_perplexity:\n",
    "#                 sigma_max = sigma\n",
    "#             else:\n",
    "#                 sigma_min = sigma\n",
    "#         sigmas.append(best_sigma)\n",
    "#     return torch.tensor(sigmas, device=distances.device)\n",
    "\n",
    "def sample_batch_p(x, dist_row, batch_size, temperature=1.0):\n",
    "    \"\"\"\n",
    "        根据距离的概率分布采样邻居节点\n",
    "        :param x: 当前节点的索引\n",
    "        :param dist_row: 当前节点与其他所有节点的距离向量\n",
    "        :param batch_size: 采样批次的大小\n",
    "        :param temperature: 温度参数，控制采样的集中程度\n",
    "        :return: 采样得到的邻居节点索引列表\n",
    "    \"\"\"\n",
    "    probs = torch.softmax(-dist_row / temperature, dim=-1)\n",
    "    probs[x] = 0\n",
    "    probs = probs / probs.sum()\n",
    "    batch = torch.multinomial(probs, batch_size, replacement=False)\n",
    "    return batch.tolist()\n",
    "\n",
    "def sample_subgraph(batch_centers, dist_matrix, sample_size=20, temperature=0.1):\n",
    "    \"\"\"\n",
    "        采样子图，包含中心节点及其邻居节点\n",
    "        :param batch_centers: 中心节点的索引列表\n",
    "        :param dist_matrix: 节点间距离矩阵\n",
    "        :param sample_size: 每个中心节点采样的邻居数量\n",
    "        :param temperature: 温度参数\n",
    "        :return: 采样得到的子图节点索引列表\n",
    "    \"\"\"\n",
    "    sub_nodes = set(batch_centers)\n",
    "    for xi in batch_centers:\n",
    "        dist_row = dist_matrix[xi].cpu().numpy()\n",
    "        probs = np.exp(-dist_row / temperature)\n",
    "        probs[xi] = 0\n",
    "        probs /= probs.sum()\n",
    "        neighbors = np.random.choice(len(dist_row), size=sample_size, p=probs, replace=False)\n",
    "        sub_nodes.update(neighbors.tolist())\n",
    "    sub_nodes = list(sub_nodes)\n",
    "    return sub_nodes\n",
    "\n",
    "def get_subgraph_edges(edge_index, sub_nodes):\n",
    "    \"\"\"\n",
    "        提取子图包含的边\n",
    "        :param edge_index: 原图的边索引\n",
    "        :param sub_nodes: 子图的节点索引列表\n",
    "        :return: 子图的边索引\n",
    "    \"\"\"\n",
    "    mask = torch.isin(edge_index[0], sub_nodes) & torch.isin(edge_index[1], sub_nodes)\n",
    "    return edge_index[:, mask]\n",
    "\n",
    "def iterate_batches(indices, batch_size, shuffle=True):\n",
    "    \"\"\"\n",
    "        将索引列表划分为指定大小的批次\n",
    "        :param indices: 样本索引列表\n",
    "        :param batch_size: 每个批次的大小\n",
    "        :param shuffle: 是否打乱顺序\n",
    "        :return: 生成批次索引的迭代器\n",
    "    \"\"\"\n",
    "    if shuffle:\n",
    "        rd.shuffle(indices)\n",
    "    for i in range(0, len(indices), batch_size):\n",
    "        yield indices[i:i + batch_size]\n",
    "\n",
    "def calculate_g_mean(y_true, y_pred):\n",
    "    \"\"\"Geometric mean of per-class recall (sensitivity).\n",
    "\n",
    "    Generalised from the original binary-only version, which divided by\n",
    "    zero when a class had no support; it now agrees with the multi-class\n",
    "    redefinition later in this module (for two classes with non-zero\n",
    "    support the result is identical: sqrt(sensitivity * specificity)).\n",
    "\n",
    "    :param y_true: ground-truth labels\n",
    "    :param y_pred: predicted labels\n",
    "    :return: geometric mean of the per-class recalls\n",
    "    \"\"\"\n",
    "    cm = confusion_matrix(y_true, y_pred)\n",
    "    recalls = []\n",
    "    for cls in range(len(cm)):\n",
    "        support = cm[cls, :].sum()  # TP + FN for this class\n",
    "        recalls.append(cm[cls, cls] / support if support != 0 else 0)\n",
    "    return np.prod(recalls) ** (1 / len(recalls))\n",
    "\n",
    "def dict_to_edge_index(edge_dict):\n",
    "    source_nodes = []\n",
    "    target_nodes = []\n",
    "    for src, targets in edge_dict.items():\n",
    "        for target in targets:\n",
    "            source_nodes.append(src)\n",
    "            target_nodes.append(target)\n",
    "    edge_index = [source_nodes, target_nodes]\n",
    "    return torch.LongTensor(edge_index)\n",
    "\n",
    "def numpy_array_to_edge_index(np_array):\n",
    "    assert np_array.ndim == 2 and np_array.shape[0] == np_array.shape[1], \"Input must be a square matrix.\"\n",
    "    rows, cols = np.nonzero(np_array)\n",
    "    edge_index = np.vstack((rows, cols))\n",
    "    edge_index_tensor = torch.from_numpy(edge_index).long()\n",
    "    return edge_index_tensor\n",
    "\n",
    "def _load_relation(path):\n",
    "    \"\"\"Load a pickled adjacency-list dict and convert it to an edge_index tensor.\"\"\"\n",
    "    with open(path, 'rb') as file:\n",
    "        relation = pickle.load(file)\n",
    "    return dict_to_edge_index(relation)\n",
    "\n",
    "def _load_trees(matrix_prefix, k):\n",
    "    \"\"\"Load the k decomposed matrices '<matrix_prefix><i>.pkl' (i = 1..k) as edge_index tensors.\"\"\"\n",
    "    trees = []\n",
    "    for i in range(1, k + 1):\n",
    "        file_name = '{}{}.pkl'.format(matrix_prefix, i)\n",
    "        with open(file_name, 'rb') as file:\n",
    "            tree = pickle.load(file)\n",
    "        trees.append(numpy_array_to_edge_index(tree))\n",
    "    return trees\n",
    "\n",
    "def load_data(data, k=2, prefix=''):\n",
    "    \"\"\"Load a fraud dataset: multi-relation graphs, k-hop trees, features, labels.\n",
    "\n",
    "    The original six copy-pasted per-relation loading blocks are folded into\n",
    "    the two helpers above; the redundant file.close() calls after `with`\n",
    "    blocks are removed.\n",
    "\n",
    "    :param data: 'yelp' or 'amazon'\n",
    "    :param k: number of decomposed tree matrices per relation\n",
    "    :param prefix: directory containing the .mat / .pickle / .pkl files\n",
    "    :return: ([[edge_index, tree_list] per relation], feature matrix, labels)\n",
    "    :raises ValueError: for an unknown dataset name (was a silent None before)\n",
    "    \"\"\"\n",
    "    pickle_file = {key: os.path.join(prefix, filelist[key]) for key in filelist}\n",
    "    matrix_prefix = {key: os.path.join(prefix, file_matrix_prefix[key]) for key in filelist}\n",
    "\n",
    "    if data == 'yelp':\n",
    "        mat_name, relation_keys = 'YelpChi.mat', ['yelp_rur', 'yelp_rtr', 'yelp_rsr']\n",
    "    elif data == 'amazon':\n",
    "        mat_name, relation_keys = 'Amazon.mat', ['amz_upu', 'amz_usu', 'amz_uvu']\n",
    "    else:\n",
    "        raise ValueError(\"data must be 'yelp' or 'amazon', got {!r}\".format(data))\n",
    "\n",
    "    data_file = loadmat(os.path.join(prefix, mat_name))\n",
    "    labels = data_file['label'].flatten()\n",
    "    feat_data = data_file['features'].todense().A\n",
    "\n",
    "    relations = [[_load_relation(pickle_file[key]), _load_trees(matrix_prefix[key], k)]\n",
    "                 for key in relation_keys]\n",
    "    return relations, feat_data, labels\n",
    "\n",
    "\n",
    "def Visualization(labels, embedding, prefix):\n",
    "    \"\"\"Project a balanced node sample to 2-D with t-SNE and save the scatter plot as '<prefix>/HOGRL.png'.\"\"\"\n",
    "    pos, neg = pos_neg_split(list(range(len(labels))), labels)\n",
    "    balanced_idx = np.array(undersample(pos, neg, scale=1))\n",
    "    # Resample to a fixed 5000 points (with replacement) for plotting.\n",
    "    balanced_idx = np.random.choice(balanced_idx, size=5000, replace=True)\n",
    "    points = embedding[balanced_idx]\n",
    "    point_labels = labels[balanced_idx]\n",
    "\n",
    "    reduced = TSNE(n_components=2, random_state=43).fit_transform(points)\n",
    "    scaled = MinMaxScaler(feature_range=(0, 1)).fit_transform(reduced)\n",
    "    print(scaled.shape)\n",
    "\n",
    "    plt.figure(figsize=(8, 8))\n",
    "    plt.scatter(scaled[point_labels == 0, 0], scaled[point_labels == 0, 1], c='#14517C', label='Label 0', s=3)\n",
    "    plt.scatter(scaled[point_labels == 1, 0], scaled[point_labels == 1, 1], c='#FA7F6F', label='Label 1', s=3)\n",
    "\n",
    "    # Hide all axis decoration so only the point cloud remains.\n",
    "    ax = plt.gca()\n",
    "    for side in ('top', 'right', 'left', 'bottom'):\n",
    "        ax.spines[side].set_visible(False)\n",
    "    plt.xticks([])\n",
    "    plt.yticks([])\n",
    "    plt.xlim(0, 1)\n",
    "    plt.ylim(0, 1)\n",
    "\n",
    "    plt.savefig(os.path.join(prefix, 'HOGRL.png'))\n",
    "    plt.show()\n",
    "\n",
    "def normalize(mx):\n",
    "    rowsum = np.array(mx.sum(1)) + 0.01\n",
    "    r_inv = np.power(rowsum, -1).flatten()\n",
    "    r_inv[np.isinf(r_inv)] = 0.\n",
    "    r_mat_inv = sp.diags(r_inv)\n",
    "    mx = r_mat_inv.dot(mx)\n",
    "    return mx\n",
    "\n",
    "def pos_neg_split(nodes, labels):\n",
    "    # 正负样本分割\n",
    "    pos_nodes = []\n",
    "    neg_nodes = cp.deepcopy(nodes)\n",
    "    aux_nodes = cp.deepcopy(nodes)\n",
    "    for idx, label in enumerate(labels):\n",
    "        if label == 1:\n",
    "            pos_nodes.append(aux_nodes[idx])\n",
    "            neg_nodes.remove(aux_nodes[idx])\n",
    "\n",
    "    return pos_nodes, neg_nodes\n",
    "\n",
    "def undersample(pos_nodes, neg_nodes, scale=1):\n",
    "    # 对负样本进行下采样，平衡正负样本数量\n",
    "    aux_nodes = cp.deepcopy(neg_nodes)\n",
    "    aux_nodes = rd.sample(aux_nodes, k=int(len(pos_nodes) * scale))\n",
    "    batch_nodes = pos_nodes + aux_nodes\n",
    "\n",
    "    return batch_nodes\n",
    "\n",
    "def calculate_g_mean(y_true, y_pred):\n",
    "    \"\"\"Geometric mean of per-class recall (redefines the binary-only version above).\n",
    "\n",
    "    :param y_true: ground-truth labels\n",
    "    :param y_pred: predicted labels\n",
    "    :return: (product of per-class recalls) ** (1 / n_classes)\n",
    "    \"\"\"\n",
    "    cm = confusion_matrix(y_true, y_pred)\n",
    "    recalls = []\n",
    "    for cls in range(len(cm)):\n",
    "        true_pos = cm[cls, cls]\n",
    "        support = cm[cls, :].sum()\n",
    "        recalls.append(true_pos / support if support != 0 else 0)\n",
    "    return np.prod(recalls) ** (1 / len(recalls))\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "def test(idx_eval, y_eval, model, feat_data, edge_indexs, device):\n",
    "    \"\"\"Evaluate `model` on the nodes listed in `idx_eval`.\n",
    "\n",
    "    Builds a single-node subgraph per evaluation node (each subgraph holds\n",
    "    exactly one node, so only self-loop edges can survive the mask), runs\n",
    "    the whole batch through the model, and scores the positive-class\n",
    "    probabilities.\n",
    "\n",
    "    :param idx_eval: global indices of the nodes to evaluate\n",
    "    :param y_eval: ground-truth labels aligned with idx_eval\n",
    "    :param model: callable taking a list of subgraph dicts, returning (log-probs, embeddings)\n",
    "    :param feat_data: node feature tensor indexed by global node id\n",
    "    :param edge_indexs: per-relation [edge_index, tree_list] pairs (see load_data)\n",
    "    :param device: torch device for the subgraph edge tensors\n",
    "    :return: (AUC, average precision, macro-F1, g-mean)\n",
    "    \"\"\"\n",
    "    model.eval()\n",
    "    # with torch.no_grad():\n",
    "    #     logits, _ = model(feat_data.to(device), edge_indexs, sub_nodes=None)\n",
    "    #     x_softmax = torch.exp(logits).cpu().detach()\n",
    "    #     positive_class_probs = x_softmax[:, 1].numpy()[np.array(idx_eval)]\n",
    "    #     auc_score = roc_auc_score(np.array(y_eval), np.array(positive_class_probs))\n",
    "    #     ap_score = average_precision_score(np.array(y_eval), np.array(positive_class_probs))\n",
    "    #     label_prob = (np.array(positive_class_probs) >= 0.5).astype(int)\n",
    "    #     f1_score_val = f1_score(np.array(y_eval), label_prob, average='macro')\n",
    "    #     g_mean = calculate_g_mean(np.array(y_eval), label_prob)\n",
    "    #\n",
    "    # return auc_score, ap_score, f1_score_val, g_mean\n",
    "    with torch.no_grad():\n",
    "        subgraph_data = []\n",
    "\n",
    "        # Build one single-node subgraph per evaluation node.\n",
    "        for xi in idx_eval:\n",
    "            sub_nodes = [xi]\n",
    "            sub_edge_index = []\n",
    "            for rel_idx in range(len(edge_indexs)):\n",
    "                edge_index = edge_indexs[rel_idx][0].cpu().numpy()\n",
    "                # Keep only edges with both endpoints inside the subgraph.\n",
    "                mask = np.isin(edge_index[0], sub_nodes) & np.isin(edge_index[1], sub_nodes)\n",
    "                local_edges = edge_index[:, mask]\n",
    "\n",
    "                # Re-index surviving edges to subgraph-local node ids.\n",
    "                node_map = {n: i for i, n in enumerate(sub_nodes)}\n",
    "                src_nodes = [node_map[src] for src in local_edges[0]]\n",
    "                dst_nodes = [node_map[dst] for dst in local_edges[1]]\n",
    "\n",
    "                if len(src_nodes) > 0:\n",
    "                    edge_tensor = torch.tensor([src_nodes, dst_nodes], dtype=torch.long)\n",
    "                else:\n",
    "                    edge_tensor = torch.empty((2, 0), dtype=torch.long)\n",
    "\n",
    "                sub_edge_index.append(edge_tensor.to(device))\n",
    "\n",
    "            subgraph_data.append({\n",
    "                'features': feat_data[sub_nodes].clone().detach(),\n",
    "                'edges': sub_edge_index,\n",
    "                'global_idx': sub_nodes\n",
    "            })\n",
    "\n",
    "        # Model emits log-softmax scores (see multi_HOGRL_Transformer), so\n",
    "        # exp() recovers probabilities.\n",
    "        logits, _ = model(subgraph_data)\n",
    "        x_softmax = torch.exp(logits).cpu().detach()\n",
    "        # NOTE(review): if logits are [B, max_n, C], x_softmax[:, 1] indexes\n",
    "        # the node axis rather than the class axis -- confirm intended shape.\n",
    "        positive_class_probs = x_softmax[:, 1].numpy()\n",
    "        auc_score = roc_auc_score(np.array(y_eval), np.array(positive_class_probs))\n",
    "        ap_score = average_precision_score(np.array(y_eval), np.array(positive_class_probs))\n",
    "        label_prob = (np.array(positive_class_probs) >= 0.5).astype(int)\n",
    "        f1_score_val = f1_score(np.array(y_eval), label_prob, average='macro')\n",
    "        g_mean = calculate_g_mean(np.array(y_eval), label_prob)\n",
    "\n",
    "    return auc_score, ap_score, f1_score_val, g_mean\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "661e6d32",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0d7b8ed0",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2dd34ef8",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "d61435c2",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "\n",
    "\n",
    "class multi_HOGRL_Transformer(nn.Module):\n",
    "    \"\"\"Multi-relation graph Transformer over padded subgraph batches.\n",
    "\n",
    "    One BertEncoder per relation encodes the projected node features;\n",
    "    per-level tree projections are fused into each relation's output via a\n",
    "    learned softmax gate, and the concatenated relation outputs feed the\n",
    "    classifier.\n",
    "\n",
    "    NOTE(review): forward() references a global `device` that is not\n",
    "    defined in this cell -- it must exist elsewhere in the notebook.\n",
    "    \"\"\"\n",
    "    def __init__(self, in_feat, out_feat, relation_nums=3, d_model=256,\n",
    "                 nhead=8, num_layers=5, dim_feedforward=768,\n",
    "                 drop_rate=0.6, layers_tree=2, tsne_weight=0.1):\n",
    "        \"\"\"\n",
    "        :param in_feat: input feature dimension\n",
    "        :param out_feat: number of output classes\n",
    "        :param relation_nums: number of graph relations (one encoder each)\n",
    "        :param d_model: Transformer hidden size\n",
    "        :param nhead: attention heads per encoder layer\n",
    "        :param num_layers: encoder layers per relation\n",
    "        :param dim_feedforward: feed-forward width (encoders and tree projections)\n",
    "        :param drop_rate: dropout probability\n",
    "        :param layers_tree: number of tree levels fused per relation\n",
    "        :param tsne_weight: stored for external use; not read inside this class\n",
    "        \"\"\"\n",
    "        super().__init__()\n",
    "        self.relation_nums = relation_nums\n",
    "        self.d_model = d_model\n",
    "        self.layers_tree = layers_tree\n",
    "        self.feature_proj = nn.Linear(in_feat, d_model)\n",
    "\n",
    "\n",
    "        bert_config = BertConfig(\n",
    "            hidden_size=d_model,\n",
    "            num_hidden_layers=num_layers,\n",
    "            num_attention_heads=nhead,\n",
    "            intermediate_size=dim_feedforward,\n",
    "            hidden_dropout_prob=drop_rate,\n",
    "            attention_probs_dropout_prob=drop_rate,\n",
    "        )\n",
    "        self.transformer_encoders = nn.ModuleList([\n",
    "            BertEncoder(bert_config)\n",
    "            for _ in range(relation_nums)\n",
    "        ])\n",
    "\n",
    "        # Per-relation, per-tree-level MLP projections.\n",
    "        self.tree_projs = nn.ModuleList([\n",
    "            nn.ModuleList([nn.Sequential(\n",
    "                nn.Linear(d_model, dim_feedforward),\n",
    "                nn.ReLU(),\n",
    "                nn.Linear(dim_feedforward, d_model)\n",
    "            ) for _ in range(layers_tree)])\n",
    "            for _ in range(relation_nums)\n",
    "        ])\n",
    "\n",
    "        # One scalar gate per relation and tree level (softmaxed in forward).\n",
    "        self.gating_networks = nn.ModuleList([\n",
    "            nn.ModuleList([nn.Linear(d_model, 1)\n",
    "                           for _ in range(layers_tree)])\n",
    "            for _ in range(relation_nums)\n",
    "        ])\n",
    "\n",
    "        self.classifier = nn.Sequential(\n",
    "            nn.Linear(relation_nums * d_model, 512),\n",
    "            nn.ReLU(),\n",
    "            nn.Dropout(drop_rate),\n",
    "            nn.Linear(512, out_feat)\n",
    "        )\n",
    "\n",
    "        self.tsne_weight = tsne_weight\n",
    "\n",
    "        # Initialise all weights (Xavier for matrices, zeros for biases).\n",
    "        self._init_weights()\n",
    "\n",
    "    def _init_weights(self):\n",
    "        \"\"\"Xavier-init every weight matrix (>= 2-D); zero every bias.\"\"\"\n",
    "        for name, param in self.named_parameters():\n",
    "            if 'weight' in name:\n",
    "                if len(param.shape) >= 2:\n",
    "                    init.xavier_uniform_(param)\n",
    "            elif 'bias' in name:\n",
    "                init.zeros_(param)\n",
    "\n",
    "    def forward(self, subgraph_batch):\n",
    "        \"\"\"Classify a batch of subgraphs.\n",
    "\n",
    "        Each element of `subgraph_batch` is a dict with 'features'\n",
    "        [n_i, in_feat] and 'edges', a list of edge_index tensors whose\n",
    "        first `relation_nums` entries are the relation graphs; entries\n",
    "        sg['edges'][tree_level + 1] are reused as tree-level graphs.\n",
    "\n",
    "        :param subgraph_batch: list of subgraph dicts (padded to the largest)\n",
    "        :return: (log-softmax scores [B, max_n, out_feat],\n",
    "                  mean relation embedding [B, max_n, d_model])\n",
    "        \"\"\"\n",
    "        # Step 1: dynamic batching -- pad everything to the widest subgraph.\n",
    "        max_nodes = max(sg['features'].size(0) for sg in subgraph_batch)\n",
    "        batch_size = len(subgraph_batch)  # NOTE: currently unused below\n",
    "        \n",
    "        # Step 2: build padded feature / adjacency tensors.\n",
    "        padded_features = []\n",
    "        padded_adjs = {rel: [] for rel in range(self.relation_nums)}\n",
    "        tree_adjs = {tree_level: [] for tree_level in range(self.layers_tree)} \n",
    "\n",
    "        for sg in subgraph_batch:\n",
    "            # Pad node features up to max_nodes.\n",
    "            feat = F.pad(sg['features'], (0, 0, 0, max_nodes - sg['features'].size(0)))\n",
    "            padded_features.append(feat)\n",
    "            \n",
    "            # Dense adjacency for each relation graph.\n",
    "            for rel in range(self.relation_nums):\n",
    "                edge_index = sg['edges'][rel]\n",
    "                adj = to_dense_adj(edge_index, max_num_nodes=max_nodes)[0]\n",
    "                padded_adjs[rel].append(adj)\n",
    "                \n",
    "            # Tree-structure adjacency per tree level (zeros when absent).\n",
    "            for tree_level in range(self.layers_tree):\n",
    "                if tree_level < len(sg['edges']) - 1:  # make sure this tree level exists\n",
    "                    edge_index = sg['edges'][tree_level + 1]\n",
    "                    adj = to_dense_adj(edge_index, max_num_nodes=max_nodes, batch_size=None)[0].squeeze(0)\n",
    "                else:\n",
    "                    # relies on the global `device` (see class-level NOTE)\n",
    "                    adj = torch.zeros(max_nodes, max_nodes, device=device)\n",
    "                tree_adjs[tree_level].append(adj)\n",
    "\n",
    "        # Stack into batched tensors [batch, max_nodes, max_nodes].\n",
    "        features = torch.stack(padded_features)  # [B, max_n, in_feat]\n",
    "        relation_adjs = {k: torch.stack(v) for k, v in padded_adjs.items()} # [B, max_n, max_n]\n",
    "        tree_adjs = {k: torch.stack(v) for k, v in tree_adjs.items()} # [B, max_n, max_n]\n",
    "\n",
    "        # Step 3: project features into the model dimension.\n",
    "        x = self.feature_proj(features)  # [B, max_n, d_model]\n",
    "\n",
    "        # Process each relation independently.\n",
    "        relation_outputs = []\n",
    "        for rel_idx in range(self.relation_nums):\n",
    "            h = x.clone()\n",
    "            adj = relation_adjs[rel_idx].bool() # [B, max_n, max_n]\n",
    "            \n",
    "            # Transformer encoding over the relation graph.\n",
    "            # NOTE(review): BertEncoder layers normally expect an additive\n",
    "            # extended attention mask (0 keep / large negative masked);\n",
    "            # passing a raw 0/1 adjacency may not mask as intended -- confirm\n",
    "            # against the transformers version in use.\n",
    "            encoder = self.transformer_encoders[rel_idx]\n",
    "            for layer in encoder.layer:\n",
    "                h = layer(h, attention_mask=adj.type(torch.FloatTensor).to(device))[0]\n",
    "                \n",
    "            # Fuse tree-structure features.\n",
    "            tree_features = []\n",
    "            for tree_idx in range(self.layers_tree):\n",
    "                t_adj = tree_adjs.get(tree_idx, None)\n",
    "                if t_adj is not None:\n",
    "                    t_adj = t_adj.bool() # [B, max_n, max_n]\n",
    "\n",
    "                # Project features for this tree level.\n",
    "                h_tree = self.tree_projs[rel_idx][tree_idx](x) # [B, max_n, d_model]\n",
    "\n",
    "                if t_adj is not None:\n",
    "                    h_tree = self.transformer_encoders[rel_idx].layer[-1](h_tree, attention_mask=t_adj.type(torch.FloatTensor).to(device))[0]\n",
    "                tree_features.append(h_tree)\n",
    "\n",
    "            # Gated fusion: softmax over the per-level gate scores.\n",
    "            if tree_features:\n",
    "                gates = torch.stack(\n",
    "                    [self.gating_networks[rel_idx][i](t).squeeze(-1) for i, t in enumerate(tree_features)],\n",
    "                    dim=-1\n",
    "                )\n",
    "                alpha = F.softmax(gates, dim=-1)\n",
    "                fused = sum(\n",
    "                    t * alpha[..., i].unsqueeze(-1) \n",
    "                    for i, t in enumerate(tree_features)\n",
    "                )\n",
    "                h = h + fused\n",
    "\n",
    "            relation_outputs.append(h)\n",
    "            \n",
    "        combined = torch.cat(relation_outputs, dim=-1)  # [B, max_n, rel*d_model]\n",
    "        \n",
    "        logits = self.classifier(combined)  # [B, max_n, out_feat]\n",
    "        logits = F.log_softmax(logits, dim=-1)\n",
    "        \n",
    "        tsne_feats = torch.stack(relation_outputs, dim=2).mean(dim=2)  # [B, max_n, d_model]\n",
    "\n",
    "        return logits, tsne_feats\n",
    "    \n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e666ef89",
   "metadata": {},
   "outputs": [],
   "source": [
    "    \n",
    "'''\n",
    "class GraphTransformerLayer(nn.Module):\n",
    "    def __init__(self, d_model=256, nhead=8, dim_feedforward=768, dropout=0.1):\n",
    "        super().__init__()\n",
    "        self.linear1 = nn.Linear(d_model, dim_feedforward)\n",
    "        self.self_attn = nn.MultiheadAttention(\n",
    "            embed_dim=d_model,\n",
    "            num_heads=nhead,\n",
    "            dropout=dropout,\n",
    "            batch_first=True\n",
    "        )\n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "        self.linear2 = nn.Linear(dim_feedforward, d_model)\n",
    "        self.norm1 = nn.LayerNorm(d_model)\n",
    "        self.norm2 = nn.LayerNorm(d_model)\n",
    "        self.dropout1 = nn.Dropout(dropout)\n",
    "        self.dropout2 = nn.Dropout(dropout)\n",
    "        self.activation = nn.ReLU()\n",
    "\n",
    "    def forward(self, src, adj_mask=None):\n",
    "        attn_mask = self._create_attention_mask(adj_mask)\n",
    "\n",
    "        src2 = self.norm1(src)\n",
    "        src2, attn_weights = self.self_attn(\n",
    "            src2, src2, src2,\n",
    "            attn_mask=attn_mask\n",
    "        )\n",
    "        src = src + self.dropout1(src2)\n",
    "\n",
    "        src2 = self.norm2(src)\n",
    "        src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))\n",
    "        src = src + self.dropout2(src2)\n",
    "        return src\n",
    "\n",
    "    def _create_attention_mask(self, adj_mask):\n",
    "        if adj_mask is None:\n",
    "            return None\n",
    "        mask = (adj_mask == 0).bool()\n",
    "        mask = mask.repeat(self.self_attn.num_heads, 1, 1)\n",
    "        return mask\n",
    "\n",
    "\n",
    "class multi_HOGRL_Transformer(nn.Module):\n",
    "    def __init__(self, in_feat, out_feat, relation_nums=3, d_model=256,\n",
    "                 nhead=8, num_layers=5, dim_feedforward=768,\n",
    "                 drop_rate=0.6, layers_tree=2, tsne_weight=0.1):\n",
    "        super().__init__()\n",
    "        self.relation_nums = relation_nums\n",
    "        self.d_model = d_model\n",
    "        self.layers_tree = layers_tree\n",
    "        self.feature_proj = nn.Linear(in_feat, d_model)\n",
    "\n",
    "        self.transformer_layers = nn.ModuleList([\n",
    "            nn.ModuleList([GraphTransformerLayer(\n",
    "                d_model=d_model,\n",
    "                nhead=nhead,\n",
    "                dim_feedforward=dim_feedforward,\n",
    "                dropout=drop_rate\n",
    "            ) for _ in range(num_layers)])\n",
    "            for _ in range(relation_nums)\n",
    "        ])\n",
    "\n",
    "        self.tree_projs = nn.ModuleList([\n",
    "            nn.ModuleList([nn.Sequential(\n",
    "                nn.Linear(d_model, dim_feedforward),\n",
    "                nn.ReLU(),\n",
    "                nn.Linear(dim_feedforward, d_model)\n",
    "            ) for _ in range(layers_tree)])\n",
    "            for _ in range(relation_nums)\n",
    "        ])\n",
    "\n",
    "        self.gating_networks = nn.ModuleList([\n",
    "            nn.ModuleList([nn.Linear(d_model, 1)\n",
    "                           for _ in range(layers_tree)])\n",
    "            for _ in range(relation_nums)\n",
    "        ])\n",
    "\n",
    "        self.classifier = nn.Sequential(\n",
    "            nn.Linear(relation_nums * d_model, 512),\n",
    "            nn.ReLU(),\n",
    "            nn.Dropout(drop_rate),\n",
    "            nn.Linear(512, out_feat)\n",
    "        )\n",
    "\n",
    "        self.tsne_weight = tsne_weight\n",
    "\n",
    "\n",
    "    def forward(self, subgraph_batch):\n",
    "        # 步骤1：动态批处理准备\n",
    "        max_nodes = max(sg['features'].size(0) for sg in subgraph_batch)\n",
    "        batch_size = len(subgraph_batch)\n",
    "        \n",
    "        # 步骤2：构建填充后的张量\n",
    "        padded_features = []\n",
    "        padded_adjs = {rel: [] for rel in range(self.relation_nums)}\n",
    "        tree_adjs = {tree_level: [] for tree_level in range(self.layers_tree)} \n",
    "\n",
    "        for sg in subgraph_batch:\n",
    "            # 特征填充\n",
    "            feat = F.pad(sg['features'], (0,0,0,max_nodes-sg['features'].size(0)))\n",
    "            padded_features.append(feat)\n",
    "            \n",
    "            # 邻接矩阵处理\n",
    "            for rel in range(self.relation_nums):\n",
    "                edge_index = sg['edges'][rel]\n",
    "                adj = to_dense_adj(edge_index, max_num_nodes=max_nodes)[0]\n",
    "                padded_adjs[rel].append(adj)\n",
    "                \n",
    "            # 树结构处理\n",
    "            for tree_level in range(self.layers_tree):\n",
    "                if tree_level < len(sg['edges'])-1:  # 确保树结构存在\n",
    "                    edge_index = sg['edges'][tree_level+1]\n",
    "                    adj = to_dense_adj(edge_index, max_num_nodes=max_nodes, batch_size=None)[0].squeeze(0)\n",
    "                else:\n",
    "                    adj = torch.zeros(max_nodes, max_nodes, device=device)\n",
    "                tree_adjs[tree_level].append(adj)\n",
    "\n",
    "        # 转换为批量张量 [batch, max_nodes, max_nodes]\n",
    "        features = torch.stack(padded_features)  # [B, max_n, d_model]\n",
    "#         print('features.shape:',features.shape)\n",
    "        relation_adjs = {k: torch.stack(v) for k,v in padded_adjs.items()} # [B, max_n, max_n]\n",
    "        tree_adjs = {k: torch.stack(v) for k,v in tree_adjs.items()} # [B, max_n, max_n]\n",
    "\n",
    "        # 步骤3：特征投影\n",
    "        x = self.feature_proj(features)  # [B, max_n, d_model]\n",
    "#         print('x:',x.shape)\n",
    "        # 多关系处理\n",
    "        relation_outputs = []\n",
    "        for rel_idx in range(self.relation_nums):\n",
    "            h = x.clone()\n",
    "            adj = relation_adjs[rel_idx].bool() # [B, max_n, max_n]\n",
    "            \n",
    "            # Transformer处理\n",
    "            for layer in self.transformer_layers[rel_idx]:\n",
    "                h = layer(h, adj) # 输出维度 [B, max_n, d_model]\n",
    "                \n",
    "            # 树结构融合\n",
    "            tree_features = []\n",
    "            for tree_idx in range(self.layers_tree):\n",
    "                t_adj = tree_adjs.get(tree_idx, None)\n",
    "                if t_adj is not None:\n",
    "                    t_adj = t_adj.bool() # [B, max_n, max_n]\n",
    "\n",
    "                # 生成树特征\n",
    "                h_tree = self.tree_projs[rel_idx][tree_idx](x) # [B, max_n, d_model]\n",
    "\n",
    "                if t_adj is not None:\n",
    "                    h_tree = self.transformer_layers[rel_idx][-1](h_tree, t_adj)\n",
    "#                 print('h_tree.shape:',h_tree.shape)\n",
    "                tree_features.append(h_tree)\n",
    "\n",
    "            # 门控融合\n",
    "            if tree_features:\n",
    "                gates = torch.stack(\n",
    "                    [self.gating_networks[rel_idx][i](t).squeeze(-1) for i,t in enumerate(tree_features)],\n",
    "                    dim=-1\n",
    "                )# 输出维度 [B, max_n, layers_tree]\n",
    "#                 print('gates.shape:',gates.shape)\n",
    "                alpha = F.softmax(gates, dim=-1)# 输出维度 [B, max_n, layers_tree]\n",
    "#                 print('alpha.shape:',alpha.shape)\n",
    "                fused = sum(\n",
    "                    t * alpha[...,i].unsqueeze(-1) \n",
    "                    for i,t in enumerate(tree_features)\n",
    "                )\n",
    "                h = h + fused\n",
    "\n",
    "            relation_outputs.append(h)\n",
    "            \n",
    "        combined = torch.cat(relation_outputs, dim=-1)  # [B, max_n, rel*d_model]\n",
    "        \n",
    "        logits = self.classifier(combined)  # [B, max_n, out_feat]\n",
    "        logits = F.log_softmax(logits, dim=-1)\n",
    "        \n",
    "        tsne_feats = torch.stack(relation_outputs, dim=2).mean(dim=2)  # [B, max_n, d_model]\n",
    "\n",
    "        return logits, tsne_feats\n",
    "        \n",
    "'''"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "736b3427",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# NOTE: the block below is a retained legacy version of calculate_tsne_loss,\n",
    "# superseded by the active definition that follows (which adds the\n",
    "# batch_u_global_list parameter). Kept as a string literal for reference only;\n",
    "# it is never executed.\n",
    "'''\n",
    "def calculate_tsne_loss(emb_p_list, emb_u_list, dist_matrix, batch_p_global_list, temperature=1, eps=1e-12):\n",
    "    \"\"\"\n",
    "    修改后的损失函数适配批量子图结构\n",
    "    :param emb_p_list: B_p子图嵌入列表 [每个子图节点数, d_model]\n",
    "    :param emb_u_list: B_u全局嵌入列表 [每个子图节点数, d_model]\n",
    "    :param dist_matrix: 全图距离矩阵 [num_nodes, num_nodes]\n",
    "    :param batch_p_global_list: B_p各子图的全局索引列表\n",
    "    :param temperature: 温度参数\n",
    "    :param eps: 数值稳定系数\n",
    "    \"\"\"\n",
    "    total_loss = 0.0\n",
    "    batch_size = len(emb_p_list)\n",
    "    \n",
    "    # 遍历每个子图样本\n",
    "    for emb_p, emb_u, sub_global_idx in zip(emb_p_list, emb_u_list, batch_p_global_list):\n",
    "        # --- 局部项计算 ---\n",
    "        # 获取当前子图全局索引\n",
    "        sub_nodes = sub_global_idx\n",
    "        sub_size = len(sub_nodes)\n",
    "        \n",
    "        # 提取局部距离矩阵\n",
    "        dist_sub_p = dist_matrix[sub_nodes][:, sub_nodes]\n",
    "        \n",
    "        # 计算P矩阵\n",
    "        P = (1.0 + dist_sub_p ** 2) ** -1\n",
    "        P.fill_diagonal_(0)\n",
    "        P = (P + P.T) / 2  # 对称化\n",
    "        P = P / (P.sum(dim=1, keepdim=True) + eps)\n",
    "        P = torch.clamp(P, min=eps)\n",
    "        \n",
    "        # 计算Q矩阵\n",
    "        pairwise_dist = torch.cdist(emb_p, emb_p, p=2)\n",
    "        Q = (1.0 + pairwise_dist ** 2 / temperature) ** -1\n",
    "        Q.fill_diagonal_(0)\n",
    "        Q = (Q + Q.T) / 2\n",
    "        Q = Q / (Q.sum(dim=1, keepdim=True) + eps)\n",
    "        Q = torch.clamp(Q, min=eps)\n",
    "        \n",
    "        # 局部损失项\n",
    "        loss_local = (torch.log(P) - torch.log(Q)).mean()\n",
    "        \n",
    "        # --- 全局项计算 ---\n",
    "        # 计算与全局采样的相似度\n",
    "        dist_pu_sq = torch.cdist(emb_p, emb_u, p=2) ** 2\n",
    "        d_bu = (1.0 + dist_pu_sq / temperature) ** -1\n",
    "        d_bu = d_bu.sum(dim=1)\n",
    "        \n",
    "        # 计算子图内部相似度\n",
    "        d_bp = (1.0 + pairwise_dist ** 2 / temperature) ** -1\n",
    "        d_bp = d_bp.sum(dim=1) + eps\n",
    "        \n",
    "        # 计算k_Bp缩放系数\n",
    "        p_xi_full = (1.0 + dist_matrix ** 2) ** -1\n",
    "        sum_p_xi = p_xi_full[sub_nodes][:, sub_nodes].sum(dim=1)\n",
    "        k_Bp = (sum_p_xi / p_xi_full[sub_nodes].sum(dim=1)) * (dist_matrix.shape[0] / sub_size)\n",
    "        \n",
    "        # 全局损失项\n",
    "        ratio = (k_Bp.unsqueeze(1) * d_bu) / d_bp.unsqueeze(1)\n",
    "        loss_global = torch.log(ratio.clamp(min=eps)).mean()\n",
    "        \n",
    "        print(f\"local_loss: {loss_local.item()},global_loss: {loss_global.item()}\")\n",
    "        # 累计批次损失\n",
    "        total_loss += (loss_local + loss_global)\n",
    "    \n",
    "    # 返回平均损失\n",
    "    return total_loss / batch_size\n",
    "'''\n",
    "\n",
    "\n",
    "def calculate_tsne_loss(emb_p_list, emb_u_list, dist_matrix, batch_p_global_list, batch_u_global_list, temperature=1, eps=1e-12):\n",
    "    \"\"\"\n",
    "    :param emb_p_list: B_p子图嵌入列表 [每个子图节点数, d_model]\n",
    "    :param emb_u_list: B_u全局嵌入列表 [每个子图节点数, d_model]\n",
    "    :param dist_matrix: 全图距离矩阵 [num_nodes, num_nodes]\n",
    "    :param batch_p_global_list: B_p各子图的全局索引列表\n",
    "    :param batch_u_global_list: B_u各子图的全局索引列表\n",
    "    :param temperature: 温度参数\n",
    "    :param eps: 数值稳定系数\n",
    "    \"\"\"\n",
    "    total_loss = 0.0\n",
    "    batch_size = len(emb_p_list)\n",
    "    \n",
    "    # 遍历每个子图样本\n",
    "    for emb_p, emb_u, sub_global_idx, u_global_idx in zip(emb_p_list, emb_u_list, batch_p_global_list, batch_u_global_list):\n",
    "        # --- 局部项计算 ---\n",
    "        # 获取当前子图全局索引\n",
    "        sub_nodes = sub_global_idx\n",
    "        sub_size = len(sub_nodes)\n",
    "        \n",
    "        # 提取局部距离矩阵\n",
    "        dist_sub_p = dist_matrix[sub_nodes][:, sub_nodes]\n",
    "        \n",
    "        # 计算 P 矩阵\n",
    "        P = (1.0 + dist_sub_p ** 2) ** -1\n",
    "        P.fill_diagonal_(0)\n",
    "        P = (P + P.T) / 2  # 对称化\n",
    "        P = P / (P.sum(dim=1, keepdim=True) + eps)\n",
    "        P = torch.clamp(P, min=eps)\n",
    "        \n",
    "        # 计算 Q 矩阵\n",
    "        pairwise_dist = torch.cdist(emb_p, emb_p, p=2)\n",
    "        Q = (1.0 + pairwise_dist ** 2 / temperature) ** -1\n",
    "        Q.fill_diagonal_(0)\n",
    "        Q = (Q + Q.T) / 2\n",
    "        Q = Q / (Q.sum(dim=1, keepdim=True) + eps)\n",
    "        Q = torch.clamp(Q, min=eps)\n",
    "        \n",
    "        # 局部损失项\n",
    "        loss_local = (torch.log(P) - torch.log(Q)).mean()\n",
    "        \n",
    "        # --- 全局项计算 ---\n",
    "        # 计算与全局采样的相似度\n",
    "        dist_pu_sq = torch.cdist(emb_p, emb_u, p=2) ** 2\n",
    "        d_bu = (1.0 + dist_pu_sq / temperature) ** -1\n",
    "        d_bu = d_bu.sum(dim=1)\n",
    "        \n",
    "        # 计算子图内部相似度\n",
    "        d_bp = (1.0 + pairwise_dist ** 2 / temperature) ** -1\n",
    "        d_bp = d_bp.sum(dim=1) + eps\n",
    "        \n",
    "        # 计算 k_Bp 缩放系数\n",
    "        p_xi_full = (1.0 + dist_matrix ** 2) ** -1\n",
    "        sum_p_xi = p_xi_full[sub_nodes][:, sub_nodes].sum(dim=1)\n",
    "        k_Bp = (sum_p_xi / p_xi_full[sub_nodes].sum(dim=1)) * (dist_matrix.shape[0] / sub_size)\n",
    "        \n",
    "        # 计算 BU 采样的期望\n",
    "        dist_u_sq = torch.cdist(emb_p, emb_u, p=2) ** 2\n",
    "        e_dist_u = (1.0 + dist_u_sq / temperature) ** -1\n",
    "        e_bu = e_dist_u.mean(dim=1)\n",
    "        \n",
    "        # 全局损失项\n",
    "        ratio = (k_Bp.unsqueeze(1) * e_bu.unsqueeze(1) * d_bu) / d_bp.unsqueeze(1)\n",
    "        loss_global = torch.log(ratio.clamp(min=eps)).mean()\n",
    "        \n",
    "        print(\"emb_p:\", emb_p.shape)\n",
    "        print(\"emb_p:\", emb_p)\n",
    "        print(\"emb_u:\", emb_u.shape)\n",
    "        print(\"emb_u:\", emb_u)\n",
    "        print(\"pairwise_dist:\", pairwise_dist)\n",
    "#         print(f\"local_loss: {loss_local.item()},global_loss: {loss_global.item()}\")\n",
    "        # 累计批次损失\n",
    "        total_loss += (loss_local + loss_global)\n",
    "        \n",
    "#     print(\"P matrix:\", P)\n",
    "#     print(\"Q matrix:\", Q)\n",
    "#     print(\"d_bu:\", d_bu)\n",
    "#     print(\"d_bp:\", d_bp)\n",
    "#     print(\"k_Bp:\", k_Bp)\n",
    "#     print(\"e_bu:\", e_bu)\n",
    "#     print(\"ratio:\", ratio)\n",
    "    # 返回平均损失\n",
    "    return total_loss / batch_size"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "bb1105ae",
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cuda\n",
      "loading data...\n",
      "\n",
      "=== Starting Pretraining ===\n"
     ]
    },
    {
     "ename": "RuntimeError",
     "evalue": "The size of tensor a (2) must match the size of tensor b (64) at non-singleton dimension 1",
     "output_type": "error",
     "traceback": [
      "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
      "\u001b[31mRuntimeError\u001b[39m                              Traceback (most recent call last)",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[15]\u001b[39m\u001b[32m, line 156\u001b[39m\n\u001b[32m    153\u001b[39m         bu_global = np.random.choice(feat_data.shape[\u001b[32m0\u001b[39m], size=\u001b[38;5;28mlen\u001b[39m(sub_nodes), replace=\u001b[38;5;28;01mFalse\u001b[39;00m)\n\u001b[32m    154\u001b[39m         batch_u_global_list.append(bu_global)\n\u001b[32m--> \u001b[39m\u001b[32m156\u001b[39m     _, embeddings_p = \u001b[43mbsne_model\u001b[49m\u001b[43m(\u001b[49m\u001b[43msubgraph_data\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m    158\u001b[39m \u001b[38;5;66;03m#     print(batch_u_global_list)\u001b[39;00m\n\u001b[32m    160\u001b[39m     subgraph_data_bu = []\n",
      "\u001b[36mFile \u001b[39m\u001b[32m~/run/LML/anaconda3/envs/tsne/lib/python3.11/site-packages/torch/nn/modules/module.py:1518\u001b[39m, in \u001b[36mModule._wrapped_call_impl\u001b[39m\u001b[34m(self, *args, **kwargs)\u001b[39m\n\u001b[32m   1516\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m._compiled_call_impl(*args, **kwargs)  \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[32m   1517\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[32m-> \u001b[39m\u001b[32m1518\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[43m*\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
      "\u001b[36mFile \u001b[39m\u001b[32m~/run/LML/anaconda3/envs/tsne/lib/python3.11/site-packages/torch/nn/modules/module.py:1527\u001b[39m, in \u001b[36mModule._call_impl\u001b[39m\u001b[34m(self, *args, **kwargs)\u001b[39m\n\u001b[32m   1522\u001b[39m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[32m   1523\u001b[39m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[32m   1524\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m._backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m._backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m._forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m._forward_pre_hooks\n\u001b[32m   1525\u001b[39m         \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[32m   1526\u001b[39m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[32m-> \u001b[39m\u001b[32m1527\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[43m*\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m   1529\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m   1530\u001b[39m     result = \u001b[38;5;28;01mNone\u001b[39;00m\n",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[13]\u001b[39m\u001b[32m, line 107\u001b[39m, in \u001b[36mmulti_HOGRL_Transformer.forward\u001b[39m\u001b[34m(self, subgraph_batch)\u001b[39m\n\u001b[32m    105\u001b[39m encoder = \u001b[38;5;28mself\u001b[39m.transformer_encoders[rel_idx]\n\u001b[32m    106\u001b[39m \u001b[38;5;28;01mfor\u001b[39;00m layer \u001b[38;5;129;01min\u001b[39;00m encoder.layer:\n\u001b[32m--> \u001b[39m\u001b[32m107\u001b[39m     h = \u001b[43mlayer\u001b[49m\u001b[43m(\u001b[49m\u001b[43mh\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mattention_mask\u001b[49m\u001b[43m=\u001b[49m\u001b[43madj\u001b[49m\u001b[43m.\u001b[49m\u001b[43mtype\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtorch\u001b[49m\u001b[43m.\u001b[49m\u001b[43mFloatTensor\u001b[49m\u001b[43m)\u001b[49m\u001b[43m.\u001b[49m\u001b[43mto\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdevice\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m[\u001b[32m0\u001b[39m]\n\u001b[32m    109\u001b[39m \u001b[38;5;66;03m# 树结构融合\u001b[39;00m\n\u001b[32m    110\u001b[39m tree_features = []\n",
      "\u001b[36mFile \u001b[39m\u001b[32m~/run/LML/anaconda3/envs/tsne/lib/python3.11/site-packages/torch/nn/modules/module.py:1518\u001b[39m, in \u001b[36mModule._wrapped_call_impl\u001b[39m\u001b[34m(self, *args, **kwargs)\u001b[39m\n\u001b[32m   1516\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m._compiled_call_impl(*args, **kwargs)  \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[32m   1517\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[32m-> \u001b[39m\u001b[32m1518\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[43m*\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
      "\u001b[36mFile \u001b[39m\u001b[32m~/run/LML/anaconda3/envs/tsne/lib/python3.11/site-packages/torch/nn/modules/module.py:1527\u001b[39m, in \u001b[36mModule._call_impl\u001b[39m\u001b[34m(self, *args, **kwargs)\u001b[39m\n\u001b[32m   1522\u001b[39m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[32m   1523\u001b[39m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[32m   1524\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m._backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m._backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m._forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m._forward_pre_hooks\n\u001b[32m   1525\u001b[39m         \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[32m   1526\u001b[39m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[32m-> \u001b[39m\u001b[32m1527\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[43m*\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m   1529\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m   1530\u001b[39m     result = \u001b[38;5;28;01mNone\u001b[39;00m\n",
      "\u001b[36mFile \u001b[39m\u001b[32m~/run/LML/anaconda3/envs/tsne/lib/python3.11/site-packages/transformers/models/bert/modeling_bert.py:585\u001b[39m, in \u001b[36mBertLayer.forward\u001b[39m\u001b[34m(self, hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions)\u001b[39m\n\u001b[32m    573\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34mforward\u001b[39m(\n\u001b[32m    574\u001b[39m     \u001b[38;5;28mself\u001b[39m,\n\u001b[32m    575\u001b[39m     hidden_states: torch.Tensor,\n\u001b[32m   (...)\u001b[39m\u001b[32m    582\u001b[39m ) -> Tuple[torch.Tensor]:\n\u001b[32m    583\u001b[39m     \u001b[38;5;66;03m# decoder uni-directional self-attention cached key/values tuple is at positions 1,2\u001b[39;00m\n\u001b[32m    584\u001b[39m     self_attn_past_key_value = past_key_value[:\u001b[32m2\u001b[39m] \u001b[38;5;28;01mif\u001b[39;00m past_key_value \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m \u001b[38;5;28;01melse\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[32m--> \u001b[39m\u001b[32m585\u001b[39m     self_attention_outputs = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mattention\u001b[49m\u001b[43m(\u001b[49m\n\u001b[32m    586\u001b[39m \u001b[43m        \u001b[49m\u001b[43mhidden_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m    587\u001b[39m \u001b[43m        \u001b[49m\u001b[43mattention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m    588\u001b[39m \u001b[43m        \u001b[49m\u001b[43mhead_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m    589\u001b[39m \u001b[43m        \u001b[49m\u001b[43moutput_attentions\u001b[49m\u001b[43m=\u001b[49m\u001b[43moutput_attentions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m    590\u001b[39m \u001b[43m        
\u001b[49m\u001b[43mpast_key_value\u001b[49m\u001b[43m=\u001b[49m\u001b[43mself_attn_past_key_value\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m    591\u001b[39m \u001b[43m    \u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m    592\u001b[39m     attention_output = self_attention_outputs[\u001b[32m0\u001b[39m]\n\u001b[32m    594\u001b[39m     \u001b[38;5;66;03m# if decoder, the last output is tuple of self-attn cache\u001b[39;00m\n",
      "\u001b[36mFile \u001b[39m\u001b[32m~/run/LML/anaconda3/envs/tsne/lib/python3.11/site-packages/torch/nn/modules/module.py:1518\u001b[39m, in \u001b[36mModule._wrapped_call_impl\u001b[39m\u001b[34m(self, *args, **kwargs)\u001b[39m\n\u001b[32m   1516\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m._compiled_call_impl(*args, **kwargs)  \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[32m   1517\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[32m-> \u001b[39m\u001b[32m1518\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[43m*\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
      "\u001b[36mFile \u001b[39m\u001b[32m~/run/LML/anaconda3/envs/tsne/lib/python3.11/site-packages/torch/nn/modules/module.py:1527\u001b[39m, in \u001b[36mModule._call_impl\u001b[39m\u001b[34m(self, *args, **kwargs)\u001b[39m\n\u001b[32m   1522\u001b[39m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[32m   1523\u001b[39m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[32m   1524\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m._backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m._backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m._forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m._forward_pre_hooks\n\u001b[32m   1525\u001b[39m         \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[32m   1526\u001b[39m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[32m-> \u001b[39m\u001b[32m1527\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[43m*\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m   1529\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m   1530\u001b[39m     result = \u001b[38;5;28;01mNone\u001b[39;00m\n",
      "\u001b[36mFile \u001b[39m\u001b[32m~/run/LML/anaconda3/envs/tsne/lib/python3.11/site-packages/transformers/models/bert/modeling_bert.py:515\u001b[39m, in \u001b[36mBertAttention.forward\u001b[39m\u001b[34m(self, hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions)\u001b[39m\n\u001b[32m    505\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34mforward\u001b[39m(\n\u001b[32m    506\u001b[39m     \u001b[38;5;28mself\u001b[39m,\n\u001b[32m    507\u001b[39m     hidden_states: torch.Tensor,\n\u001b[32m   (...)\u001b[39m\u001b[32m    513\u001b[39m     output_attentions: Optional[\u001b[38;5;28mbool\u001b[39m] = \u001b[38;5;28;01mFalse\u001b[39;00m,\n\u001b[32m    514\u001b[39m ) -> Tuple[torch.Tensor]:\n\u001b[32m--> \u001b[39m\u001b[32m515\u001b[39m     self_outputs = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mself\u001b[49m\u001b[43m(\u001b[49m\n\u001b[32m    516\u001b[39m \u001b[43m        \u001b[49m\u001b[43mhidden_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m    517\u001b[39m \u001b[43m        \u001b[49m\u001b[43mattention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m    518\u001b[39m \u001b[43m        \u001b[49m\u001b[43mhead_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m    519\u001b[39m \u001b[43m        \u001b[49m\u001b[43mencoder_hidden_states\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m    520\u001b[39m \u001b[43m        \u001b[49m\u001b[43mencoder_attention_mask\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m    521\u001b[39m \u001b[43m        \u001b[49m\u001b[43mpast_key_value\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m    522\u001b[39m \u001b[43m        \u001b[49m\u001b[43moutput_attentions\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m    523\u001b[39m \u001b[43m    \u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m    524\u001b[39m     attention_output = \u001b[38;5;28mself\u001b[39m.output(self_outputs[\u001b[32m0\u001b[39m], 
hidden_states)\n\u001b[32m    525\u001b[39m     outputs = (attention_output,) + self_outputs[\u001b[32m1\u001b[39m:]  \u001b[38;5;66;03m# add attentions if we output them\u001b[39;00m\n",
      "\u001b[36mFile \u001b[39m\u001b[32m~/run/LML/anaconda3/envs/tsne/lib/python3.11/site-packages/torch/nn/modules/module.py:1518\u001b[39m, in \u001b[36mModule._wrapped_call_impl\u001b[39m\u001b[34m(self, *args, **kwargs)\u001b[39m\n\u001b[32m   1516\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mself\u001b[39m._compiled_call_impl(*args, **kwargs)  \u001b[38;5;66;03m# type: ignore[misc]\u001b[39;00m\n\u001b[32m   1517\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[32m-> \u001b[39m\u001b[32m1518\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_call_impl\u001b[49m\u001b[43m(\u001b[49m\u001b[43m*\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
      "\u001b[36mFile \u001b[39m\u001b[32m~/run/LML/anaconda3/envs/tsne/lib/python3.11/site-packages/torch/nn/modules/module.py:1527\u001b[39m, in \u001b[36mModule._call_impl\u001b[39m\u001b[34m(self, *args, **kwargs)\u001b[39m\n\u001b[32m   1522\u001b[39m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[32m   1523\u001b[39m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[32m   1524\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m._backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m._backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m._forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m._forward_pre_hooks\n\u001b[32m   1525\u001b[39m         \u001b[38;5;129;01mor\u001b[39;00m _global_backward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[32m   1526\u001b[39m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[32m-> \u001b[39m\u001b[32m1527\u001b[39m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[43m*\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m   1529\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m   1530\u001b[39m     result = \u001b[38;5;28;01mNone\u001b[39;00m\n",
      "\u001b[36mFile \u001b[39m\u001b[32m~/run/LML/anaconda3/envs/tsne/lib/python3.11/site-packages/transformers/models/bert/modeling_bert.py:331\u001b[39m, in \u001b[36mBertSelfAttention.forward\u001b[39m\u001b[34m(self, hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions)\u001b[39m\n\u001b[32m    328\u001b[39m attention_scores = attention_scores / math.sqrt(\u001b[38;5;28mself\u001b[39m.attention_head_size)\n\u001b[32m    329\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m attention_mask \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[32m    330\u001b[39m     \u001b[38;5;66;03m# Apply the attention mask is (precomputed for all layers in BertModel forward() function)\u001b[39;00m\n\u001b[32m--> \u001b[39m\u001b[32m331\u001b[39m     attention_scores = \u001b[43mattention_scores\u001b[49m\u001b[43m \u001b[49m\u001b[43m+\u001b[49m\u001b[43m \u001b[49m\u001b[43mattention_mask\u001b[49m\n\u001b[32m    333\u001b[39m \u001b[38;5;66;03m# Normalize the attention scores to probabilities.\u001b[39;00m\n\u001b[32m    334\u001b[39m attention_probs = nn.functional.softmax(attention_scores, dim=-\u001b[32m1\u001b[39m)\n",
      "\u001b[31mRuntimeError\u001b[39m: The size of tensor a (2) must match the size of tensor b (64) at non-singleton dimension 1"
     ]
    }
   ],
   "source": [
    "args = {\n",
    "    \"dataset\": \"amazon\",\n",
    "    # \"dataset\": \"yelp\",\n",
    "    \"batch_size\": 64,\n",
    "    \"sample_size\": 50,\n",
    "    \"weight_decay\": 0.00005,\n",
    "    \"emb_size\": 32,\n",
    "    \"pretrain_epochs\": 200,\n",
    "    \"pretrain_lr\": 0.001,\n",
    "    \"finetune_lr\": 0.0005,\n",
    "    \"num_epochs\": 500,\n",
    "    \"pretrain_patience\": 20,\n",
    "    \"patience\": 30,\n",
    "    \"tsne_weight\": 0.3,\n",
    "    \"weight\": 0.6,\n",
    "    \"layers\": 7,\n",
    "    \"test_size\": 0.6,\n",
    "    \"val_size\": 0.5,\n",
    "    \"layers_tree\": 7,\n",
    "    \"seed\": 76,\n",
    "    \"num_heads\": 2,\n",
    "    \"drop_rate\": 0.5\n",
    "}\n",
    "\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "# device = torch.device('cpu')\n",
    "print(device)\n",
    "\n",
    "timestamp = time.strftime(\"%Y%m%d-%H%M%S\")\n",
    "writer = SummaryWriter(f'runs/{args[\"dataset\"]}_{timestamp}')\n",
    "\n",
    "print('loading data...')\n",
    "prefix = \"../../data/\"\n",
    "\n",
    "edge_indexs, feat_data, labels = load_data(args['dataset'], args['layers_tree'], prefix)\n",
    "\n",
    "np.random.seed(args['seed'])\n",
    "rd.seed(args['seed'])\n",
    "\n",
    "# Stratified train/val/test split (test_size then val_size of the remainder).\n",
    "if args['dataset'] == 'yelp':\n",
    "    index = list(range(len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels, stratify=labels,\n",
    "                                                                    test_size=args['test_size'], random_state=2,\n",
    "                                                                    shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"YelpChi_shortest_distance.pkl\")\n",
    "elif args['dataset'] == 'amazon':\n",
    "    # The first 3305 Amazon nodes are excluded from the split -- presumably\n",
    "    # they carry no usable labels (TODO confirm against the dataset docs).\n",
    "    index = list(range(3305, len(labels)))\n",
    "    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels[3305:],\n",
    "                                                                    stratify=labels[3305:],\n",
    "                                                                    test_size=args['test_size'],\n",
    "                                                                    random_state=2, shuffle=True)\n",
    "    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                          stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                          random_state=2, shuffle=True)\n",
    "    dist_path = os.path.join(prefix, \"Amazon_shortest_distance.pkl\")\n",
    "\n",
    "# Precomputed all-pairs shortest-path distances; used for distance-biased\n",
    "# neighbor sampling and by calculate_tsne_loss.\n",
    "# NOTE(review): dist_path is undefined (NameError) for any other dataset name.\n",
    "with open(dist_path, 'rb') as f:\n",
    "    dist_data = pickle.load(f)\n",
    "    dist_matrix = torch.tensor(dist_data['dist_matrix']).to(device)\n",
    "\n",
    "\n",
    "# Flatten all relations into one adjacency dict (node -> list of neighbors).\n",
    "# NOTE(review): adj_dict is built but never used later in this cell.\n",
    "adj_dict = defaultdict(list)\n",
    "for rel in edge_indexs:\n",
    "    edge_index = rel[0].cpu().numpy()\n",
    "    for src, dst in zip(edge_index[0], edge_index[1]):\n",
    "        adj_dict[src].append(dst)\n",
    "\n",
    "# Model class is defined earlier in this notebook; binary output (out_feat=2).\n",
    "bsne_model = multi_HOGRL_Transformer(\n",
    "    in_feat=feat_data.shape[1],\n",
    "    out_feat=2,\n",
    "    relation_nums=len(edge_indexs),\n",
    "    d_model=128,\n",
    "    nhead=2,\n",
    "    num_layers=3,\n",
    "    dim_feedforward=256,\n",
    "    drop_rate=args['drop_rate'],\n",
    "    layers_tree=args['layers_tree'],\n",
    "    tsne_weight=args['tsne_weight']\n",
    ").to(device)\n",
    "\n",
    "# Move the per-relation edge tensors and node features onto the device.\n",
    "for edge_index in edge_indexs:\n",
    "    edge_index[0] = edge_index[0].to(device)\n",
    "    edge_index[1] = [tensor.to(device) for tensor in edge_index[1]]\n",
    "feat_data = torch.tensor(feat_data).float().to(device)\n",
    "\n",
    "\n",
    "print(\"\\n=== Starting Pretraining ===\")\n",
    "\n",
    "# Stage 1: self-supervised pretraining. The classifier head is frozen; only\n",
    "# the parameters that still require grad are optimized with the t-SNE loss.\n",
    "bsne_model.classifier.requires_grad_(False)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    filter(lambda p: p.requires_grad, bsne_model.parameters()),\n",
    "    lr=args['pretrain_lr'],\n",
    "    weight_decay=5e-5\n",
    ")\n",
    "pretrain_best_loss = float('inf')\n",
    "pretrain_no_improve = 0\n",
    "pretrain_early_stop = False\n",
    "\n",
    "# use_batchwise = args.get('use_batchwise', True)\n",
    "\n",
    "for epoch in range(args['pretrain_epochs']):\n",
    "    if pretrain_early_stop:\n",
    "        break\n",
    "\n",
    "    bsne_model.train()\n",
    "    optimizer.zero_grad()\n",
    "\n",
    "    # Sample the batch of center nodes\n",
    "    batch_centers = rd.sample(range(feat_data.shape[0]), args['batch_size'])\n",
    "    subgraphs = []\n",
    "    subgraph_data = []\n",
    "    batch_u_global_list = []\n",
    "    \n",
    "    \n",
    "    # Build the batched subgraph data\n",
    "    for xi in batch_centers:\n",
    "        # Sample neighbors, biased toward nearby nodes: p ~ exp(-distance)\n",
    "        dist_row = dist_matrix[xi].cpu().numpy()\n",
    "        probs = np.exp(-dist_row)\n",
    "        probs[xi] = 0\n",
    "        probs /= probs.sum()\n",
    "        neighbors = np.random.choice(len(dist_row), size=args['sample_size'], p=probs, replace=False)\n",
    "        sub_nodes = [xi] + neighbors.tolist()\n",
    "        subgraphs.append(sub_nodes)\n",
    "\n",
    "        # Build the subgraph data structure (per-relation local edge lists)\n",
    "        sub_edge_index = []\n",
    "        for rel_idx in range(len(edge_indexs)):\n",
    "            edge_index = edge_indexs[rel_idx][0].cpu().numpy()\n",
    "            mask = np.isin(edge_index[0], sub_nodes) & np.isin(edge_index[1], sub_nodes)\n",
    "            local_edges = edge_index[:, mask]\n",
    "\n",
    "            # Remap global node ids to local positions within sub_nodes.\n",
    "            node_map = {n: i for i, n in enumerate(sub_nodes)}\n",
    "            src_nodes = [node_map[src] for src in local_edges[0]]\n",
    "            dst_nodes = [node_map[dst] for dst in local_edges[1]]\n",
    "\n",
    "            if len(src_nodes) > 0:\n",
    "                edge_tensor = torch.tensor([src_nodes, dst_nodes], dtype=torch.long)\n",
    "            else:\n",
    "                edge_tensor = torch.empty((2, 0), dtype=torch.long)\n",
    "\n",
    "            sub_edge_index.append(edge_tensor.to(device))\n",
    "\n",
    "        subgraph_data.append({\n",
    "            'features': feat_data[sub_nodes].clone().detach(),\n",
    "            'edges': sub_edge_index,\n",
    "            'global_idx': sub_nodes\n",
    "        })\n",
    "        \n",
    "        # Uniformly sampled global nodes used as the reference (Bu) batch.\n",
    "        bu_global = np.random.choice(feat_data.shape[0], size=len(sub_nodes), replace=False)\n",
    "        batch_u_global_list.append(bu_global)\n",
    "\n",
    "    _, embeddings_p = bsne_model(subgraph_data)\n",
    "    \n",
    "#     print(batch_u_global_list)\n",
    "    \n",
    "    # Build the same subgraph structures for each Bu reference batch.\n",
    "    subgraph_data_bu = []\n",
    "    for bu in batch_u_global_list:\n",
    "        feat_u = feat_data[bu]\n",
    "        # Build the 'edges' attribute for the Bu subgraph\n",
    "        bu_edge_index = []\n",
    "        for rel_idx in range(len(edge_indexs)):\n",
    "            edge_index = edge_indexs[rel_idx][0].cpu().numpy()\n",
    "            mask = np.isin(edge_index[0], bu) & np.isin(edge_index[1], bu)\n",
    "            local_edges = edge_index[:, mask]\n",
    "\n",
    "            node_map = {n: i for i, n in enumerate(bu)}\n",
    "            src_nodes = [node_map[src] for src in local_edges[0]] if len(local_edges[0]) > 0 else []\n",
    "            dst_nodes = [node_map[dst] for dst in local_edges[1]] if len(local_edges[1]) > 0 else []\n",
    "\n",
    "            if len(src_nodes) > 0:\n",
    "                edge_tensor = torch.tensor([src_nodes, dst_nodes], dtype=torch.long)\n",
    "            else:\n",
    "                edge_tensor = torch.empty((2, 0), dtype=torch.long)\n",
    "\n",
    "            bu_edge_index.append(edge_tensor.to(device))\n",
    "\n",
    "        subgraph_data_bu.append({\n",
    "            'features': feat_u,\n",
    "            'edges': bu_edge_index,\n",
    "            'global_idx': bu\n",
    "        })\n",
    "\n",
    "#     print(subgraph_data_bu[0]['features'].shape)\n",
    "#     print(subgraph_data_bu[0]['edges'])\n",
    "    # Reference embeddings are computed without gradients.\n",
    "    with torch.no_grad():\n",
    "        _, embeddings_u = bsne_model(subgraph_data_bu)\n",
    "        \n",
    "        \n",
    "    \n",
    "#     batch_u_global = np.random.choice(feat_data.shape[0], size=len(sub_nodes), replace=False)\n",
    "#     feat_u = feat_data[batch_u_global]\n",
    "#     with torch.no_grad():\n",
    "#         _, embeddings_u = bsne_model([{\n",
    "#             'features': feat_u,\n",
    "#             'edges': [torch.empty((2, 0), dtype=torch.long).to(device) for _ in range(len(edge_indexs))],\n",
    "#             'global_idx': batch_u_global\n",
    "#         }])\n",
    "\n",
    "    # calculate_tsne_loss is defined earlier in the notebook.\n",
    "    tsne_loss = calculate_tsne_loss(\n",
    "        embeddings_p,\n",
    "        embeddings_u,\n",
    "        dist_matrix,\n",
    "        batch_p_global_list=[sg['global_idx'] for sg in subgraph_data],\n",
    "        batch_u_global_list=batch_u_global_list,\n",
    "        temperature=100,\n",
    "        eps=1e-10\n",
    "    ) * args['tsne_weight']\n",
    "\n",
    "    tsne_loss.backward()\n",
    "    optimizer.step()\n",
    "\n",
    "    # Early stopping on the (stochastic) per-epoch pretraining loss.\n",
    "    if tsne_loss.item() < pretrain_best_loss:\n",
    "        pretrain_best_loss = tsne_loss.item()\n",
    "        pretrain_no_improve = 0\n",
    "    else:\n",
    "        pretrain_no_improve += 1\n",
    "\n",
    "    if pretrain_no_improve >= args['pretrain_patience']:\n",
    "        print(f\"Pretrain early stopping at epoch {epoch}\")\n",
    "        pretrain_early_stop = True\n",
    "\n",
    "    writer.add_scalar('Pretrain/TSNE_Loss', tsne_loss.item(), epoch)\n",
    "\n",
    "    if epoch % 5 == 0:\n",
    "        print(f'Pretrain Epoch: {epoch:03d}, TSNE Loss: {tsne_loss.item():.4f}')\n",
    "\n",
    "print(\"\\n=== Starting Fine-tuning ===\")\n",
    "# Stage 2: supervised fine-tuning with the classifier head unfrozen.\n",
    "bsne_model.classifier.requires_grad_(True)\n",
    "optimizer = torch.optim.AdamW(\n",
    "    filter(lambda p: p.requires_grad, bsne_model.parameters()),\n",
    "    lr=args['finetune_lr'],\n",
    "    weight_decay=5e-5\n",
    ")\n",
    "# Halve the LR when validation AUC (mode='max') stops improving.\n",
    "scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n",
    "    optimizer, mode='max', factor=0.5, patience=10, verbose=True\n",
    ")\n",
    "\n",
    "best_val_auc = 0.0\n",
    "best_model_state = None\n",
    "# pos_neg_split is defined earlier in the notebook.\n",
    "train_pos, train_neg = pos_neg_split(idx_train, y_train)\n",
    "\n",
    "no_improve_epochs = 0\n",
    "early_stop = False\n",
    "\n",
    "for epoch in range(args['num_epochs']):\n",
    "    if early_stop:\n",
    "        break\n",
    "\n",
    "    bsne_model.train()\n",
    "    total_loss = 0.0\n",
    "\n",
    "    # Sample a batch of labeled training centers from the pos + neg pools.\n",
    "    batch_centers = rd.sample(train_pos + train_neg, args['batch_size'])\n",
    "    subgraphs = []\n",
    "    subgraph_data = []\n",
    "\n",
    "    # Same distance-biased subgraph construction as in the pretraining stage.\n",
    "    for xi in batch_centers:\n",
    "        dist_row = dist_matrix[xi].cpu().numpy()\n",
    "        probs = np.exp(-dist_row)\n",
    "        probs[xi] = 0\n",
    "        probs /= probs.sum()\n",
    "        neighbors = np.random.choice(len(dist_row), size=args['sample_size'], p=probs, replace=False)\n",
    "        sub_nodes = [xi] + neighbors.tolist()\n",
    "\n",
    "        sub_edge_index = []\n",
    "        for rel_idx in range(len(edge_indexs)):\n",
    "            edge_index = edge_indexs[rel_idx][0].cpu().numpy()\n",
    "            mask = np.isin(edge_index[0], sub_nodes) & np.isin(edge_index[1], sub_nodes)\n",
    "            local_edges = edge_index[:, mask]\n",
    "\n",
    "            node_map = {n: i for i, n in enumerate(sub_nodes)}\n",
    "            src_nodes = [node_map[src] for src in local_edges[0]]\n",
    "            dst_nodes = [node_map[dst] for dst in local_edges[1]]\n",
    "\n",
    "            if len(src_nodes) > 0:\n",
    "                edge_tensor = torch.tensor([src_nodes, dst_nodes], dtype=torch.long)\n",
    "            else:\n",
    "                edge_tensor = torch.empty((2, 0), dtype=torch.long)\n",
    "\n",
    "            sub_edge_index.append(edge_tensor.to(device))\n",
    "\n",
    "        subgraph_data.append({\n",
    "            'features': feat_data[sub_nodes].clone().detach(),\n",
    "            'edges': sub_edge_index,\n",
    "            'global_idx': sub_nodes\n",
    "        })\n",
    "\n",
    "    optimizer.zero_grad()\n",
    "\n",
    "    # NLL loss over all nodes of all subgraphs in the batch.\n",
    "    out, _ = bsne_model(subgraph_data)\n",
    "    labels_sub = torch.cat([torch.tensor(labels[subgraph['global_idx']]) for subgraph in subgraph_data]).to(device)\n",
    "    cls_loss = F.nll_loss(out, labels_sub)\n",
    "\n",
    "    cls_loss.backward()\n",
    "    optimizer.step()\n",
    "\n",
    "    total_loss += cls_loss.item()\n",
    "\n",
    "    # NOTE(review): cls_loss is already a mean over nodes; dividing by\n",
    "    # batch_size here only rescales the logged value.\n",
    "    avg_loss = total_loss / args['batch_size']\n",
    "    writer.add_scalar('FineTune/Train_Loss', avg_loss, epoch)\n",
    "\n",
    "    # Validate every 5 epochs; keep the best state dict by validation AUC.\n",
    "    if epoch % 5 == 0:\n",
    "        val_auc, val_ap, val_f1, val_g_mean = test(idx_val, y_val, bsne_model, feat_data, edge_indexs, device)\n",
    "\n",
    "        writer.add_scalar('Validation/AUC', val_auc, epoch)\n",
    "        writer.add_scalar('Validation/F1', val_f1, epoch)\n",
    "        writer.add_scalar('Validation/GMean', val_g_mean, epoch)\n",
    "\n",
    "        print(f'Epoch: {epoch:03d} | Loss: {avg_loss:.4f} | Val AUC: {val_auc:.4f} | Val F1: {val_f1:.4f}')\n",
    "\n",
    "        scheduler.step(val_auc)\n",
    "\n",
    "        if val_auc > best_val_auc:\n",
    "            best_val_auc = val_auc\n",
    "            no_improve_epochs = 0\n",
    "            best_model_state = copy.deepcopy(bsne_model.state_dict())\n",
    "        else:\n",
    "            no_improve_epochs += 1\n",
    "\n",
    "        if no_improve_epochs >= args['patience']:\n",
    "            print(f\"Early stopping at epoch {epoch}\")\n",
    "            early_stop = True\n",
    "\n",
    "# Restore the best checkpoint and evaluate on the held-out test split.\n",
    "# NOTE(review): raises if no validation step improved (best_model_state None).\n",
    "bsne_model.load_state_dict(best_model_state)\n",
    "test_auc, test_ap, test_f1, test_g_mean = test(idx_test, y_test, bsne_model, feat_data, edge_indexs, device)\n",
    "print(f'\\n=== Final Test Results ===')\n",
    "print(f'Test AUC: {test_auc:.4f} | Test AP: {test_ap:.4f} | Test F1: {test_f1:.4f} | G-mean: {test_g_mean:.4f}')\n",
    "writer.close()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "15935840",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "51"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Scratch/debug cell: size of the last sampled subgraph (1 center + sample_size neighbors).\n",
    "len(sub_nodes)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "id": "777073db",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(3264, 51, 11944, 64)"
      ]
     },
     "execution_count": 40,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Scratch/debug cell: sanity-check the sizes fed to calculate_tsne_loss.\n",
    "len(embeddings_p),len(embeddings_u),len(dist_matrix),len(batch_centers)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "cd69b94b",
   "metadata": {},
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'P' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
      "\u001b[31mNameError\u001b[39m                                 Traceback (most recent call last)",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[16]\u001b[39m\u001b[32m, line 1\u001b[39m\n\u001b[32m----> \u001b[39m\u001b[32m1\u001b[39m \u001b[43mP\u001b[49m\n",
      "\u001b[31mNameError\u001b[39m: name 'P' is not defined"
     ]
    }
   ],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "40160cb3",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Scratch/debug cell: re-runs the loss on whatever embeddings are left in the\n",
    "# kernel. NOTE(review): the positional arguments here differ from the\n",
    "# keyword-list call used in the pretraining cell -- confirm the intended\n",
    "# calculate_tsne_loss signature before relying on this.\n",
    "tsne_loss = calculate_tsne_loss(\n",
    "        embeddings_p,\n",
    "        embeddings_u,\n",
    "        dist_matrix,\n",
    "        batch_centers,\n",
    "        # batch_u_global,\n",
    "        temperature=100,\n",
    "        eps=1e-10\n",
    "    ) * args['tsne_weight']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6a98a556",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6945d851",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0448f189",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9468d1d8",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8d97eb20",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4461f30b",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5363d7b6",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "423b11ea",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e2c57fd8",
   "metadata": {},
   "outputs": [],
   "source": [
    "def bsne_main(args):\n",
    "    \"\"\"Pretrain the embedding model with the t-SNE style loss, then fine-tune\n",
    "    the classifier head, and print held-out test metrics.\n",
    "\n",
    "    NOTE(review): this function largely duplicates the top-level script cell\n",
    "    above (with a single Bu reference batch instead of one per subgraph);\n",
    "    keep the two in sync or delete one to avoid divergence.\n",
    "    \"\"\"\n",
    "    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "    # device = torch.device('cpu')\n",
    "    print(device)\n",
    "\n",
    "    # TensorBoard writer; one run directory per dataset + timestamp.\n",
    "    timestamp = time.strftime(\"%Y%m%d-%H%M%S\")\n",
    "    writer = SummaryWriter(f'runs/{args[\"dataset\"]}_{timestamp}')\n",
    "\n",
    "    print('loading data...')\n",
    "    prefix = \"../../data/\"\n",
    "\n",
    "    edge_indexs, feat_data, labels = load_data(args['dataset'], args['layers_tree'], prefix)\n",
    "\n",
    "    np.random.seed(args['seed'])\n",
    "    rd.seed(args['seed'])\n",
    "\n",
    "    # Stratified train/val/test split; Amazon excludes the first 3305 nodes\n",
    "    # (presumably unlabeled -- TODO confirm).\n",
    "    if args['dataset'] == 'yelp':\n",
    "        index = list(range(len(labels)))\n",
    "        idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels, stratify=labels,\n",
    "                                                                        test_size=args['test_size'], random_state=2,\n",
    "                                                                        shuffle=True)\n",
    "        idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                              stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                              random_state=2, shuffle=True)\n",
    "        dist_path = os.path.join(prefix, \"YelpChi_shortest_distance.pkl\")\n",
    "    elif args['dataset'] == 'amazon':\n",
    "        index = list(range(3305, len(labels)))\n",
    "        idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels[3305:],\n",
    "                                                                        stratify=labels[3305:],\n",
    "                                                                        test_size=args['test_size'],\n",
    "                                                                        random_state=2, shuffle=True)\n",
    "        idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                              stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                              random_state=2, shuffle=True)\n",
    "        dist_path = os.path.join(prefix, \"Amazon_shortest_distance.pkl\")\n",
    "\n",
    "    # Precomputed shortest-path distance matrix for sampling and the loss.\n",
    "    # NOTE(review): dist_path is undefined for any other dataset name.\n",
    "    with open(dist_path, 'rb') as f:\n",
    "        dist_data = pickle.load(f)\n",
    "        dist_matrix = torch.tensor(dist_data['dist_matrix']).to(device)\n",
    "\n",
    "\n",
    "    # Flattened adjacency dict; NOTE(review): unused below.\n",
    "    adj_dict = defaultdict(list)\n",
    "    for rel in edge_indexs:\n",
    "        edge_index = rel[0].cpu().numpy()\n",
    "        for src, dst in zip(edge_index[0], edge_index[1]):\n",
    "            adj_dict[src].append(dst)\n",
    "\n",
    "    bsne_model = multi_HOGRL_Transformer(\n",
    "        in_feat=feat_data.shape[1],\n",
    "        out_feat=2,\n",
    "        relation_nums=len(edge_indexs),\n",
    "        d_model=128,\n",
    "        nhead=2,\n",
    "        num_layers=3,\n",
    "        dim_feedforward=256,\n",
    "        drop_rate=args['drop_rate'],\n",
    "        layers_tree=args['layers_tree'],\n",
    "        tsne_weight=args['tsne_weight']\n",
    "    ).to(device)\n",
    "\n",
    "    # Move edge tensors and features onto the device.\n",
    "    for edge_index in edge_indexs:\n",
    "        edge_index[0] = edge_index[0].to(device)\n",
    "        edge_index[1] = [tensor.to(device) for tensor in edge_index[1]]\n",
    "    feat_data = torch.tensor(feat_data).float().to(device)\n",
    "\n",
    "\n",
    "    print(\"\\n=== Starting Pretraining ===\")\n",
    "\n",
    "    # Stage 1: freeze the classifier head; optimize embeddings only.\n",
    "    bsne_model.classifier.requires_grad_(False)\n",
    "    optimizer = torch.optim.AdamW(\n",
    "        filter(lambda p: p.requires_grad, bsne_model.parameters()),\n",
    "        lr=args['pretrain_lr'],\n",
    "        weight_decay=5e-5\n",
    "    )\n",
    "    pretrain_best_loss = float('inf')\n",
    "    pretrain_no_improve = 0\n",
    "    pretrain_early_stop = False\n",
    "\n",
    "    # use_batchwise = args.get('use_batchwise', True)\n",
    "\n",
    "    for epoch in range(args['pretrain_epochs']):\n",
    "        if pretrain_early_stop:\n",
    "            break\n",
    "\n",
    "        bsne_model.train()\n",
    "        optimizer.zero_grad()\n",
    "\n",
    "        # Sample the batch of center nodes\n",
    "        batch_centers = rd.sample(range(feat_data.shape[0]), args['batch_size'])\n",
    "        subgraphs = []\n",
    "        subgraph_data = []\n",
    "        # Build the batched subgraph data\n",
    "        for xi in batch_centers:\n",
    "            # Sample neighbors, biased toward nearby nodes: p ~ exp(-distance)\n",
    "            dist_row = dist_matrix[xi].cpu().numpy()\n",
    "            probs = np.exp(-dist_row)\n",
    "            probs[xi] = 0\n",
    "            probs /= probs.sum()\n",
    "            neighbors = np.random.choice(len(dist_row), size=args['sample_size'], p=probs, replace=False)\n",
    "            sub_nodes = [xi] + neighbors.tolist()\n",
    "            subgraphs.append(sub_nodes)\n",
    "\n",
    "            # Build the subgraph data structure (per-relation local edges)\n",
    "            sub_edge_index = []\n",
    "            for rel_idx in range(len(edge_indexs)):\n",
    "                edge_index = edge_indexs[rel_idx][0].cpu().numpy()\n",
    "                mask = np.isin(edge_index[0], sub_nodes) & np.isin(edge_index[1], sub_nodes)\n",
    "                local_edges = edge_index[:, mask]\n",
    "\n",
    "                node_map = {n: i for i, n in enumerate(sub_nodes)}\n",
    "                src_nodes = [node_map[src] for src in local_edges[0]]\n",
    "                dst_nodes = [node_map[dst] for dst in local_edges[1]]\n",
    "\n",
    "                if len(src_nodes) > 0:\n",
    "                    edge_tensor = torch.tensor([src_nodes, dst_nodes], dtype=torch.long)\n",
    "                else:\n",
    "                    edge_tensor = torch.empty((2, 0), dtype=torch.long)\n",
    "\n",
    "                sub_edge_index.append(edge_tensor.to(device))\n",
    "\n",
    "            subgraph_data.append({\n",
    "                'features': feat_data[sub_nodes].clone().detach(),\n",
    "                'edges': sub_edge_index,\n",
    "                'global_idx': sub_nodes\n",
    "            })\n",
    "\n",
    "        _, embeddings_p = bsne_model(subgraph_data)\n",
    "\n",
    "        # Single uniformly-sampled reference batch, embedded without grad.\n",
    "        # NOTE(review): uses sub_nodes leaked from the last loop iteration.\n",
    "        batch_u_global = np.random.choice(feat_data.shape[0], size=len(sub_nodes), replace=False)\n",
    "        feat_u = feat_data[batch_u_global]\n",
    "        with torch.no_grad():\n",
    "            _, embeddings_u = bsne_model([{\n",
    "                'features': feat_u,\n",
    "                'edges': [torch.empty((2, 0), dtype=torch.long).to(device) for _ in range(len(edge_indexs))],\n",
    "                'global_idx': batch_u_global\n",
    "            }])\n",
    "        # NOTE(review): leftover debug prints below.\n",
    "        print(len(sub_nodes))\n",
    "        print(len(batch_centers))\n",
    "        print(batch_u_global)\n",
    "        # NOTE(review): positional arguments differ from the keyword-list call\n",
    "        # used in the script cell above -- confirm the intended signature.\n",
    "        tsne_loss = calculate_tsne_loss(\n",
    "            embeddings_p,\n",
    "            embeddings_u,\n",
    "            dist_matrix,\n",
    "            batch_centers,\n",
    "            # batch_u_global,\n",
    "            temperature=100,\n",
    "            eps=1e-10\n",
    "        ) * args['tsne_weight']\n",
    "\n",
    "        tsne_loss.backward()\n",
    "        optimizer.step()\n",
    "\n",
    "        if tsne_loss.item() < pretrain_best_loss:\n",
    "            pretrain_best_loss = tsne_loss.item()\n",
    "            pretrain_no_improve = 0\n",
    "        else:\n",
    "            pretrain_no_improve += 1\n",
    "\n",
    "        if pretrain_no_improve >= args['pretrain_patience']:\n",
    "            print(f\"Pretrain early stopping at epoch {epoch}\")\n",
    "            pretrain_early_stop = True\n",
    "\n",
    "        writer.add_scalar('Pretrain/TSNE_Loss', tsne_loss.item(), epoch)\n",
    "\n",
    "        if epoch % 5 == 0:\n",
    "            print(f'Pretrain Epoch: {epoch:03d}, TSNE Loss: {tsne_loss.item():.4f}')\n",
    "\n",
    "    print(\"\\n=== Starting Fine-tuning ===\")\n",
    "    # Stage 2: unfreeze the classifier head for supervised training.\n",
    "    bsne_model.classifier.requires_grad_(True)\n",
    "    optimizer = torch.optim.AdamW(\n",
    "        filter(lambda p: p.requires_grad, bsne_model.parameters()),\n",
    "        lr=args['finetune_lr'],\n",
    "        weight_decay=5e-5\n",
    "    )\n",
    "    # Halve LR when validation AUC plateaus (mode='max').\n",
    "    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n",
    "        optimizer, mode='max', factor=0.5, patience=10, verbose=True\n",
    "    )\n",
    "\n",
    "    best_val_auc = 0.0\n",
    "    best_model_state = None\n",
    "    train_pos, train_neg = pos_neg_split(idx_train, y_train)\n",
    "\n",
    "    no_improve_epochs = 0\n",
    "    early_stop = False\n",
    "\n",
    "    for epoch in range(args['num_epochs']):\n",
    "        if early_stop:\n",
    "            break\n",
    "\n",
    "        bsne_model.train()\n",
    "        total_loss = 0.0\n",
    "\n",
    "        # Sample labeled training centers and build subgraphs as above.\n",
    "        batch_centers = rd.sample(train_pos + train_neg, args['batch_size'])\n",
    "        subgraphs = []\n",
    "        subgraph_data = []\n",
    "\n",
    "        for xi in batch_centers:\n",
    "            dist_row = dist_matrix[xi].cpu().numpy()\n",
    "            probs = np.exp(-dist_row)\n",
    "            probs[xi] = 0\n",
    "            probs /= probs.sum()\n",
    "            neighbors = np.random.choice(len(dist_row), size=args['sample_size'], p=probs, replace=False)\n",
    "            sub_nodes = [xi] + neighbors.tolist()\n",
    "\n",
    "            sub_edge_index = []\n",
    "            for rel_idx in range(len(edge_indexs)):\n",
    "                edge_index = edge_indexs[rel_idx][0].cpu().numpy()\n",
    "                mask = np.isin(edge_index[0], sub_nodes) & np.isin(edge_index[1], sub_nodes)\n",
    "                local_edges = edge_index[:, mask]\n",
    "\n",
    "                node_map = {n: i for i, n in enumerate(sub_nodes)}\n",
    "                src_nodes = [node_map[src] for src in local_edges[0]]\n",
    "                dst_nodes = [node_map[dst] for dst in local_edges[1]]\n",
    "\n",
    "                if len(src_nodes) > 0:\n",
    "                    edge_tensor = torch.tensor([src_nodes, dst_nodes], dtype=torch.long)\n",
    "                else:\n",
    "                    edge_tensor = torch.empty((2, 0), dtype=torch.long)\n",
    "\n",
    "                sub_edge_index.append(edge_tensor.to(device))\n",
    "\n",
    "            subgraph_data.append({\n",
    "                'features': feat_data[sub_nodes].clone().detach(),\n",
    "                'edges': sub_edge_index,\n",
    "                'global_idx': sub_nodes\n",
    "            })\n",
    "\n",
    "        optimizer.zero_grad()\n",
    "\n",
    "        # NLL loss over all nodes of all subgraphs in the batch.\n",
    "        out, _ = bsne_model(subgraph_data)\n",
    "        labels_sub = torch.cat([torch.tensor(labels[subgraph['global_idx']]) for subgraph in subgraph_data]).to(device)\n",
    "        cls_loss = F.nll_loss(out, labels_sub)\n",
    "\n",
    "        cls_loss.backward()\n",
    "        optimizer.step()\n",
    "\n",
    "        total_loss += cls_loss.item()\n",
    "\n",
    "        # NOTE(review): cls_loss is already a mean; this only rescales logging.\n",
    "        avg_loss = total_loss / args['batch_size']\n",
    "        writer.add_scalar('FineTune/Train_Loss', avg_loss, epoch)\n",
    "\n",
    "        # Validate every 5 epochs; keep the best state dict by val AUC.\n",
    "        if epoch % 5 == 0:\n",
    "            val_auc, val_ap, val_f1, val_g_mean = test(idx_val, y_val, bsne_model, feat_data, edge_indexs, device)\n",
    "\n",
    "            writer.add_scalar('Validation/AUC', val_auc, epoch)\n",
    "            writer.add_scalar('Validation/F1', val_f1, epoch)\n",
    "            writer.add_scalar('Validation/GMean', val_g_mean, epoch)\n",
    "\n",
    "            print(f'Epoch: {epoch:03d} | Loss: {avg_loss:.4f} | Val AUC: {val_auc:.4f} | Val F1: {val_f1:.4f}')\n",
    "\n",
    "            scheduler.step(val_auc)\n",
    "\n",
    "            if val_auc > best_val_auc:\n",
    "                best_val_auc = val_auc\n",
    "                no_improve_epochs = 0\n",
    "                best_model_state = copy.deepcopy(bsne_model.state_dict())\n",
    "            else:\n",
    "                no_improve_epochs += 1\n",
    "\n",
    "            if no_improve_epochs >= args['patience']:\n",
    "                print(f\"Early stopping at epoch {epoch}\")\n",
    "                early_stop = True\n",
    "\n",
    "    # Restore best checkpoint and evaluate on the test split.\n",
    "    # NOTE(review): raises if best_model_state is still None.\n",
    "    bsne_model.load_state_dict(best_model_state)\n",
    "    test_auc, test_ap, test_f1, test_g_mean = test(idx_test, y_test, bsne_model, feat_data, edge_indexs, device)\n",
    "    print(f'\\n=== Final Test Results ===')\n",
    "    print(f'Test AUC: {test_auc:.4f} | Test AP: {test_ap:.4f} | Test F1: {test_f1:.4f} | G-mean: {test_g_mean:.4f}')\n",
    "    writer.close()\n",
    "\n",
    "# Same hyper-parameters as the script cell above, duplicated so that the\n",
    "# function version can run standalone.\n",
    "args = {\n",
    "    \"dataset\": \"amazon\",\n",
    "    # \"dataset\": \"yelp\",\n",
    "    \"batch_size\": 64,\n",
    "    \"sample_size\": 50,\n",
    "    \"weight_decay\": 0.00005,\n",
    "    \"emb_size\": 32,\n",
    "    \"pretrain_epochs\": 200,\n",
    "    \"pretrain_lr\": 0.001,\n",
    "    \"finetune_lr\": 0.0005,\n",
    "    \"num_epochs\": 500,\n",
    "    \"pretrain_patience\": 20,\n",
    "    \"patience\": 30,\n",
    "    \"tsne_weight\": 0.3,\n",
    "    \"weight\": 0.6,\n",
    "    \"layers\": 7,\n",
    "    \"test_size\": 0.6,\n",
    "    \"val_size\": 0.5,\n",
    "    \"layers_tree\": 7,\n",
    "    \"seed\": 76,\n",
    "    \"num_heads\": 2,\n",
    "    \"drop_rate\": 0.5\n",
    "}\n",
    "bsne_main(args)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9d55bd13",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "edba6085",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "29405fc4",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "tsne01",
   "language": "python",
   "name": "tsne01"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
