{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "323bcfbe",
   "metadata": {},
   "outputs": [],
   "source": [
    "import sys\n",
    "sys.path.append('/data/run01/sczc619/LML/MetaTSNE')\n",
    "from bsne_utils import *"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "115f0981",
   "metadata": {},
   "outputs": [],
   "source": [
    "class GraphTransformerLayer(nn.Module):\n",
    "    \"\"\"Pre-norm transformer encoder layer whose self-attention can be\n",
    "    restricted to graph edges via an adjacency mask.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, d_model=256, nhead=8, dim_feedforward=768, dropout=0.1):\n",
    "        super().__init__()\n",
    "        # Sub-modules are created in this exact order; keeping the order\n",
    "        # stable keeps seeded parameter initialisation reproducible.\n",
    "        self.linear1 = nn.Linear(d_model, dim_feedforward)\n",
    "        self.self_attn = nn.MultiheadAttention(\n",
    "            embed_dim=d_model,\n",
    "            num_heads=nhead,\n",
    "            dropout=dropout,\n",
    "            batch_first=True\n",
    "        )\n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "        self.linear2 = nn.Linear(dim_feedforward, d_model)\n",
    "        self.norm1 = nn.LayerNorm(d_model)\n",
    "        self.norm2 = nn.LayerNorm(d_model)\n",
    "        self.dropout1 = nn.Dropout(dropout)\n",
    "        self.dropout2 = nn.Dropout(dropout)\n",
    "        self.activation = nn.ReLU()\n",
    "\n",
    "    def forward(self, src, adj_mask=None):\n",
    "        \"\"\"Run masked self-attention, then a position-wise feed-forward\n",
    "        block, each wrapped in a pre-norm residual connection.\n",
    "        \"\"\"\n",
    "        attn_mask = self._create_attention_mask(adj_mask)\n",
    "\n",
    "        # --- self-attention sub-layer (pre-norm residual) ---\n",
    "        normed = self.norm1(src)\n",
    "        attended, _ = self.self_attn(\n",
    "            normed, normed, normed,\n",
    "            attn_mask=attn_mask\n",
    "        )\n",
    "        src = src + self.dropout1(attended)\n",
    "\n",
    "        # --- feed-forward sub-layer (pre-norm residual) ---\n",
    "        normed = self.norm2(src)\n",
    "        hidden = self.activation(self.linear1(normed))\n",
    "        projected = self.linear2(self.dropout(hidden))\n",
    "        return src + self.dropout2(projected)\n",
    "\n",
    "    def _create_attention_mask(self, adj_mask):\n",
    "        \"\"\"Turn an adjacency matrix into a per-head boolean attention mask\n",
    "        (True = position is NOT attended to); None passes through.\n",
    "        \"\"\"\n",
    "        if adj_mask is None:\n",
    "            return None\n",
    "        blocked = ~adj_mask.bool()  # True wherever there is no edge\n",
    "        # NOTE(review): a row with no edges is fully masked, which makes the\n",
    "        # attention softmax produce NaNs -- confirm inputs always give every\n",
    "        # node at least one attendable position (e.g. self-loops).\n",
    "        # One copy of the mask per attention head: [nhead, N, N].\n",
    "        return blocked.repeat(self.self_attn.num_heads, 1, 1)\n",
    "\n",
    "\n",
    "class multi_HOGRL_Transformer(nn.Module):\n",
    "    \"\"\"Multi-relation graph transformer with gated tree-view fusion.\n",
    "\n",
    "    For each relation, a stack of GraphTransformerLayer blocks runs on the\n",
    "    relation's adjacency; additional \"tree\" views of the features are\n",
    "    projected through small MLPs, passed through the stack's last\n",
    "    transformer layer with the tree adjacency, and fused with a learned\n",
    "    softmax gate. The per-relation outputs are concatenated for\n",
    "    classification and averaged for the t-SNE pretraining features.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, in_feat, out_feat, relation_nums=3, d_model=256,\n",
    "                 nhead=8, num_layers=5, dim_feedforward=768,\n",
    "                 drop_rate=0.6, layers_tree=2, tsne_weight=0.1):\n",
    "        super().__init__()\n",
    "        self.relation_nums = relation_nums\n",
    "        self.d_model = d_model\n",
    "\n",
    "        # Project raw node features to the model width.\n",
    "        self.feature_proj = nn.Linear(in_feat, d_model)\n",
    "\n",
    "        # One stack of num_layers graph-transformer layers per relation.\n",
    "        self.transformer_layers = nn.ModuleList([\n",
    "            nn.ModuleList([GraphTransformerLayer(\n",
    "                d_model=d_model,\n",
    "                nhead=nhead,\n",
    "                dim_feedforward=dim_feedforward,\n",
    "                dropout=drop_rate\n",
    "            ) for _ in range(num_layers)])\n",
    "            for _ in range(relation_nums)\n",
    "        ])\n",
    "\n",
    "        # Per relation, one 2-layer MLP per tree view.\n",
    "        self.tree_projs = nn.ModuleList([\n",
    "            nn.ModuleList([nn.Sequential(\n",
    "                nn.Linear(d_model, dim_feedforward),\n",
    "                nn.ReLU(),\n",
    "                nn.Linear(dim_feedforward, d_model)\n",
    "            ) for _ in range(layers_tree)])\n",
    "            for _ in range(relation_nums)\n",
    "        ])\n",
    "\n",
    "        # Scalar gate per tree view, softmax-normalised across views.\n",
    "        self.gating_networks = nn.ModuleList([\n",
    "            nn.ModuleList([nn.Linear(d_model, 1)\n",
    "                           for _ in range(layers_tree)])\n",
    "            for _ in range(relation_nums)\n",
    "        ])\n",
    "\n",
    "        # Classifier over the concatenation of all relation outputs.\n",
    "        self.classifier = nn.Sequential(\n",
    "            nn.Linear(relation_nums * d_model, 512),\n",
    "            nn.ReLU(),\n",
    "            nn.Dropout(drop_rate),\n",
    "            nn.Linear(512, out_feat)\n",
    "        )\n",
    "\n",
    "        # Stored for callers; not read inside this module's forward.\n",
    "        self.tsne_weight = tsne_weight\n",
    "\n",
    "    def forward(self, x, edge_indexs, sub_nodes=None):\n",
    "        \"\"\"Return (log-probabilities [N, out_feat], t-SNE features [N, d_model]).\n",
    "\n",
    "        edge_indexs[r][0] is relation r's edge_index; edge_indexs[r][1] is an\n",
    "        iterable of tree edge_index tensors (layout assumed from the indexing\n",
    "        below -- TODO confirm against load_data). When sub_nodes is given,\n",
    "        x is first restricted to those rows.\n",
    "        \"\"\"\n",
    "        if sub_nodes is not None:\n",
    "            x = x[sub_nodes]\n",
    "\n",
    "        x = self.feature_proj(x)\n",
    "\n",
    "        relation_outputs = []\n",
    "        for rel_idx in range(self.relation_nums):\n",
    "            edge_index = edge_indexs[rel_idx][0]\n",
    "            # Dense adjacency sized to the (possibly sub-sampled) batch.\n",
    "            adj_matrix = to_dense_adj(edge_index, max_num_nodes=x.size(0))[0]\n",
    "            tree_indices = edge_indexs[rel_idx][1]\n",
    "\n",
    "            # Main branch: full transformer stack on the relation adjacency.\n",
    "            h = x\n",
    "            for layer in self.transformer_layers[rel_idx]:\n",
    "                h = layer(h, adj_matrix.bool())\n",
    "\n",
    "            # Tree branch: MLP projection per view, then the stack's last\n",
    "            # transformer layer with the tree adjacency.\n",
    "            tree_features = []\n",
    "            for tree_idx, tree_edges in enumerate(tree_indices):\n",
    "                tree_adj = to_dense_adj(tree_edges, max_num_nodes=x.size(0))[0]\n",
    "                h_tree = x\n",
    "                # Iterating the nn.Sequential applies Linear -> ReLU -> Linear.\n",
    "                for layer in self.tree_projs[rel_idx][tree_idx]:\n",
    "                    h_tree = layer(h_tree)\n",
    "                h_tree = self.transformer_layers[rel_idx][-1](h_tree, tree_adj.bool())\n",
    "                tree_features.append(h_tree)\n",
    "\n",
    "            # Gate the tree views: gates is [N, 1, layers_tree]; softmax over\n",
    "            # the views, and alpha[:, :, i] ([N, 1]) broadcasts over d_model.\n",
    "            gates = torch.stack([\n",
    "                self.gating_networks[rel_idx][i](feat)\n",
    "                for i, feat in enumerate(tree_features)\n",
    "            ], dim=-1)\n",
    "            alpha = F.softmax(gates, dim=-1)\n",
    "            fused_tree = sum([feat * alpha[:, :, i]\n",
    "                              for i, feat in enumerate(tree_features)])\n",
    "\n",
    "            relation_output = h + fused_tree\n",
    "            relation_outputs.append(relation_output)\n",
    "\n",
    "        combined = torch.cat(relation_outputs, dim=-1)\n",
    "\n",
    "        logits = self.classifier(combined)\n",
    "        logits = F.log_softmax(logits, dim=-1)\n",
    "\n",
    "        # Average over relations as the representation for t-SNE pretraining.\n",
    "        tsne_feats = torch.stack(relation_outputs, dim=1).mean(dim=1)\n",
    "\n",
    "        return logits, tsne_feats"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "6df464ba",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "def calculate_tsne_loss(emb_p, emb_u, dist_sub_p, dist_matrix, batch_p_global, batch_u_global, temperature=1,\n",
    "                        eps=1e-12):\n",
    "    \"\"\"\n",
    "    :param emb_p: B_p子图嵌入 [batch_size, d_model]\n",
    "    :param emb_u: B_u全局嵌入 [batch_size, d_model]\n",
    "    :param dist_sub_p: B_p子图距离矩阵 [subgraph_size, subgraph_size]\n",
    "    :param dist_matrix: 全图距离矩阵 [num_nodes, num_nodes]\n",
    "    :param batch_p_global: B_p节点的全局索引\n",
    "    :param batch_u_global: B_u节点的全局索引\n",
    "    \"\"\"\n",
    "    device = emb_p.device\n",
    "    batch_size = emb_p.size(0)\n",
    "\n",
    "    # --- 局部项：基于B_p子图 ---\n",
    "    # 生成子图局部索引映射表\n",
    "    subnode_to_local = {node: i for i, node in enumerate(batch_p_global)}\n",
    "    local_indices = [subnode_to_local[node] for node in batch_p_global]\n",
    "\n",
    "    # 提取局部距离矩阵\n",
    "    dist_p = dist_sub_p[local_indices][:, local_indices]\n",
    "\n",
    "    # 计算P\n",
    "    # P = torch.exp(-dist_p ** 2)\n",
    "    P = (1.0 + dist_p ** 2) ** -1\n",
    "    P.fill_diagonal_(0)\n",
    "    P = (P + P.T) / 2  # 对称化\n",
    "    P = P / (P.sum(dim=1, keepdim=True) + eps)\n",
    "    # P = P / P.sum()\n",
    "    P = torch.clamp(P, min=eps)\n",
    "\n",
    "    # 计算Q\n",
    "    # pairwise_dist = torch.cdist(emb_p, emb_p)\n",
    "    pairwise_dist = torch.cdist(emb_p, emb_p, p=2)\n",
    "    Q = (1.0 + pairwise_dist ** 2 / temperature) ** -1\n",
    "    Q.fill_diagonal_(0)\n",
    "    Q = (Q + Q.T) / 2\n",
    "    Q = Q / (Q.sum(dim=1, keepdim=True) + eps)\n",
    "    # Q = Q / Q.sum()\n",
    "    Q = torch.clamp(Q, min=eps)\n",
    "\n",
    "    # print(len(local_indices))\n",
    "    # print(len(dist_p))\n",
    "    # print(len(emb_p))\n",
    "    # print(P.sum(),Q.sum())\n",
    "\n",
    "    # 局部损失：KL散度\n",
    "    # loss_local = (P * (torch.log(P) - torch.log(Q))).sum()\n",
    "    loss_local = (torch.log(P) - torch.log(Q)).mean()\n",
    "\n",
    "\n",
    "    # --- 全局项 ---\n",
    "    # # 计算B_p到B_u的距离\n",
    "    # dist_pu = torch.cdist(emb_p, emb_u)\n",
    "    #\n",
    "    # # 分子：sum_{x∈B_u} e^{-dθ(xi,x)}\n",
    "    # d_bu = torch.exp(-dist_pu).sum(dim=1)\n",
    "    #\n",
    "    # # 分母：sum_{x∈B_p} e^{-dθ(xi,x)}\n",
    "    # d_bp = torch.exp(-pairwise_dist).sum(dim=1) + eps\n",
    "    #\n",
    "    # # 计算k_Bp（使用全局距离矩阵）\n",
    "    #\n",
    "    # p_xi_full = torch.exp(-dist_matrix).sum(dim=1)  # [num_nodes]\n",
    "    # dist_sub_p = dist_matrix[batch_p_global][:, batch_p_global]\n",
    "    # sum_p_xi = torch.exp(-dist_sub_p).sum(dim=1)\n",
    "    # k_Bp = (sum_p_xi / p_xi_full[batch_p_global]) * (dist_matrix.shape[0] / batch_size)\n",
    "\n",
    "    # loss_global = torch.log(k_Bp * d_bu / d_bp + eps).mean()\n",
    "\n",
    "\n",
    "    # 计算emb_p到emb_u的距离（平方欧氏距离）\n",
    "    dist_pu_sq = torch.cdist(emb_p, emb_u, p=2) ** 2\n",
    "    d_bu = (1.0 + dist_pu_sq / temperature) ** -1\n",
    "    d_bu = d_bu.sum(dim=1)\n",
    "\n",
    "    # 计算emb_p内部的距离（平方欧氏距离）\n",
    "    pairwise_dist_sq = pairwise_dist ** 2\n",
    "    d_bp = (1.0 + pairwise_dist_sq / temperature) ** -1\n",
    "    d_bp = d_bp.sum(dim=1) + eps\n",
    "\n",
    "    # 计算k_Bp（保持原逻辑）\n",
    "    p_xi_full = (1.0 + dist_matrix ** 2) ** -1  # 原始空间的学生t核\n",
    "    sum_p_xi = p_xi_full[batch_p_global][:, batch_p_global].sum(dim=1)\n",
    "    k_Bp = (sum_p_xi / p_xi_full[batch_p_global].sum(dim=1)) * (dist_matrix.shape[0] / batch_size)\n",
    "\n",
    "    ratio = (k_Bp.unsqueeze(1) * d_bu) / d_bp.unsqueeze(1)\n",
    "    loss_global = torch.log(ratio.clamp(min=eps)).mean()\n",
    "\n",
    "#     print(len(k_Bp),f\"k_Bp: {k_Bp}\")\n",
    "#     print(f\"d_bu mean: {d_bu.mean().item()}, d_bu max: {d_bu.max().item()}, d_bu min: {d_bu.min().item()}\")\n",
    "#     print(f\"d_bp mean: {d_bp.mean().item()}, d_bp max: {d_bp.max().item()}, d_bp min: {d_bp.min().item()}\")\n",
    "\n",
    "\n",
    "#     print(f\"P mean: {P.mean().item()}, P max: {P.max().item()}, P min: {P.min().item()}\")\n",
    "#     print(f\"Q mean: {Q.mean().item()}, Q max: {Q.max().item()}, Q min: {Q.min().item()}\")\n",
    "    print(f\"local_loss: {loss_local.item()},global_loss: {loss_global.item()}\")\n",
    "\n",
    "    return loss_local + loss_global"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "a9fc4976",
   "metadata": {},
   "outputs": [],
   "source": [
    "def test(idx_eval, y_eval, model, feat_data, edge_indexs, device):\n",
    "    \"\"\"Evaluate the model on the nodes listed in idx_eval.\n",
    "\n",
    "    Runs a full-graph forward pass, takes the positive-class probability,\n",
    "    and returns (AUC, average precision, macro F1, G-mean) against y_eval.\n",
    "    \"\"\"\n",
    "    model.eval()\n",
    "    with torch.no_grad():\n",
    "        logits, _ = model(feat_data.to(device), edge_indexs, sub_nodes=None)\n",
    "        # log-softmax output -> probabilities; keep the positive class only.\n",
    "        scores_all = torch.exp(logits).cpu().detach()[:, 1].numpy()\n",
    "        y_true = np.array(y_eval)\n",
    "        scores = scores_all[np.array(idx_eval)]\n",
    "        auc_score = roc_auc_score(y_true, scores)\n",
    "        ap_score = average_precision_score(y_true, scores)\n",
    "        # Hard labels at the conventional 0.5 threshold.\n",
    "        y_pred = (scores >= 0.5).astype(int)\n",
    "        f1_score_val = f1_score(y_true, y_pred, average='macro')\n",
    "        g_mean = calculate_g_mean(y_true, y_pred)\n",
    "\n",
    "    return auc_score, ap_score, f1_score_val, g_mean\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "aecf2e46",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cuda\n",
      "loading data...\n",
      "\n",
      "=== Starting Pretraining ===\n",
      "local_loss: 2.5837223529815674,global_loss: -0.03312237560749054\n",
      "Pretrain Epoch: 000, TSNE Loss: 0.7652\n",
      "local_loss: 2.5743165016174316,global_loss: -0.03813445195555687\n",
      "local_loss: 2.565742015838623,global_loss: -0.03398073837161064\n",
      "local_loss: 2.484656810760498,global_loss: -0.04385937377810478\n",
      "local_loss: 2.418201208114624,global_loss: -0.0484362430870533\n",
      "local_loss: 2.3630359172821045,global_loss: -0.05938762426376343\n",
      "Pretrain Epoch: 005, TSNE Loss: 0.6911\n",
      "local_loss: 2.2200393676757812,global_loss: -0.0675606057047844\n",
      "local_loss: 2.192493438720703,global_loss: -0.07863841950893402\n",
      "local_loss: 2.1034185886383057,global_loss: -0.08512630313634872\n",
      "local_loss: 1.9965931177139282,global_loss: -0.08952703326940536\n",
      "local_loss: 1.9434832334518433,global_loss: -0.107897087931633\n",
      "Pretrain Epoch: 010, TSNE Loss: 0.5507\n",
      "local_loss: 1.8159739971160889,global_loss: -0.09767112880945206\n",
      "local_loss: 1.6608006954193115,global_loss: -0.1076049879193306\n",
      "local_loss: 1.5930168628692627,global_loss: -0.10999751836061478\n",
      "local_loss: 1.4604055881500244,global_loss: -0.11353162676095963\n",
      "local_loss: 1.370214581489563,global_loss: -0.12933450937271118\n",
      "Pretrain Epoch: 015, TSNE Loss: 0.3723\n",
      "local_loss: 1.2670159339904785,global_loss: -0.1350531429052353\n",
      "local_loss: 1.1447371244430542,global_loss: -0.15081071853637695\n",
      "local_loss: 1.0554810762405396,global_loss: -0.15206116437911987\n",
      "local_loss: 0.9529496431350708,global_loss: -0.16277751326560974\n",
      "local_loss: 0.8182492256164551,global_loss: -0.151863694190979\n",
      "Pretrain Epoch: 020, TSNE Loss: 0.1999\n",
      "local_loss: 0.723981499671936,global_loss: -0.16183800995349884\n",
      "local_loss: 0.61651611328125,global_loss: -0.16870729625225067\n",
      "local_loss: 0.559822678565979,global_loss: -0.19281210005283356\n",
      "local_loss: 0.4652557969093323,global_loss: -0.17632801830768585\n",
      "local_loss: 0.37183547019958496,global_loss: -0.17050009965896606\n",
      "Pretrain Epoch: 025, TSNE Loss: 0.0604\n",
      "local_loss: 0.3152943253517151,global_loss: -0.17881344258785248\n",
      "local_loss: 0.2317950427532196,global_loss: -0.17657363414764404\n",
      "local_loss: 0.18846076726913452,global_loss: -0.18358373641967773\n",
      "local_loss: 0.12618423998355865,global_loss: -0.17065304517745972\n",
      "local_loss: 0.20569567382335663,global_loss: -0.24608199298381805\n",
      "Pretrain Epoch: 030, TSNE Loss: -0.0121\n",
      "local_loss: 0.07317344844341278,global_loss: -0.17092692852020264\n",
      "local_loss: 0.0646943673491478,global_loss: -0.18598540127277374\n",
      "local_loss: 0.0572146438062191,global_loss: -0.19900719821453094\n",
      "local_loss: 0.05632626265287399,global_loss: -0.20832933485507965\n",
      "local_loss: 0.05361839011311531,global_loss: -0.21801316738128662\n",
      "Pretrain Epoch: 035, TSNE Loss: -0.0493\n",
      "local_loss: 0.038373060524463654,global_loss: -0.20355162024497986\n",
      "local_loss: 0.027516089379787445,global_loss: -0.19425541162490845\n",
      "local_loss: 0.023607218638062477,global_loss: -0.19560016691684723\n",
      "local_loss: 0.017443662509322166,global_loss: -0.2009148746728897\n",
      "local_loss: 0.006532704923301935,global_loss: -0.18542295694351196\n",
      "Pretrain Epoch: 040, TSNE Loss: -0.0537\n",
      "local_loss: -0.0005999798886477947,global_loss: -0.17278076708316803\n",
      "local_loss: -0.006475452333688736,global_loss: -0.16300314664840698\n",
      "local_loss: -0.011942031793296337,global_loss: -0.15321239829063416\n",
      "local_loss: -0.015992948785424232,global_loss: -0.14289695024490356\n",
      "local_loss: -0.020507456734776497,global_loss: -0.12668775022029877\n",
      "Pretrain Epoch: 045, TSNE Loss: -0.0442\n",
      "local_loss: -0.02192828431725502,global_loss: -0.11699852347373962\n",
      "local_loss: -0.021232938393950462,global_loss: -0.10692447423934937\n",
      "local_loss: -0.02516794577240944,global_loss: -0.08167480677366257\n",
      "local_loss: -0.02368820272386074,global_loss: -0.07688672840595245\n",
      "local_loss: -0.027687890455126762,global_loss: -0.04352607950568199\n",
      "Pretrain Epoch: 050, TSNE Loss: -0.0214\n",
      "local_loss: -0.027179913595318794,global_loss: -0.038285259157419205\n",
      "local_loss: -0.02609638310968876,global_loss: -0.029367608949542046\n",
      "local_loss: -0.025954142212867737,global_loss: -0.017305221408605576\n",
      "local_loss: -0.026756836101412773,global_loss: 0.0033511158544570208\n",
      "local_loss: -0.02611355297267437,global_loss: 0.012796243652701378\n",
      "Pretrain Epoch: 055, TSNE Loss: -0.0040\n",
      "local_loss: -0.027323951944708824,global_loss: 0.03827330470085144\n",
      "local_loss: -0.023948457092046738,global_loss: 0.04477518051862717\n",
      "local_loss: -0.02270943485200405,global_loss: 0.06818254292011261\n",
      "local_loss: -0.015036707744002342,global_loss: 0.06494449079036713\n",
      "Pretrain early stopping at epoch 59\n",
      "\n",
      "=== Starting Fine-tuning ===\n",
      "Epoch: 000 | Loss: 4.0630 | Val AUC: 0.8921 | Val F1: 0.7946\n",
      "Epoch: 005 | Loss: 0.1483 | Val AUC: 0.9350 | Val F1: 0.9191\n",
      "Epoch: 010 | Loss: 0.0998 | Val AUC: 0.9204 | Val F1: 0.9182\n",
      "Epoch: 015 | Loss: 0.1006 | Val AUC: 0.8949 | Val F1: 0.8912\n",
      "Epoch: 020 | Loss: 0.2217 | Val AUC: 0.9299 | Val F1: 0.9170\n",
      "Epoch: 025 | Loss: 0.1024 | Val AUC: 0.9310 | Val F1: 0.9160\n",
      "Epoch: 030 | Loss: 0.0958 | Val AUC: 0.8871 | Val F1: 0.9192\n",
      "Epoch: 035 | Loss: 0.1255 | Val AUC: 0.9176 | Val F1: 0.9166\n",
      "Epoch: 040 | Loss: 0.0727 | Val AUC: 0.9328 | Val F1: 0.9165\n",
      "Epoch: 045 | Loss: 0.0962 | Val AUC: 0.9378 | Val F1: 0.9177\n",
      "Epoch: 050 | Loss: 0.0936 | Val AUC: 0.9209 | Val F1: 0.9102\n",
      "Epoch: 055 | Loss: 0.1404 | Val AUC: 0.9378 | Val F1: 0.9155\n",
      "Epoch: 060 | Loss: 0.0639 | Val AUC: 0.9101 | Val F1: 0.8668\n",
      "Epoch: 065 | Loss: 0.0767 | Val AUC: 0.9266 | Val F1: 0.9076\n",
      "Epoch: 070 | Loss: 0.2195 | Val AUC: 0.9331 | Val F1: 0.9161\n",
      "Epoch: 075 | Loss: 0.1199 | Val AUC: 0.9273 | Val F1: 0.9021\n",
      "Epoch: 080 | Loss: 0.1121 | Val AUC: 0.9299 | Val F1: 0.9069\n",
      "Epoch: 085 | Loss: 0.1297 | Val AUC: 0.9367 | Val F1: 0.9097\n",
      "Epoch: 090 | Loss: 0.0646 | Val AUC: 0.9261 | Val F1: 0.9193\n",
      "Epoch: 095 | Loss: 0.0158 | Val AUC: 0.9350 | Val F1: 0.9202\n",
      "Epoch: 100 | Loss: 0.0737 | Val AUC: 0.9387 | Val F1: 0.9140\n",
      "Epoch: 105 | Loss: 0.1011 | Val AUC: 0.9325 | Val F1: 0.9021\n",
      "Epoch: 110 | Loss: 0.1175 | Val AUC: 0.9642 | Val F1: 0.9240\n",
      "Epoch: 115 | Loss: 0.0790 | Val AUC: 0.9105 | Val F1: 0.8723\n",
      "Epoch: 120 | Loss: 0.0922 | Val AUC: 0.9489 | Val F1: 0.9134\n",
      "Epoch: 125 | Loss: 0.0897 | Val AUC: 0.9013 | Val F1: 0.8787\n",
      "Epoch: 130 | Loss: 0.1118 | Val AUC: 0.9519 | Val F1: 0.9155\n",
      "Epoch: 135 | Loss: 0.0899 | Val AUC: 0.9515 | Val F1: 0.9166\n",
      "Epoch: 140 | Loss: 0.0292 | Val AUC: 0.9453 | Val F1: 0.9114\n",
      "Epoch: 145 | Loss: 0.1176 | Val AUC: 0.9580 | Val F1: 0.9134\n",
      "Epoch: 150 | Loss: 0.2841 | Val AUC: 0.9301 | Val F1: 0.9030\n",
      "Epoch: 155 | Loss: 0.0996 | Val AUC: 0.9366 | Val F1: 0.8857\n",
      "Epoch: 160 | Loss: 0.0905 | Val AUC: 0.9357 | Val F1: 0.8896\n",
      "Epoch: 165 | Loss: 0.0170 | Val AUC: 0.9443 | Val F1: 0.9025\n",
      "Epoch 00034: reducing learning rate of group 0 to 2.5000e-04.\n",
      "Epoch: 170 | Loss: 0.0832 | Val AUC: 0.9591 | Val F1: 0.9024\n",
      "Epoch: 175 | Loss: 0.1659 | Val AUC: 0.9465 | Val F1: 0.8622\n",
      "Epoch: 180 | Loss: 0.0608 | Val AUC: 0.9409 | Val F1: 0.8941\n",
      "Epoch: 185 | Loss: 0.0777 | Val AUC: 0.9512 | Val F1: 0.9134\n",
      "Epoch: 190 | Loss: 0.0376 | Val AUC: 0.9503 | Val F1: 0.9017\n",
      "Epoch: 195 | Loss: 0.0454 | Val AUC: 0.9549 | Val F1: 0.9139\n",
      "Epoch: 200 | Loss: 0.1309 | Val AUC: 0.9526 | Val F1: 0.8937\n",
      "Epoch: 205 | Loss: 0.0119 | Val AUC: 0.9417 | Val F1: 0.8928\n",
      "Epoch: 210 | Loss: 0.0619 | Val AUC: 0.9553 | Val F1: 0.8939\n",
      "Epoch: 215 | Loss: 0.0946 | Val AUC: 0.9539 | Val F1: 0.8549\n",
      "Epoch: 220 | Loss: 0.1020 | Val AUC: 0.9463 | Val F1: 0.8968\n",
      "Epoch 00045: reducing learning rate of group 0 to 1.2500e-04.\n",
      "Epoch: 225 | Loss: 0.0082 | Val AUC: 0.9551 | Val F1: 0.9083\n",
      "Epoch: 230 | Loss: 0.0651 | Val AUC: 0.9468 | Val F1: 0.8993\n",
      "Epoch: 235 | Loss: 0.1232 | Val AUC: 0.9521 | Val F1: 0.9071\n",
      "Epoch: 255 | Loss: 0.0723 | Val AUC: 0.9487 | Val F1: 0.9062\n",
      "Epoch: 260 | Loss: 0.0302 | Val AUC: 0.9388 | Val F1: 0.9114\n",
      "Early stopping at epoch 260\n",
      "\n",
      "=== Final Test Results ===\n",
      "Test AUC: 0.9564 | Test AP: 0.8592 | Test F1: 0.9136 | G-mean: 0.8864\n"
     ]
    }
   ],
   "source": [
    "def bsne_main(args):\n",
    "    \"\"\"End-to-end pipeline: load data, t-SNE-style pretraining of the\n",
    "    encoder, supervised fine-tuning with early stopping, and final\n",
    "    evaluation on the held-out test split.\n",
    "\n",
    "    :param args: dict of hyper-parameters and data settings (see the\n",
    "        configuration dict in this cell).\n",
    "    \"\"\"\n",
    "    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "    # device = torch.device('cpu')\n",
    "    print(device)\n",
    "\n",
    "    timestamp = time.strftime(\"%Y%m%d-%H%M%S\")\n",
    "    writer = SummaryWriter(f'runs/{args[\"dataset\"]}_{timestamp}')\n",
    "\n",
    "    print('loading data...')\n",
    "    # NOTE(review): hardcoded absolute path; make configurable if this moves.\n",
    "    prefix = \"/data/run01/sczc619/LML/MetaTSNE/data/\"\n",
    "    edge_indexs, feat_data, labels = load_data(args['dataset'], args['layers_tree'], prefix)\n",
    "\n",
    "    np.random.seed(args['seed'])\n",
    "    rd.seed(args['seed'])\n",
    "\n",
    "    # Stratified train / val / test split per dataset.\n",
    "    if args['dataset'] == 'yelp':\n",
    "        index = list(range(len(labels)))\n",
    "        idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels, stratify=labels,\n",
    "                                                                        test_size=args['test_size'], random_state=2,\n",
    "                                                                        shuffle=True)\n",
    "        idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                              stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                              random_state=2, shuffle=True)\n",
    "        dist_path = os.path.join(prefix, \"Yelp_shortest_distance.pkl\")\n",
    "    elif args['dataset'] == 'amazon':\n",
    "        # Skip the first 3305 nodes (dataset convention -- presumably an\n",
    "        # unlabeled prefix; TODO confirm).\n",
    "        index = list(range(3305, len(labels)))\n",
    "        idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels[3305:],\n",
    "                                                                        stratify=labels[3305:],\n",
    "                                                                        test_size=args['test_size'],\n",
    "                                                                        random_state=2, shuffle=True)\n",
    "        idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                              stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                              random_state=2, shuffle=True)\n",
    "        dist_path = os.path.join(prefix, \"Amazon_shortest_distance.pkl\")\n",
    "\n",
    "    # NOTE(review): dist_path is only bound for 'yelp'/'amazon'; any other\n",
    "    # dataset name raises NameError here -- confirm intended.\n",
    "    with open(dist_path, 'rb') as f:\n",
    "        # pickle.load on a local artifact; never point this at untrusted files.\n",
    "        dist_data = pickle.load(f)\n",
    "        dist_matrix = torch.tensor(dist_data['dist_matrix']).to(device)\n",
    "\n",
    "\n",
    "    # NOTE(review): adj_dict is built here but never used afterwards.\n",
    "    adj_dict = defaultdict(list)\n",
    "    for rel in edge_indexs:\n",
    "        edge_index = rel[0].cpu().numpy()\n",
    "        for src, dst in zip(edge_index[0], edge_index[1]):\n",
    "            adj_dict[src].append(dst)\n",
    "\n",
    "    gnn_model = multi_HOGRL_Transformer(\n",
    "        in_feat=feat_data.shape[1],\n",
    "        out_feat=2,\n",
    "        relation_nums=len(edge_indexs),\n",
    "        d_model=128,\n",
    "        nhead=2,\n",
    "        num_layers=3,\n",
    "        dim_feedforward=256,\n",
    "        drop_rate=args['drop_rate'],\n",
    "        layers_tree=args['layers_tree'],\n",
    "        tsne_weight=args['tsne_weight']\n",
    "    ).to(device)\n",
    "\n",
    "    # Move graph structures and features onto the training device.\n",
    "    for edge_index in edge_indexs:\n",
    "        edge_index[0] = edge_index[0].to(device)\n",
    "        edge_index[1] = [tensor.to(device) for tensor in edge_index[1]]\n",
    "    feat_data = torch.tensor(feat_data).float().to(device)\n",
    "\n",
    "\n",
    "    print(\"\\n=== Starting Pretraining ===\")\n",
    "\n",
    "    # Freeze the classifier head; pretrain only the encoder with the\n",
    "    # t-SNE-style loss.\n",
    "    gnn_model.classifier.requires_grad_(False)\n",
    "    optimizer = torch.optim.AdamW(\n",
    "        filter(lambda p: p.requires_grad, gnn_model.parameters()),\n",
    "        lr=args['pretrain_lr'],\n",
    "        weight_decay=5e-5\n",
    "    )\n",
    "    pretrain_best_loss = float('inf')\n",
    "    pretrain_no_improve = 0\n",
    "    pretrain_early_stop = False\n",
    "\n",
    "    for epoch in range(args['pretrain_epochs']):\n",
    "        if pretrain_early_stop:\n",
    "            break\n",
    "\n",
    "        gnn_model.train()\n",
    "        optimizer.zero_grad()\n",
    "\n",
    "        # First sampling: subgraph B_p grown around random centre nodes.\n",
    "        batch_centers = rd.sample(range(feat_data.shape[0]), args['batch_size'])\n",
    "        sub_nodes_p = sample_subgraph(batch_centers, dist_matrix, args['sample_size'])\n",
    "\n",
    "        # Second sampling: uniform global batch B_u of matching size.\n",
    "        batch_u_global = np.random.choice(feat_data.shape[0], size=len(sub_nodes_p), replace=False)\n",
    "\n",
    "        # Embeddings for B_p (with gradients).\n",
    "        feat_sub_p = feat_data[sub_nodes_p]\n",
    "        _, embeddings_p = gnn_model(feat_sub_p, edge_indexs, sub_nodes=None)\n",
    "\n",
    "        # Embeddings for B_u (no gradients: B_u acts as a fixed reference).\n",
    "        feat_u = feat_data[batch_u_global]\n",
    "        with torch.no_grad():\n",
    "            _, embeddings_u = gnn_model(feat_u, edge_indexs, sub_nodes=None)\n",
    "\n",
    "        # Distance matrix restricted to the B_p subgraph.\n",
    "        dist_sub_p = dist_matrix[sub_nodes_p][:, sub_nodes_p]\n",
    "\n",
    "        # Weighted t-SNE-style pretraining loss.\n",
    "        tsne_loss = calculate_tsne_loss(\n",
    "            embeddings_p,  # subgraph embeddings\n",
    "            embeddings_u,  # globally sampled embeddings\n",
    "            dist_sub_p,  # B_p subgraph distances\n",
    "            dist_matrix,  # full-graph distances\n",
    "            sub_nodes_p,  # global indices of B_p\n",
    "            batch_u_global,  # global indices of B_u\n",
    "            temperature=100,\n",
    "            eps=1e-10\n",
    "        ) * args['tsne_weight']\n",
    "\n",
    "\n",
    "        tsne_loss.backward()\n",
    "        optimizer.step()\n",
    "\n",
    "        # print(f\"Epoch {epoch}: Loss={tsne_loss.item():.4f}\")\n",
    "\n",
    "        # Early stopping on the (weighted) pretraining loss.\n",
    "        if tsne_loss.item() < pretrain_best_loss:\n",
    "            pretrain_best_loss = tsne_loss.item()\n",
    "            pretrain_no_improve = 0\n",
    "        else:\n",
    "            pretrain_no_improve += 1\n",
    "\n",
    "        if pretrain_no_improve >= args['pretrain_patience']:\n",
    "            print(f\"Pretrain early stopping at epoch {epoch}\")\n",
    "            pretrain_early_stop = True\n",
    "\n",
    "        writer.add_scalar('Pretrain/TSNE_Loss', tsne_loss.item(), epoch)\n",
    "\n",
    "        if epoch % 5 == 0:\n",
    "            print(f'Pretrain Epoch: {epoch:03d}, TSNE Loss: {tsne_loss.item():.4f}')\n",
    "\n",
    "    print(\"\\n=== Starting Fine-tuning ===\")\n",
    "    # Unfreeze the classifier and train the whole model with supervision.\n",
    "    gnn_model.classifier.requires_grad_(True)\n",
    "    optimizer = torch.optim.AdamW(\n",
    "        filter(lambda p: p.requires_grad, gnn_model.parameters()),\n",
    "        lr=args['finetune_lr'],\n",
    "        weight_decay=5e-5\n",
    "    )\n",
    "    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n",
    "        optimizer, mode='max', factor=0.5, patience=10, verbose=True\n",
    "    )\n",
    "\n",
    "    best_val_auc = 0.0\n",
    "    best_model_state = None\n",
    "    train_pos, train_neg = pos_neg_split(idx_train, y_train)\n",
    "\n",
    "    no_improve_epochs = 0\n",
    "    early_stop = False\n",
    "\n",
    "    for epoch in range(args['num_epochs']):\n",
    "        if early_stop:\n",
    "            break\n",
    "\n",
    "        gnn_model.train()\n",
    "        total_loss = 0.0\n",
    "\n",
    "        batch_centers = rd.sample(train_pos + train_neg, args['batch_size'])\n",
    "        sub_nodes = sample_subgraph(batch_centers, dist_matrix, args['sample_size'])\n",
    "        # Positions of the centre nodes inside the sampled subgraph; only\n",
    "        # these rows receive the supervised loss.\n",
    "        batch_mask = [i for i, node in enumerate(sub_nodes) if node in batch_centers]\n",
    "\n",
    "        feat_sub = feat_data[sub_nodes]\n",
    "        labels_sub = labels[sub_nodes]\n",
    "\n",
    "        # NOTE(review): every inner step re-runs the SAME subgraph batch\n",
    "        # (len(sub_nodes) // batch_size gradient steps per epoch) -- confirm\n",
    "        # this repetition is intended rather than re-sampling per step.\n",
    "        for _ in range(len(sub_nodes) // args['batch_size']):\n",
    "            optimizer.zero_grad()\n",
    "\n",
    "            out, _ = gnn_model(feat_sub, edge_indexs, sub_nodes=None)\n",
    "            cls_loss = F.nll_loss(out[batch_mask], torch.LongTensor(labels_sub[batch_mask]).to(device))\n",
    "\n",
    "            cls_loss.backward()\n",
    "            optimizer.step()\n",
    "\n",
    "            total_loss += cls_loss.item()\n",
    "\n",
    "        avg_loss = total_loss / (len(sub_nodes) // args['batch_size'])\n",
    "        writer.add_scalar('FineTune/Train_Loss', avg_loss, epoch)\n",
    "\n",
    "        # Validate (and run scheduler / early-stopping logic) every 5 epochs.\n",
    "        if epoch % 5 == 0:\n",
    "            val_auc, val_ap, val_f1, val_g_mean = test(idx_val, y_val, gnn_model, feat_data, edge_indexs, device)\n",
    "\n",
    "            writer.add_scalar('Validation/AUC', val_auc, epoch)\n",
    "            writer.add_scalar('Validation/F1', val_f1, epoch)\n",
    "            writer.add_scalar('Validation/GMean', val_g_mean, epoch)\n",
    "\n",
    "            print(f'Epoch: {epoch:03d} | Loss: {avg_loss:.4f} | Val AUC: {val_auc:.4f} | Val F1: {val_f1:.4f}')\n",
    "\n",
    "            scheduler.step(val_auc)\n",
    "\n",
    "            # Keep the checkpoint with the best validation AUC.\n",
    "            if val_auc > best_val_auc:\n",
    "                best_val_auc = val_auc\n",
    "                no_improve_epochs = 0\n",
    "                best_model_state = copy.deepcopy(gnn_model.state_dict())\n",
    "            else:\n",
    "                no_improve_epochs += 1\n",
    "\n",
    "            if no_improve_epochs >= args['patience']:\n",
    "                print(f\"Early stopping at epoch {epoch}\")\n",
    "                early_stop = True\n",
    "\n",
    "    # Restore the best validation checkpoint and report test metrics.\n",
    "    gnn_model.load_state_dict(best_model_state)\n",
    "    test_auc, test_ap, test_f1, test_g_mean = test(idx_test, y_test, gnn_model, feat_data, edge_indexs, device)\n",
    "    print(f'\\n=== Final Test Results ===')\n",
    "    print(f'Test AUC: {test_auc:.4f} | Test AP: {test_ap:.4f} | Test F1: {test_f1:.4f} | G-mean: {test_g_mean:.4f}')\n",
    "    writer.close()\n",
    "\n",
    "# Hyper-parameter configuration consumed by bsne_main via key lookups.\n",
    "# NOTE(review): 'weight_decay', 'emb_size', 'weight', 'layers' and\n",
    "# 'num_heads' are never read by bsne_main (it hardcodes the corresponding\n",
    "# values) -- kept for reference.\n",
    "args = {\n",
    "    \"dataset\": \"amazon\",\n",
    "    \"batch_size\": 128,\n",
    "    \"sample_size\": 50,  # passed to sample_subgraph\n",
    "    \"weight_decay\": 0.00005,\n",
    "    \"emb_size\": 32,\n",
    "    \"pretrain_epochs\": 200,\n",
    "    \"pretrain_lr\": 0.001,\n",
    "    \"finetune_lr\": 0.0005,\n",
    "    \"num_epochs\": 500,\n",
    "    \"pretrain_patience\": 20,  # epochs without pretrain-loss improvement\n",
    "    \"patience\": 30,  # validation checks without AUC improvement\n",
    "    \"tsne_weight\": 0.3,  # scale of the pretraining t-SNE loss\n",
    "    \"weight\": 0.6,\n",
    "    \"layers\": 7,\n",
    "    \"test_size\": 0.6,  # fraction of labeled nodes held out for test\n",
    "    \"val_size\": 0.5,  # fraction of the remaining train+val used for validation\n",
    "    \"layers_tree\": 7,  # number of tree views per relation\n",
    "    \"seed\": 76,\n",
    "    \"num_heads\": 2,\n",
    "    \"drop_rate\": 0.5\n",
    "}\n",
    "bsne_main(args)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "52de9f3c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cuda\n",
      "loading data...\n",
      "\n",
      "=== Starting Fine-tuning ===\n",
      "Epoch: 000 | Loss: 2.2548 | Val AUC: 0.8852 | Val F1: 0.9039\n",
      "Epoch: 020 | Loss: 0.1636 | Val AUC: 0.8154 | Val F1: 0.8404\n",
      "Epoch: 025 | Loss: 0.0709 | Val AUC: 0.9296 | Val F1: 0.9150\n",
      "Epoch: 030 | Loss: 0.2065 | Val AUC: 0.9058 | Val F1: 0.9063\n",
      "Epoch: 035 | Loss: 0.2067 | Val AUC: 0.9239 | Val F1: 0.9213\n",
      "Epoch: 040 | Loss: 0.1329 | Val AUC: 0.9248 | Val F1: 0.9229\n",
      "Epoch: 045 | Loss: 0.1486 | Val AUC: 0.9092 | Val F1: 0.8827\n",
      "Epoch: 050 | Loss: 0.1317 | Val AUC: 0.9308 | Val F1: 0.9218\n",
      "Epoch: 055 | Loss: 0.0786 | Val AUC: 0.9219 | Val F1: 0.9166\n",
      "Epoch: 060 | Loss: 0.1526 | Val AUC: 0.9241 | Val F1: 0.8987\n",
      "Epoch: 065 | Loss: 0.2956 | Val AUC: 0.8949 | Val F1: 0.8835\n",
      "Epoch: 070 | Loss: 0.2086 | Val AUC: 0.9203 | Val F1: 0.8928\n",
      "Epoch 00015: reducing learning rate of group 0 to 2.5000e-04.\n",
      "Epoch: 075 | Loss: 0.1642 | Val AUC: 0.9164 | Val F1: 0.9091\n",
      "Epoch: 080 | Loss: 0.0602 | Val AUC: 0.8975 | Val F1: 0.8895\n",
      "Epoch: 085 | Loss: 0.0768 | Val AUC: 0.9282 | Val F1: 0.9093\n",
      "Epoch: 090 | Loss: 0.1123 | Val AUC: 0.9238 | Val F1: 0.9009\n",
      "Epoch: 095 | Loss: 0.0376 | Val AUC: 0.9198 | Val F1: 0.9059\n",
      "Epoch: 100 | Loss: 0.1505 | Val AUC: 0.9315 | Val F1: 0.9113\n",
      "Epoch: 105 | Loss: 0.0457 | Val AUC: 0.9280 | Val F1: 0.9156\n",
      "Epoch: 110 | Loss: 0.0360 | Val AUC: 0.9251 | Val F1: 0.9026\n",
      "Epoch: 115 | Loss: 0.1353 | Val AUC: 0.9287 | Val F1: 0.9055\n",
      "Epoch: 120 | Loss: 0.3075 | Val AUC: 0.8832 | Val F1: 0.8637\n",
      "Epoch: 125 | Loss: 0.0882 | Val AUC: 0.9482 | Val F1: 0.9181\n",
      "Epoch: 130 | Loss: 0.0356 | Val AUC: 0.8854 | Val F1: 0.8965\n",
      "Epoch: 135 | Loss: 0.0688 | Val AUC: 0.9446 | Val F1: 0.9010\n",
      "Epoch: 140 | Loss: 0.1391 | Val AUC: 0.9469 | Val F1: 0.9073\n",
      "Epoch: 145 | Loss: 0.0451 | Val AUC: 0.9548 | Val F1: 0.9218\n",
      "Epoch: 150 | Loss: 0.0661 | Val AUC: 0.9398 | Val F1: 0.9053\n",
      "Epoch: 155 | Loss: 0.0975 | Val AUC: 0.9371 | Val F1: 0.9150\n",
      "Epoch: 160 | Loss: 0.0755 | Val AUC: 0.9444 | Val F1: 0.9025\n"
     ]
    }
   ],
   "source": [
    "def bsne_main(args):\n",
    "    \"\"\"Train and evaluate the multi-relation HOGRL-Transformer model.\n",
    "\n",
    "    Loads the dataset named by args['dataset'] ('yelp' or 'amazon'),\n",
    "    builds stratified train/val/test splits, fine-tunes the GNN with\n",
    "    early stopping on validation AUC, restores the best checkpoint and\n",
    "    prints the final test metrics. The t-SNE pretraining stage is\n",
    "    currently disabled (kept below as commented-out code).\n",
    "\n",
    "    Args:\n",
    "        args: dict of hyperparameters (see the args literal in this cell).\n",
    "\n",
    "    Raises:\n",
    "        ValueError: if args['dataset'] is not a supported dataset name.\n",
    "    \"\"\"\n",
    "    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "    print(device)\n",
    "\n",
    "    timestamp = time.strftime(\"%Y%m%d-%H%M%S\")\n",
    "    writer = SummaryWriter(f'runs/{args[\"dataset\"]}_{timestamp}')\n",
    "\n",
    "    print('loading data...')\n",
    "    prefix = \"/data/run01/sczc619/LML/MetaTSNE/data/\"\n",
    "    edge_indexs, feat_data, labels = load_data(args['dataset'], args['layers_tree'], prefix)\n",
    "\n",
    "    # Seed both RNG sources used below (rd.sample / np.random.choice).\n",
    "    np.random.seed(args['seed'])\n",
    "    rd.seed(args['seed'])\n",
    "\n",
    "    if args['dataset'] == 'yelp':\n",
    "        index = list(range(len(labels)))\n",
    "        idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels, stratify=labels,\n",
    "                                                                        test_size=args['test_size'], random_state=2,\n",
    "                                                                        shuffle=True)\n",
    "        idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                              stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                              random_state=2, shuffle=True)\n",
    "        dist_path = os.path.join(prefix, \"Yelp_shortest_distance.pkl\")\n",
    "    elif args['dataset'] == 'amazon':\n",
    "        # Nodes 0..3304 are excluded from the Amazon splits.\n",
    "        index = list(range(3305, len(labels)))\n",
    "        idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels[3305:],\n",
    "                                                                        stratify=labels[3305:],\n",
    "                                                                        test_size=args['test_size'],\n",
    "                                                                        random_state=2, shuffle=True)\n",
    "        idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,\n",
    "                                                              stratify=y_train_val, test_size=args['val_size'],\n",
    "                                                              random_state=2, shuffle=True)\n",
    "        dist_path = os.path.join(prefix, \"Amazon_shortest_distance.pkl\")\n",
    "    else:\n",
    "        # Fail fast: previously an unknown dataset crashed later with a\n",
    "        # NameError on dist_path.\n",
    "        raise ValueError(f\"Unsupported dataset: {args['dataset']}\")\n",
    "\n",
    "    # Precomputed all-pairs shortest-path distances used for subgraph sampling.\n",
    "    with open(dist_path, 'rb') as f:\n",
    "        dist_data = pickle.load(f)\n",
    "        dist_matrix = torch.tensor(dist_data['dist_matrix']).to(device)\n",
    "\n",
    "    # Adjacency list aggregated over all relations.\n",
    "    # NOTE(review): adj_dict is never read later in this function - confirm\n",
    "    # whether it is still needed or can be removed.\n",
    "    adj_dict = defaultdict(list)\n",
    "    for rel in edge_indexs:\n",
    "        edge_index = rel[0].cpu().numpy()\n",
    "        for src, dst in zip(edge_index[0], edge_index[1]):\n",
    "            adj_dict[src].append(dst)\n",
    "\n",
    "    gnn_model = multi_HOGRL_Transformer(\n",
    "        in_feat=feat_data.shape[1],\n",
    "        out_feat=2,\n",
    "        relation_nums=len(edge_indexs),\n",
    "        d_model=128,\n",
    "        nhead=2,\n",
    "        num_layers=3,\n",
    "        dim_feedforward=256,\n",
    "        drop_rate=args['drop_rate'],\n",
    "        layers_tree=args['layers_tree'],\n",
    "        tsne_weight=args['tsne_weight']\n",
    "    ).to(device)\n",
    "\n",
    "    # Move edge indices and features to the training device.\n",
    "    for edge_index in edge_indexs:\n",
    "        edge_index[0] = edge_index[0].to(device)\n",
    "        edge_index[1] = [tensor.to(device) for tensor in edge_index[1]]\n",
    "    feat_data = torch.tensor(feat_data).float().to(device)\n",
    "\n",
    "    # ------------------------------------------------------------------\n",
    "    # t-SNE pretraining stage - DISABLED. It was previously wrapped in a\n",
    "    # no-op triple-quoted string; kept here as comments so it can be\n",
    "    # re-enabled by uncommenting.\n",
    "    # ------------------------------------------------------------------\n",
    "    # print(\"\\n=== Starting Pretraining ===\")\n",
    "    # gnn_model.classifier.requires_grad_(False)\n",
    "    # optimizer = torch.optim.AdamW(\n",
    "    #     filter(lambda p: p.requires_grad, gnn_model.parameters()),\n",
    "    #     lr=args['pretrain_lr'],\n",
    "    #     weight_decay=5e-5\n",
    "    # )\n",
    "    # pretrain_best_loss = float('inf')\n",
    "    # pretrain_no_improve = 0\n",
    "    # pretrain_early_stop = False\n",
    "    # for epoch in range(args['pretrain_epochs']):\n",
    "    #     if pretrain_early_stop:\n",
    "    #         break\n",
    "    #     gnn_model.train()\n",
    "    #     optimizer.zero_grad()\n",
    "    #     # First sampling: subgraph around random centers (B_p).\n",
    "    #     batch_centers = rd.sample(range(feat_data.shape[0]), args['batch_size'])\n",
    "    #     sub_nodes_p = sample_subgraph(batch_centers, dist_matrix, args['sample_size'])\n",
    "    #     # Second sampling: uniform global reference nodes (B_u).\n",
    "    #     batch_u_global = np.random.choice(feat_data.shape[0], size=len(sub_nodes_p), replace=False)\n",
    "    #     # Embeddings for B_p (with grad) and B_u (frozen reference).\n",
    "    #     feat_sub_p = feat_data[sub_nodes_p]\n",
    "    #     _, embeddings_p = gnn_model(feat_sub_p, edge_indexs, sub_nodes=None)\n",
    "    #     feat_u = feat_data[batch_u_global]\n",
    "    #     with torch.no_grad():\n",
    "    #         _, embeddings_u = gnn_model(feat_u, edge_indexs, sub_nodes=None)\n",
    "    #     # Pairwise distances restricted to the B_p subgraph.\n",
    "    #     dist_sub_p = dist_matrix[sub_nodes_p][:, sub_nodes_p]\n",
    "    #     tsne_loss = calculate_tsne_loss(\n",
    "    #         embeddings_p, embeddings_u, dist_sub_p, dist_matrix,\n",
    "    #         sub_nodes_p, batch_u_global, temperature=100, eps=1e-10\n",
    "    #     ) * args['tsne_weight']\n",
    "    #     tsne_loss.backward()\n",
    "    #     optimizer.step()\n",
    "    #     if tsne_loss.item() < pretrain_best_loss:\n",
    "    #         pretrain_best_loss = tsne_loss.item()\n",
    "    #         pretrain_no_improve = 0\n",
    "    #     else:\n",
    "    #         pretrain_no_improve += 1\n",
    "    #     if pretrain_no_improve >= args['pretrain_patience']:\n",
    "    #         print(f\"Pretrain early stopping at epoch {epoch}\")\n",
    "    #         pretrain_early_stop = True\n",
    "    #     writer.add_scalar('Pretrain/TSNE_Loss', tsne_loss.item(), epoch)\n",
    "    #     if epoch % 5 == 0:\n",
    "    #         print(f'Pretrain Epoch: {epoch:03d}, TSNE Loss: {tsne_loss.item():.4f}')\n",
    "\n",
    "    print(\"\\n=== Starting Fine-tuning ===\")\n",
    "    gnn_model.classifier.requires_grad_(True)\n",
    "    optimizer = torch.optim.AdamW(\n",
    "        filter(lambda p: p.requires_grad, gnn_model.parameters()),\n",
    "        lr=args['finetune_lr'],\n",
    "        weight_decay=args['weight_decay']  # was hardcoded 5e-5 (same value)\n",
    "    )\n",
    "    # Halve the LR after 10 evaluation rounds without a validation-AUC gain.\n",
    "    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n",
    "        optimizer, mode='max', factor=0.5, patience=10, verbose=True\n",
    "    )\n",
    "\n",
    "    best_val_auc = 0.0\n",
    "    best_model_state = None\n",
    "    train_pos, train_neg = pos_neg_split(idx_train, y_train)\n",
    "\n",
    "    no_improve_epochs = 0\n",
    "    early_stop = False\n",
    "\n",
    "    for epoch in range(args['num_epochs']):\n",
    "        if early_stop:\n",
    "            break\n",
    "\n",
    "        gnn_model.train()\n",
    "        total_loss = 0.0\n",
    "\n",
    "        # Sample labeled centers, expand them to a subgraph, and record the\n",
    "        # positions of the centers inside sub_nodes (batch_mask).\n",
    "        batch_centers = rd.sample(train_pos + train_neg, args['batch_size'])\n",
    "        sub_nodes = sample_subgraph(batch_centers, dist_matrix, args['sample_size'])\n",
    "        batch_mask = [i for i, node in enumerate(sub_nodes) if node in batch_centers]\n",
    "\n",
    "        feat_sub = feat_data[sub_nodes]\n",
    "        labels_sub = labels[sub_nodes]\n",
    "\n",
    "        # Run several optimizer steps on the same subgraph. max(1, ...)\n",
    "        # guards against a subgraph smaller than one batch, which would\n",
    "        # previously have caused a ZeroDivisionError in avg_loss below.\n",
    "        num_steps = max(1, len(sub_nodes) // args['batch_size'])\n",
    "        for _ in range(num_steps):\n",
    "            optimizer.zero_grad()\n",
    "\n",
    "            out, _ = gnn_model(feat_sub, edge_indexs, sub_nodes=None)\n",
    "            # Classification loss on the labeled centers only.\n",
    "            cls_loss = F.nll_loss(out[batch_mask], torch.LongTensor(labels_sub[batch_mask]).to(device))\n",
    "\n",
    "            cls_loss.backward()\n",
    "            optimizer.step()\n",
    "\n",
    "            total_loss += cls_loss.item()\n",
    "\n",
    "        avg_loss = total_loss / num_steps\n",
    "        writer.add_scalar('FineTune/Train_Loss', avg_loss, epoch)\n",
    "\n",
    "        # Evaluate every 5 epochs; early-stop after args['patience']\n",
    "        # evaluation rounds without a new best validation AUC.\n",
    "        if epoch % 5 == 0:\n",
    "            val_auc, val_ap, val_f1, val_g_mean = test(idx_val, y_val, gnn_model, feat_data, edge_indexs, device)\n",
    "\n",
    "            writer.add_scalar('Validation/AUC', val_auc, epoch)\n",
    "            writer.add_scalar('Validation/F1', val_f1, epoch)\n",
    "            writer.add_scalar('Validation/GMean', val_g_mean, epoch)\n",
    "\n",
    "            print(f'Epoch: {epoch:03d} | Loss: {avg_loss:.4f} | Val AUC: {val_auc:.4f} | Val F1: {val_f1:.4f}')\n",
    "\n",
    "            scheduler.step(val_auc)\n",
    "\n",
    "            if val_auc > best_val_auc:\n",
    "                best_val_auc = val_auc\n",
    "                no_improve_epochs = 0\n",
    "                best_model_state = copy.deepcopy(gnn_model.state_dict())\n",
    "            else:\n",
    "                no_improve_epochs += 1\n",
    "\n",
    "            if no_improve_epochs >= args['patience']:\n",
    "                print(f\"Early stopping at epoch {epoch}\")\n",
    "                early_stop = True\n",
    "\n",
    "    # Restore the best validation checkpoint (if any) before testing.\n",
    "    if best_model_state is not None:\n",
    "        gnn_model.load_state_dict(best_model_state)\n",
    "    test_auc, test_ap, test_f1, test_g_mean = test(idx_test, y_test, gnn_model, feat_data, edge_indexs, device)\n",
    "    print(f'\\n=== Final Test Results ===')\n",
    "    print(f'Test AUC: {test_auc:.4f} | Test AP: {test_ap:.4f} | Test F1: {test_f1:.4f} | G-mean: {test_g_mean:.4f}')\n",
    "    writer.close()\n",
    "\n",
    "# NOTE(review): an adjacent earlier cell in this notebook contains an\n",
    "# identical copy of this code - consider keeping only one copy.\n",
    "# Hyperparameters for bsne_main. Per-key notes flag keys that the visible\n",
    "# bsne_main body does not read.\n",
    "args = {\n",
    "    \"dataset\": \"amazon\",  # dataset name: 'amazon' or 'yelp'\n",
    "    \"batch_size\": 128,  # labeled centers sampled per fine-tuning epoch\n",
    "    \"sample_size\": 50,  # expansion size passed to sample_subgraph\n",
    "    \"weight_decay\": 0.00005,  # NOTE(review): bsne_main hardcodes 5e-5 (same value) - confirm intent\n",
    "    \"emb_size\": 32,  # NOTE(review): not read by bsne_main - confirm where it is used\n",
    "    \"pretrain_epochs\": 200,  # pretraining budget (pretraining stage is currently disabled)\n",
    "    \"pretrain_lr\": 0.001,  # pretraining learning rate (stage currently disabled)\n",
    "    \"finetune_lr\": 0.0005,  # AdamW learning rate for fine-tuning\n",
    "    \"num_epochs\": 500,  # maximum fine-tuning epochs\n",
    "    \"pretrain_patience\": 20,  # pretraining early-stop patience (stage currently disabled)\n",
    "    \"patience\": 30,  # evaluation rounds (one per 5 epochs) without AUC gain before early stop\n",
    "    \"tsne_weight\": 0.3,  # weight of the t-SNE term passed to the model\n",
    "    \"weight\": 0.6,  # NOTE(review): not read by bsne_main - confirm where it is used\n",
    "    \"layers\": 7,  # NOTE(review): not read by bsne_main - confirm where it is used\n",
    "    \"test_size\": 0.6,  # fraction of labeled nodes assigned to the test set (first split)\n",
    "    \"val_size\": 0.5,  # fraction of the remaining train+val pool assigned to validation\n",
    "    \"layers_tree\": 7,  # tree depth passed to load_data and the model\n",
    "    \"seed\": 76,  # seed for numpy and random RNGs\n",
    "    \"num_heads\": 2,  # NOTE(review): bsne_main hardcodes nhead=2 (same value) - confirm intent\n",
    "    \"drop_rate\": 0.5  # model dropout rate\n",
    "}\n",
    "bsne_main(args)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "35464141",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "tsne01",
   "language": "python",
   "name": "tsne01"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
