{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "f034ec8e",
   "metadata": {},
   "outputs": [],
   "source": [
    "import sys\n",
    "sys.path.append('/data/run01/sczc619/LML/MetaTSNE')\n",
    "from hogrl_main import *\n",
    "from hogrl_model import *\n",
    "from hogrl_utils import *"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "758f566c",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import os\n",
    "\n",
    "def calculate_tsne_loss(embedding, dist_matrix, perplexity=30.0, temperature=0.1, alpha=1.0, df=1.0, eps=1e-12):\n",
    "    \"\"\"t-SNE-style KL(P || Q) loss between graph distances and embedding distances.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    embedding : (N, d) tensor of node embeddings (gradients flow through Q only).\n",
    "    dist_matrix : (N, N) tensor of pairwise graph distances.\n",
    "    perplexity : target perplexity used to fit per-row bandwidths via find_sigma.\n",
    "    temperature, df : currently unused; kept for backward compatibility with the\n",
    "        commented-out Q variants that consumed them.\n",
    "    alpha : scale of the inverse-distance transform used to build P.\n",
    "    eps : numerical floor applied to both normalized distributions.\n",
    "    \"\"\"\n",
    "    distances = dist_matrix.float().clone()\n",
    "    distances.fill_diagonal_(0)\n",
    "\n",
    "    # NOTE(review): P is built from *inverse* distances (closer in the graph ->\n",
    "    # larger affinity) while find_sigma fits sigma on row**2 -- confirm this\n",
    "    # asymmetry is intentional (the exp(-d**2) variant was abandoned).\n",
    "    inv_distances = alpha / (distances + 1e-6)\n",
    "    sigma = find_sigma(inv_distances, perplexity).view(-1, 1)\n",
    "\n",
    "    # P is a constant target distribution, so build it without autograd.\n",
    "    with torch.no_grad():\n",
    "        P = torch.exp(-inv_distances / (2 * sigma**2))\n",
    "        P.fill_diagonal_(0)\n",
    "        P = (P + P.T) / 2\n",
    "        P = P / (P.sum(dim=1, keepdim=True) + eps)  # row-normalize\n",
    "        P = P / P.sum()                              # then globally normalize\n",
    "        P = torch.clamp(P, min=eps, max=1.0)\n",
    "\n",
    "    # Student-t (df=1) similarities in the embedding space.\n",
    "    pairwise_dist = torch.cdist(embedding, embedding)\n",
    "    Q = (1 + pairwise_dist**2)**(-1)\n",
    "    Q = Q.clone()  # clone before the in-place diagonal fill on a grad tensor\n",
    "    Q.fill_diagonal_(0)\n",
    "    Q = (Q + Q.T) / 2\n",
    "    Q = Q / (Q.sum(dim=1, keepdim=True) + eps)\n",
    "    Q = Q / Q.sum()\n",
    "    Q = torch.clamp(Q, min=eps, max=1.0)\n",
    "\n",
    "    # KL divergence between the two (clamped) distributions.\n",
    "    loss = (P * (torch.log(P) - torch.log(Q))).sum()\n",
    "\n",
    "    print(f\"P mean: {P.mean().item()}, P max: {P.max().item()}, P min: {P.min().item()}\")\n",
    "    print(f\"Q mean: {Q.mean().item()}, Q max: {Q.max().item()}, Q min: {Q.min().item()}\")\n",
    "    print(f\"Loss: {loss.item()}\")\n",
    "    return loss\n",
    "\n",
    "def find_sigma(distances, target_perplexity, tol=1e-5, max_iter=50):\n",
    "    distances = distances.detach()\n",
    "    sigmas = []\n",
    "    for row in distances:\n",
    "        sigma_min, sigma_max = 1e-5, 100.0\n",
    "        best_sigma = sigma_max\n",
    "        for _ in range(max_iter):\n",
    "            sigma = (sigma_min + sigma_max) / 2\n",
    "            p = torch.exp(-row**2 / (2 * sigma**2))\n",
    "            p = p / (p.sum() + 1e-8)\n",
    "            entropy = -torch.sum(p * torch.log2(p + 1e-12))\n",
    "            current_perplexity = 2**entropy\n",
    "            \n",
    "            if abs(current_perplexity - target_perplexity) < tol:\n",
    "                best_sigma = sigma\n",
    "                break\n",
    "                \n",
    "            if current_perplexity > target_perplexity:\n",
    "                sigma_max = sigma\n",
    "            else:\n",
    "                sigma_min = sigma\n",
    "        sigmas.append(best_sigma)\n",
    "    return torch.tensor(sigmas, device=distances.device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "535c8ef7",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "\n",
    "class multi_HOGRL_Model(nn.Module):\n",
    "    \"\"\"Multi-relation HOGRL: one Layer_AGG branch per relation; branch outputs\n",
    "    are concatenated and linearly projected to class log-probabilities.\"\"\"\n",
    "\n",
    "    def __init__(self, in_feat, out_feat, relation_nums=3, hidden=32, \n",
    "                 drop_rate=0.6, weight=1, num_layers=2, layers_tree=2,\n",
    "                 tsne_weight=0.1):\n",
    "        super(multi_HOGRL_Model, self).__init__()\n",
    "        self.relation_nums = relation_nums\n",
    "        self.drop_rate = drop_rate\n",
    "        self.weight = weight\n",
    "        self.layers_tree = layers_tree\n",
    "        # Register one aggregation branch per relation as Layers0..LayersK-1.\n",
    "        # Attribute names must stay stable so saved state_dicts keep loading.\n",
    "        for idx in range(relation_nums):\n",
    "            branch = Layer_AGG(in_feat, hidden, self.drop_rate, self.weight, num_layers, self.layers_tree)\n",
    "            setattr(self, 'Layers' + str(idx), branch)\n",
    "        self.linear = nn.Linear(hidden * relation_nums, out_feat)\n",
    "        self.tsne_weight = tsne_weight\n",
    "\n",
    "    def forward(self, x, edge_index):\n",
    "        # Run each relation-specific branch on its own edge structure.\n",
    "        per_relation = [\n",
    "            getattr(self, 'Layers' + str(idx))(x, edge_index[idx])\n",
    "            for idx in range(self.relation_nums)\n",
    "        ]\n",
    "        fused = torch.cat(per_relation, dim=1)\n",
    "        logits = F.log_softmax(self.linear(fused), dim=1)\n",
    "        # Return log-probabilities plus the pre-projection embedding.\n",
    "        return logits, fused\n",
    "\n",
    "def test(idx_eval, y_eval, gnn_model, feat_data, edge_indexs):\n",
    "    \"\"\"Evaluate the model on the given node indices.\n",
    "\n",
    "    Returns (ROC-AUC, average precision, macro F1, G-mean) computed from the\n",
    "    positive-class probabilities (threshold 0.5 for the hard predictions).\n",
    "    \"\"\"\n",
    "    gnn_model.eval()\n",
    "    with torch.no_grad():\n",
    "        logits, _ = gnn_model(feat_data, edge_indexs)\n",
    "        # Model emits log_softmax; exp() recovers probabilities.\n",
    "        probs = torch.exp(logits).cpu().detach()\n",
    "        eval_idx = np.array(idx_eval)\n",
    "        y_true = np.array(y_eval)\n",
    "        pos_probs = probs[:, 1].numpy()[eval_idx]\n",
    "        auc_score = roc_auc_score(y_true, np.array(pos_probs))\n",
    "        ap_score = average_precision_score(y_true, np.array(pos_probs))\n",
    "        predicted = (np.array(pos_probs) >= 0.5).astype(int)\n",
    "        f1_score_val = f1_score(y_true, predicted, average='macro')\n",
    "        g_mean = calculate_g_mean(y_true, predicted)\n",
    "\n",
    "    return auc_score, ap_score, f1_score_val, g_mean\n",
    "\n",
    "def hogrl_main(args):\n",
    "    \"\"\"Train multi_HOGRL_Model on the chosen dataset using only the t-SNE loss.\n",
    "\n",
    "    args keys read here: dataset, layers_tree, seed, test_size, val_size,\n",
    "    emb_size, drop_rate, weight, layers, batch_size, num_epochs.\n",
    "\n",
    "    NOTE(review): the classification (NLL) loss is deliberately commented out\n",
    "    below; only the t-SNE loss drives the optimizer in this experiment.\n",
    "    NOTE(review): if num_epochs < 10, no validation pass ever runs, so\n",
    "    best_model_state stays None and load_state_dict(best_model_state) raises.\n",
    "    \"\"\"\n",
    "    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "    print(device)\n",
    "    print('loading data...')\n",
    "    prefix = \"/data/run01/sczc619/LML/MetaTSNE/data/\"\n",
    "    edge_indexs,feat_data,labels = load_data(args['dataset'],args['layers_tree'], prefix)\n",
    "    np.random.seed(args['seed'])\n",
    "    random.seed(args['seed'])\n",
    "    \n",
    "    # Stratified train/val/test split; random_state is fixed to 2 on purpose,\n",
    "    # so the split is identical across runs regardless of args['seed'].\n",
    "    if args['dataset'] == 'yelp':\n",
    "        index = list(range(len(labels)))\n",
    "        idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels, stratify=labels, test_size=args['test_size'], random_state=2, shuffle=True)\n",
    "        idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val, stratify=y_train_val, test_size=args['val_size'], random_state=2, shuffle=True)\n",
    "        dist_path = os.path.join(prefix, \"Yelp_shortest_distance.pkl\")\n",
    "    elif args['dataset'] == 'amazon':\n",
    "        # 0-3304 are unlabeled nodes\n",
    "        index = list(range(3305, len(labels)))\n",
    "        idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels[3305:], stratify=labels[3305:], test_size=args['test_size'], random_state=2, shuffle=True)\n",
    "        idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val, stratify=y_train_val, test_size=args['val_size'], random_state=2, shuffle=True)\n",
    "        dist_path = os.path.join(prefix, \"Amazon_shortest_distance.pkl\")\n",
    "        \n",
    "    train_pos, train_neg = pos_neg_split(idx_train, y_train)\n",
    "    \n",
    "    # Load the precomputed shortest-path distance matrix\n",
    "#     dist_path = os.path.join(prefix, f\"{args['dataset']}_shortest_distance.pkl\")\n",
    "    with open(dist_path, 'rb') as f:\n",
    "        dist_data = pickle.load(f)\n",
    "        dist_matrix = torch.tensor(dist_data['dist_matrix']).to(device)\n",
    "#         print(dist_matrix[0])\n",
    "        \n",
    "#     dist_matrix = torch.tensor(dist_matrix).to(device)\n",
    "    # Model initialization\n",
    "    gnn_model = multi_HOGRL_Model(\n",
    "        feat_data.shape[1],\n",
    "        2,\n",
    "        len(edge_indexs),\n",
    "        args['emb_size'],\n",
    "        args['drop_rate'],\n",
    "        args['weight'],\n",
    "        args['layers'],\n",
    "        args['layers_tree'], \n",
    "        tsne_weight=0.3 \n",
    "    ).to(device)\n",
    "    \n",
    "    # Move the edge structures to the target device in place.\n",
    "    for edge_index in edge_indexs:\n",
    "        edge_index[0] = edge_index[0].to(device)\n",
    "        edge_index[1] = [tensor.to(device) for tensor in edge_index[1]]\n",
    "            \n",
    "    # labels = torch.tensor(labels).to(device)\n",
    "    feat_data = torch.tensor(feat_data).float().to(device)\n",
    "\n",
    "    # NOTE(review): lr/weight_decay are hardcoded here; args['lr'] and\n",
    "    # args['weight_decay'] are not consulted.\n",
    "    optimizer = torch.optim.Adam(gnn_model.parameters(), lr=0.005, weight_decay=5e-5)\n",
    "    batch_size = args['batch_size']\n",
    "    \n",
    "    best_val_auc = 0.0\n",
    "    best_model_state = None\n",
    "    \n",
    "    print('training...')\n",
    "    \n",
    "    for epoch in range(args['num_epochs']):\n",
    "        gnn_model.train()\n",
    "#         total_loss = 0\n",
    "        tsne_loss = 0\n",
    "        \n",
    "        # randomly under-sampling negative nodes for each epoch\n",
    "        sampled_idx_train = undersample(train_pos, train_neg, scale=1)\n",
    "        rd.shuffle(sampled_idx_train)\n",
    "        \n",
    "        num_batches = int(len(sampled_idx_train) / batch_size ) + 1\n",
    "#         print(\"num_batches:\",num_batches)\n",
    "        \n",
    "        for batch in range(num_batches):\n",
    "            i_start = batch * batch_size\n",
    "            i_end = min((batch + 1) * batch_size, len(sampled_idx_train))\n",
    "            batch_nodes = sampled_idx_train[i_start:i_end]\n",
    "            batch_label = torch.tensor(labels[np.array(batch_nodes)]).long().to(device)\n",
    "            \n",
    "            optimizer.zero_grad()\n",
    "#             out,_ = gnn_model(feat_data,edge_indexs)\n",
    "            out, f = gnn_model(feat_data, edge_indexs)\n",
    "            # retain_grad so the debug print below can inspect f.grad.\n",
    "            f.retain_grad()\n",
    "        \n",
    "#             print(f\"f norm: {torch.norm(f).item()}\")\n",
    "            \n",
    "            # Keep only the t-SNE loss (classification loss is commented out)\n",
    "#             tsne_loss = torch.tensor(0.0).to(out.device)\n",
    "            tsne_loss = calculate_tsne_loss(f, dist_matrix) * gnn_model.tsne_weight\n",
    "            \n",
    "#             print(tsne_loss)\n",
    "            # NOTE(review): batch_nodes_tensor and batch_label are currently\n",
    "            # unused -- they fed the commented-out NLL loss below.\n",
    "            batch_nodes_tensor = torch.tensor(batch_nodes, dtype=torch.long, device=device)\n",
    "\n",
    "            tsne_loss.backward()\n",
    "\n",
    "            print(\"tsne_loss:\",tsne_loss)\n",
    "            print(f\"Embedding grad stats: mean={f.grad.mean():.3e}, std={f.grad.std():.3e}\")\n",
    "            \n",
    "            optimizer.step()\n",
    "            \n",
    "        \n",
    "#             tsne_loss = torch.tensor(0.0).to(out.device)\n",
    "#             tsne_loss = calculate_tsne_loss(f, dist_matrix) * gnn_model.tsne_weight\n",
    "            \n",
    "# #             print(tsne_loss)\n",
    "#             batch_nodes_tensor = torch.tensor(batch_nodes, dtype=torch.long, device=device)\n",
    "\n",
    "#             # loss = F.nll_loss(out[np.array(batch_nodes)], batch_label)\n",
    "#             cls_loss = F.nll_loss(out[batch_nodes_tensor], batch_label)\n",
    "#             total_loss = cls_loss + tsne_loss\n",
    "#             total_loss = total_loss.sum()\n",
    "# #             print(total_loss)\n",
    "#             total_loss.backward()\n",
    "#             if f.grad is not None:\n",
    "#                 print(f\"Embedding grad norm: {torch.norm(f.grad).item():.4f}\")\n",
    "#             else:\n",
    "#                 print(\"Embedding f gradient is None!\")\n",
    "#             print(\"tsne_loss:\",tsne_loss)\n",
    "# #             print(f\"tsne_loss: {tsne_loss.item():.4f}, total_loss: {total_loss.item():.4f}\")\n",
    "            \n",
    "# #             loss.backward()\n",
    "#             optimizer.step()\n",
    "#             total_loss += total_loss.item()\n",
    "        \n",
    "#             loss += loss.item()\n",
    "        if epoch % 10 == 9: # validate every 10 epochs \n",
    "            val_auc, val_ap, val_f1, val_g_mean = test(idx_val, y_val, gnn_model, feat_data, edge_indexs)\n",
    "            print(f'Epoch: {epoch}, Val AUC: {val_auc:.4f}, Val AP: {val_ap:.4f}, Val F1: {val_f1:.4f}, Val G-mean: {val_g_mean:.4f}')\n",
    "            \n",
    "            if val_auc > best_val_auc:\n",
    "                best_val_auc = val_auc\n",
    "                best_model_state = gnn_model.state_dict() \n",
    "                \n",
    "    gnn_model.load_state_dict(best_model_state)  \n",
    "    test_auc, test_ap, test_f1, test_g_mean = test(idx_test, y_test, gnn_model, feat_data, edge_indexs)\n",
    "    print(f'Test AUC: {test_auc:.4f}, Test AP: {test_ap:.4f}, '\n",
    "        f'Test F1: {test_f1:.4f}, Test G-mean: {test_g_mean:.4f}')\n",
    "    # NOTE(review): the final forward pass result is unused here -- presumably\n",
    "    # kept for interactive inspection of the learned embedding.\n",
    "    out,embedding = gnn_model(feat_data,edge_indexs)\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "1ea03197",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cuda\n",
      "loading data...\n"
     ]
    },
    {
     "ename": "FileNotFoundError",
     "evalue": "[Errno 2] No such file or directory: '/data/run01/sczc619/LML/MetaTSNE/data/yelpnet_rur_matrix_decompision_1.pkl'",
     "output_type": "error",
     "traceback": [
      "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
      "\u001b[31mFileNotFoundError\u001b[39m                         Traceback (most recent call last)",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[5]\u001b[39m\u001b[32m, line 17\u001b[39m\n\u001b[32m      1\u001b[39m args = {\n\u001b[32m      2\u001b[39m     \u001b[33m\"\u001b[39m\u001b[33mdataset\u001b[39m\u001b[33m\"\u001b[39m: \u001b[33m\"\u001b[39m\u001b[33myelp\u001b[39m\u001b[33m\"\u001b[39m,\n\u001b[32m      3\u001b[39m     \u001b[33m\"\u001b[39m\u001b[33mbatch_size\u001b[39m\u001b[33m\"\u001b[39m: \u001b[32m256\u001b[39m,\n\u001b[32m   (...)\u001b[39m\u001b[32m     14\u001b[39m     \u001b[33m\"\u001b[39m\u001b[33mdrop_rate\u001b[39m\u001b[33m\"\u001b[39m: \u001b[32m0.3\u001b[39m\n\u001b[32m     15\u001b[39m }\n\u001b[32m---> \u001b[39m\u001b[32m17\u001b[39m \u001b[43mhogrl_main\u001b[49m\u001b[43m(\u001b[49m\u001b[43margs\u001b[49m\u001b[43m)\u001b[49m\n",
      "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[4]\u001b[39m\u001b[32m, line 48\u001b[39m, in \u001b[36mhogrl_main\u001b[39m\u001b[34m(args)\u001b[39m\n\u001b[32m     46\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33m'\u001b[39m\u001b[33mloading data...\u001b[39m\u001b[33m'\u001b[39m)\n\u001b[32m     47\u001b[39m prefix = \u001b[33m\"\u001b[39m\u001b[33m/data/run01/sczc619/LML/MetaTSNE/data/\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m---> \u001b[39m\u001b[32m48\u001b[39m edge_indexs,feat_data,labels = \u001b[43mload_data\u001b[49m\u001b[43m(\u001b[49m\u001b[43margs\u001b[49m\u001b[43m[\u001b[49m\u001b[33;43m'\u001b[39;49m\u001b[33;43mdataset\u001b[39;49m\u001b[33;43m'\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43margs\u001b[49m\u001b[43m[\u001b[49m\u001b[33;43m'\u001b[39;49m\u001b[33;43mlayers_tree\u001b[39;49m\u001b[33;43m'\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mprefix\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m     49\u001b[39m np.random.seed(args[\u001b[33m'\u001b[39m\u001b[33mseed\u001b[39m\u001b[33m'\u001b[39m])\n\u001b[32m     50\u001b[39m random.seed(args[\u001b[33m'\u001b[39m\u001b[33mseed\u001b[39m\u001b[33m'\u001b[39m])\n",
      "\u001b[36mFile \u001b[39m\u001b[32m/data/run01/sczc619/LML/MetaTSNE/methods/hogrl/hogrl_utils.py:104\u001b[39m, in \u001b[36mload_data\u001b[39m\u001b[34m(data, k, prefix)\u001b[39m\n\u001b[32m    102\u001b[39m \u001b[38;5;28;01mfor\u001b[39;00m i \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(\u001b[32m1\u001b[39m,k+\u001b[32m1\u001b[39m):\n\u001b[32m    103\u001b[39m     file_name = \u001b[33m'\u001b[39m\u001b[38;5;132;01m{}\u001b[39;00m\u001b[38;5;132;01m{}\u001b[39;00m\u001b[33m.pkl\u001b[39m\u001b[33m'\u001b[39m.format(matrix_prefix[\u001b[33m'\u001b[39m\u001b[33myelp_rur\u001b[39m\u001b[33m'\u001b[39m], i)\n\u001b[32m--> \u001b[39m\u001b[32m104\u001b[39m     \u001b[38;5;28;01mwith\u001b[39;00m \u001b[38;5;28;43mopen\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mfile_name\u001b[49m\u001b[43m,\u001b[49m\u001b[33;43m'\u001b[39;49m\u001b[33;43mrb\u001b[39;49m\u001b[33;43m'\u001b[39;49m\u001b[43m)\u001b[49m \u001b[38;5;28;01mas\u001b[39;00m file:\n\u001b[32m    105\u001b[39m         tree = pickle.load(file)\n\u001b[32m    106\u001b[39m     file.close()\n",
      "\u001b[31mFileNotFoundError\u001b[39m: [Errno 2] No such file or directory: '/data/run01/sczc619/LML/MetaTSNE/data/yelpnet_rur_matrix_decompision_1.pkl'"
     ]
    }
   ],
   "source": [
    "# Hyper-parameters for the Yelp experiment.\n",
    "# NOTE(review): \"lr\" and \"weight_decay\" are not read by hogrl_main (its Adam\n",
    "# optimizer hardcodes lr=0.005, weight_decay=5e-5); the split random_state is\n",
    "# also fixed internally, so \"seed\" only seeds numpy/random.\n",
    "args = {\n",
    "    \"dataset\": \"yelp\",\n",
    "    \"batch_size\": 256,\n",
    "    \"lr\": 0.005,\n",
    "    \"weight_decay\": 0.00005,\n",
    "    \"emb_size\": 64,\n",
    "    \"num_epochs\": 500,\n",
    "    \"weight\": 0.6,\n",
    "    \"layers\": 7,\n",
    "    \"test_size\": 0.6,\n",
    "    \"val_size\": 0.5,\n",
    "    \"layers_tree\": 7,\n",
    "    \"seed\": 76,\n",
    "    \"drop_rate\": 0.3\n",
    "}\n",
    "\n",
    "hogrl_main(args)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c542afa3",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "tsne01",
   "language": "python",
   "name": "tsne01"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
