{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "2161b7b4",
   "metadata": {},
   "outputs": [],
   "source": [
    "import math\n",
    "import copy\n",
    "import numpy as np\n",
    "import torch\n",
    "from sklearn.metrics import average_precision_score, roc_auc_score\n",
    "\n",
    "def idNode(data, id_new_value_old):\n",
    "    data = copy.deepcopy(data)\n",
    "    \n",
    "    data.x = None\n",
    "    data.y[data.val_id] = -1\n",
    "    data.y[data.test_id] = -1\n",
    "    data.y = data.y[id_new_value_old]\n",
    "\n",
    "    data.train_id = None\n",
    "    data.test_id = None\n",
    "    data.val_id = None\n",
    "\n",
    "    id_old_value_new = torch.zeros(id_new_value_old.shape[0], dtype = torch.long)\n",
    "    id_old_value_new[id_new_value_old] = torch.arange(0, id_new_value_old.shape[0], dtype = torch.long)\n",
    "    row = data.edge_idxs\n",
    "    row = id_old_value_new[row]\n",
    "    data.edge_idxs = torch.stack([row], dim=0)\n",
    "    \n",
    "    return data\n",
    "\n",
    "def shuffleData(data):\n",
    "    \"\"\"Shuffle the training-node ids of data and relabel it accordingly.\n",
    "\n",
    "    Consumes global numpy RNG state (np.random.shuffle); seed beforehand\n",
    "    for reproducibility. Expects data to expose shape, train_id, val_id,\n",
    "    test_id, y and edge_idxs (see idNode). Returns the relabeled copy and\n",
    "    the permutation id_new_value_old with id_new_value_old[new] = old.\n",
    "    \"\"\"\n",
    "    data = copy.deepcopy(data)\n",
    "    \n",
    "    # Start from the identity permutation over all nodes.\n",
    "    id_new_value_old = np.arange(data.shape[0])\n",
    "   \n",
    "    train_id_shuffle = copy.deepcopy(data.train_id)\n",
    "   \n",
    "    # In-place shuffle of the copied training ids only; val/test ids keep\n",
    "    # their original positions in the permutation.\n",
    "    np.random.shuffle(train_id_shuffle)\n",
    "    \n",
    "    id_new_value_old[data.train_id] = train_id_shuffle\n",
    "    \n",
    "    data = idNode(data, id_new_value_old)\n",
    "   \n",
    "    return data, id_new_value_old\n",
    "\n",
    "\n",
    "def eval_edge_prediction(model, negative_edge_sampler, data, n_neighbors, batch_size=200):\n",
    "  \"\"\"Evaluate link prediction; returns (mean AP, mean AUC) over batches.\"\"\"\n",
    "  # The sampler must be seeded so that validation/test negatives are the\n",
    "  # same on every evaluation run.\n",
    "  assert negative_edge_sampler.seed is not None\n",
    "  negative_edge_sampler.reset_random_state()\n",
    "\n",
    "  ap_scores, auc_scores = [], []\n",
    "  with torch.no_grad():\n",
    "    model = model.eval()\n",
    "    # Evaluation keeps the training batch size: smaller batches let the\n",
    "    # memory be updated more frequently, so later test batches can see\n",
    "    # earlier test interactions through it.\n",
    "    n_interactions = len(data.sources)\n",
    "    n_batches = math.ceil(n_interactions / batch_size)\n",
    "\n",
    "    for batch_idx in range(n_batches):\n",
    "      start = batch_idx * batch_size\n",
    "      end = min(n_interactions, start + batch_size)\n",
    "      src_batch = data.sources[start:end]\n",
    "      dst_batch = data.destinations[start:end]\n",
    "      ts_batch = data.timestamps[start:end]\n",
    "      edge_id_batch = data.edge_idxs[start:end]\n",
    "\n",
    "      n_samples = len(src_batch)\n",
    "      _, negatives = negative_edge_sampler.sample(n_samples)\n",
    "\n",
    "      pos_prob, neg_prob = model.compute_edge_probabilities(src_batch, dst_batch,\n",
    "                                                            negatives, ts_batch,\n",
    "                                                            edge_id_batch, n_neighbors)\n",
    "\n",
    "      # Positives first, then negatives, matching the label vector below.\n",
    "      scores = np.concatenate([pos_prob.cpu().numpy(), neg_prob.cpu().numpy()])\n",
    "      labels = np.concatenate([np.ones(n_samples), np.zeros(n_samples)])\n",
    "\n",
    "      ap_scores.append(average_precision_score(labels, scores))\n",
    "      auc_scores.append(roc_auc_score(labels, scores))\n",
    "\n",
    "  return np.mean(ap_scores), np.mean(auc_scores)\n",
    "\n",
    "\n",
    "def eval_node_classification(tgn, decoder, data, edge_idxs, batch_size, n_neighbors):\n",
    "  \"\"\"Evaluate the node-classification decoder and return the ROC-AUC.\n",
    "\n",
    "  NOTE(review): only the first batch of data is evaluated even though\n",
    "  num_batch is computed below; restoring the full batch loop would change\n",
    "  the reported metric, so it is left as a TODO.\n",
    "  \"\"\"\n",
    "  num_instance = len(data.sources)\n",
    "  num_batch = math.ceil(num_instance / batch_size)  # TODO: iterate over all batches\n",
    "\n",
    "  with torch.no_grad():\n",
    "    decoder.eval()\n",
    "    tgn.eval()\n",
    "    \n",
    "    s_idx = 0\n",
    "    e_idx = batch_size\n",
    "\n",
    "    sources_batch = data.sources[s_idx: e_idx]\n",
    "    destinations_batch = data.destinations[s_idx: e_idx]\n",
    "    timestamps_batch = data.timestamps[s_idx:e_idx]\n",
    "    edge_idxs_batch = edge_idxs[s_idx: e_idx]\n",
    "    labels_batch = data.labels[s_idx: e_idx]\n",
    "    # Destinations are passed again in place of negative nodes; only the\n",
    "    # source embeddings are consumed below.\n",
    "    source_embedding, destination_embedding, _ = tgn.compute_temporal_embeddings(sources_batch,\n",
    "                                                                                   destinations_batch,\n",
    "                                                                                   destinations_batch,\n",
    "                                                                                   timestamps_batch,\n",
    "                                                                                   edge_idxs_batch,\n",
    "                                                                                   n_neighbors)\n",
    "    \n",
    "    # Attach graph-style metadata to the embedding tensor so it can be\n",
    "    # fed through shuffleData/idNode (mixup-style relabeling).\n",
    "    node_id = np.arange(source_embedding.shape[0])\n",
    "    np.random.shuffle(node_id)\n",
    "    # Synthetic (2, batch_size/2) edge index; assumes batch_size is even.\n",
    "    x = np.arange(batch_size).reshape(2, batch_size // 2)\n",
    "    source_embedding.edge_idxs = torch.tensor(x, dtype=torch.int64)\n",
    "    \n",
    "    # 60/20/20 train/val/test node split over the shuffled ids.\n",
    "    source_embedding.train_id = node_id[:int(source_embedding.shape[0] * 0.6)]\n",
    "    source_embedding.val_id = node_id[int(source_embedding.shape[0] * 0.6):int(source_embedding.shape[0] * 0.8)]\n",
    "    source_embedding.test_id = node_id[int(source_embedding.shape[0] * 0.8):]\n",
    "    \n",
    "    source_embedding.y = torch.tensor(labels_batch, dtype=torch.int64)\n",
    "    data_b, id_new_value_old = shuffleData(source_embedding)\n",
    "        \n",
    "    # Mixup coefficient (was a redundant double assignment: lam = lam = ...).\n",
    "    lam = np.random.beta(4.0, 4.0)\n",
    "    labels_batch_torch = torch.from_numpy(labels_batch).float()\n",
    "    pred_prob_batch = decoder(source_embedding, source_embedding.edge_idxs, data_b.edge_idxs, lam, id_new_value_old).sigmoid()\n",
    "    pred_prob_batch = torch.flatten(pred_prob_batch)\n",
    "    # Two-column (negative, positive) targets, flattened to align with the\n",
    "    # flattened predictions.\n",
    "    labels_batch_torch = torch.stack((1 - labels_batch_torch, labels_batch_torch), dim=-1)\n",
    "    labels_batch_torch = torch.flatten(labels_batch_torch)\n",
    "  # Move to CPU/numpy so sklearn works regardless of the decoder's device.\n",
    "  auc_roc = roc_auc_score(labels_batch_torch.cpu().numpy(), pred_prob_batch.cpu().numpy())\n",
    "  return auc_roc"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "5adbf4b9",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "INFO:root:Namespace(aggregator='last', backprop_every=1, bs=500, data='wikipedia', different_new_nodes=False, drop_out=0.1, embedding_module='graph_attention', gpu=0, lr=0.0003, memory_dim=172, memory_update_at_end=False, message_dim=100, message_function='identity', n_degree=10, n_epoch=10, n_head=2, n_layer=1, n_neg=1, n_runs=1, new_node=False, node_dim=100, patience=5, prefix='', randomize_features=False, time_dim=100, uniform=False, use_destination_embedding_in_message=False, use_memory=False, use_source_embedding_in_message=False, use_validation=False)\n",
      "DEBUG:root:Num of training instances: 133853\n",
      "DEBUG:root:Num of batches per epoch: 268\n",
      "INFO:root:Loading saved TGN model\n",
      "INFO:root:TGN models loaded\n",
      "INFO:root:Start training node classification task\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "x0 torch.Size([500, 172])\n"
     ]
    },
    {
     "ename": "TypeError",
     "evalue": "forward() takes 2 positional arguments but 4 were given",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mTypeError\u001b[0m                                 Traceback (most recent call last)",
      "Cell \u001b[0;32mIn [2], line 214\u001b[0m\n\u001b[1;32m    212\u001b[0m labels_batch_torch \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mfrom_numpy(labels_batch)\u001b[38;5;241m.\u001b[39mfloat()\u001b[38;5;241m.\u001b[39mto(device)\n\u001b[1;32m    213\u001b[0m data_b, id_new_value_old \u001b[38;5;241m=\u001b[39m shuffleData(source_embedding)\n\u001b[0;32m--> 214\u001b[0m pred \u001b[38;5;241m=\u001b[39m \u001b[43mdecoder\u001b[49m\u001b[43m(\u001b[49m\u001b[43msource_embedding\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43msource_embedding\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43medge_idxs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mdata_b\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43medge_idxs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mlam\u001b[49m\u001b[43m \u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mid_new_value_old\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241m.\u001b[39msigmoid()\n\u001b[1;32m    215\u001b[0m   \u001b[38;5;66;03m#pred = torch.flatten(pred)\u001b[39;00m\n\u001b[1;32m    216\u001b[0m labels_batch_torch \u001b[38;5;241m=\u001b[39m torch\u001b[38;5;241m.\u001b[39mstack((\u001b[38;5;241m1\u001b[39m \u001b[38;5;241m-\u001b[39m labels_batch_torch, labels_batch_torch), dim\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m-\u001b[39m\u001b[38;5;241m1\u001b[39m)\n",
      "File \u001b[0;32m~/opt/anaconda3/envs/M1Max/lib/python3.8/site-packages/torch/nn/modules/module.py:1190\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *input, **kwargs)\u001b[0m\n\u001b[1;32m   1186\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m   1187\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m   1188\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m   1189\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1190\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1191\u001b[0m \u001b[38;5;66;03m# Do not call functions when jit is used\u001b[39;00m\n\u001b[1;32m   1192\u001b[0m full_backward_hooks, non_full_backward_hooks \u001b[38;5;241m=\u001b[39m [], []\n",
      "File \u001b[0;32m~/Documents/Codes/tgn-master 3 copy 2/utils/utils1.py:24\u001b[0m, in \u001b[0;36mNet.forward\u001b[0;34m(self, x0, edge_index, edge_index_b, lam, id_new_value_old)\u001b[0m\n\u001b[1;32m     16\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\u001b[38;5;28mself\u001b[39m, x0, edge_index, edge_index_b, lam, id_new_value_old):\n\u001b[1;32m     17\u001b[0m     \u001b[38;5;66;03m#edge_index =edge_index.reshape(2, -1)\u001b[39;00m\n\u001b[1;32m     18\u001b[0m     \u001b[38;5;66;03m#edge_index_b= edge_index_b.reshape(2, -1)\u001b[39;00m\n\u001b[0;32m   (...)\u001b[0m\n\u001b[1;32m     21\u001b[0m     \u001b[38;5;66;03m#x = self.dropout(x)\u001b[39;00m\n\u001b[1;32m     22\u001b[0m     \u001b[38;5;66;03m#return self.fc_3(x).squeeze(dim=1)\u001b[39;00m\n\u001b[1;32m     23\u001b[0m     \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mx0\u001b[39m\u001b[38;5;124m\"\u001b[39m,x0\u001b[38;5;241m.\u001b[39mshape)\n\u001b[0;32m---> 24\u001b[0m     x1 \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfc_1\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx0\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43medge_index\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mx0\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m     25\u001b[0m     \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mx1\u001b[39m\u001b[38;5;124m\"\u001b[39m,x1\u001b[38;5;241m.\u001b[39mshape)\n\u001b[1;32m     26\u001b[0m     x1 \u001b[38;5;241m=\u001b[39m F\u001b[38;5;241m.\u001b[39mrelu(x1)\n",
      "File \u001b[0;32m~/opt/anaconda3/envs/M1Max/lib/python3.8/site-packages/torch/nn/modules/module.py:1190\u001b[0m, in \u001b[0;36mModule._call_impl\u001b[0;34m(self, *input, **kwargs)\u001b[0m\n\u001b[1;32m   1186\u001b[0m \u001b[38;5;66;03m# If we don't have any hooks, we want to skip the rest of the logic in\u001b[39;00m\n\u001b[1;32m   1187\u001b[0m \u001b[38;5;66;03m# this function, and just call forward.\u001b[39;00m\n\u001b[1;32m   1188\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_backward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_forward_pre_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_backward_hooks\n\u001b[1;32m   1189\u001b[0m         \u001b[38;5;129;01mor\u001b[39;00m _global_forward_hooks \u001b[38;5;129;01mor\u001b[39;00m _global_forward_pre_hooks):\n\u001b[0;32m-> 1190\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mforward_call\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m   1191\u001b[0m \u001b[38;5;66;03m# Do not call functions when jit is used\u001b[39;00m\n\u001b[1;32m   1192\u001b[0m full_backward_hooks, non_full_backward_hooks \u001b[38;5;241m=\u001b[39m [], []\n",
      "\u001b[0;31mTypeError\u001b[0m: forward() takes 2 positional arguments but 4 were given"
     ]
    }
   ],
   "source": [
    "import math\n",
    "import logging\n",
    "import time\n",
    "import sys\n",
    "import random\n",
    "import argparse\n",
    "import pickle\n",
    "from pathlib import Path\n",
    "\n",
    "import torch\n",
    "import numpy as np\n",
    "\n",
    "from model.tgn import TGN\n",
    "from utils.utils1 import EarlyStopMonitor, get_neighbor_finder, Net\n",
    "from utils.data_processing import compute_time_statistics, get_data_node_classification\n",
    "from evaluation.evaluation import eval_node_classification\n",
    "\n",
    "random.seed(0)\n",
    "np.random.seed(0)\n",
    "torch.manual_seed(0)\n",
    "\n",
    "### Argument and global variables\n",
    "parser = argparse.ArgumentParser('TGN self-supervised training')\n",
    "parser.add_argument('-d', '--data', type=str, help='Dataset name (eg. wikipedia or reddit)',\n",
    "                    default='wikipedia')\n",
    "parser.add_argument('--bs', type=int, default=500, help='Batch_size')\n",
    "parser.add_argument('--prefix', type=str, default='', help='Prefix to name the checkpoints')\n",
    "parser.add_argument('--n_degree', type=int, default=10, help='Number of neighbors to sample')\n",
    "parser.add_argument('--n_head', type=int, default=2, help='Number of heads used in attention layer')\n",
    "parser.add_argument('--n_epoch', type=int, default=10, help='Number of epochs')\n",
    "parser.add_argument('--n_layer', type=int, default=1, help='Number of network layers')\n",
    "parser.add_argument('--lr', type=float, default=3e-4, help='Learning rate')\n",
    "parser.add_argument('--patience', type=int, default=5, help='Patience for early stopping')\n",
    "parser.add_argument('--n_runs', type=int, default=1, help='Number of runs')\n",
    "parser.add_argument('--drop_out', type=float, default=0.1, help='Dropout probability')\n",
    "parser.add_argument('--gpu', type=int, default=0, help='Idx for the gpu to use')\n",
    "parser.add_argument('--node_dim', type=int, default=100, help='Dimensions of the node embedding')\n",
    "parser.add_argument('--time_dim', type=int, default=100, help='Dimensions of the time embedding')\n",
    "parser.add_argument('--backprop_every', type=int, default=1, help='Every how many batches to '\n",
    "                                                                  'backprop')\n",
    "parser.add_argument('--use_memory', action='store_true',\n",
    "                    help='Whether to augment the model with a node memory')\n",
    "parser.add_argument('--embedding_module', type=str, default=\"graph_attention\", choices=[\n",
    "  \"graph_attention\", \"graph_sum\", \"identity\", \"time\"], help='Type of embedding module')\n",
    "parser.add_argument('--message_function', type=str, default=\"identity\", choices=[\n",
    "  \"mlp\", \"identity\"], help='Type of message function')\n",
    "parser.add_argument('--aggregator', type=str, default=\"last\", help='Type of message '\n",
    "                                                                        'aggregator')\n",
    "parser.add_argument('--memory_update_at_end', action='store_true',\n",
    "                    help='Whether to update memory at the end or at the start of the batch')\n",
    "parser.add_argument('--message_dim', type=int, default=100, help='Dimensions of the messages')\n",
    "parser.add_argument('--memory_dim', type=int, default=172, help='Dimensions of the memory for '\n",
    "                                                                'each user')\n",
    "parser.add_argument('--different_new_nodes', action='store_true',\n",
    "                    help='Whether to use disjoint set of new nodes for train and val')\n",
    "parser.add_argument('--uniform', action='store_true',\n",
    "                    help='take uniform sampling from temporal neighbors')\n",
    "parser.add_argument('--randomize_features', action='store_true',\n",
    "                    help='Whether to randomize node features')\n",
    "parser.add_argument('--use_destination_embedding_in_message', action='store_true',\n",
    "                    help='Whether to use the embedding of the destination node as part of the message')\n",
    "parser.add_argument('--use_source_embedding_in_message', action='store_true',\n",
    "                    help='Whether to use the embedding of the source node as part of the message')\n",
    "parser.add_argument('--n_neg', type=int, default=1)\n",
    "parser.add_argument('--use_validation', action='store_true',\n",
    "                    help='Whether to use a validation set')\n",
    "parser.add_argument('--new_node', action='store_true', help='model new node')\n",
    "\n",
    "try:\n",
    "  # parse_args raises SystemExit on bad arguments; catch it explicitly\n",
    "  # instead of a bare except, which would also swallow KeyboardInterrupt\n",
    "  # and real programming errors.\n",
    "  args = parser.parse_args(args=[])\n",
    "except SystemExit:\n",
    "  parser.print_help()\n",
    "  sys.exit(0)\n",
    "\n",
    "BATCH_SIZE = args.bs\n",
    "NUM_NEIGHBORS = args.n_degree\n",
    "NUM_NEG = 1\n",
    "NUM_EPOCH = args.n_epoch\n",
    "NUM_HEADS = args.n_head\n",
    "DROP_OUT = args.drop_out\n",
    "GPU = args.gpu\n",
    "UNIFORM = args.uniform\n",
    "NEW_NODE = args.new_node\n",
    "SEQ_LEN = NUM_NEIGHBORS\n",
    "DATA = args.data\n",
    "NUM_LAYER = args.n_layer\n",
    "LEARNING_RATE = args.lr\n",
    "NODE_LAYER = 1\n",
    "NODE_DIM = args.node_dim\n",
    "TIME_DIM = args.time_dim\n",
    "USE_MEMORY = args.use_memory\n",
    "MESSAGE_DIM = args.message_dim\n",
    "MEMORY_DIM = args.memory_dim\n",
    "\n",
    "Path(\"./saved_models/\").mkdir(parents=True, exist_ok=True)\n",
    "Path(\"./saved_checkpoints/\").mkdir(parents=True, exist_ok=True)\n",
    "MODEL_SAVE_PATH = f'./saved_models/{args.prefix}-{args.data}' + '\\\n",
    "  node-classification.pth'\n",
    "get_checkpoint_path = lambda \\\n",
    "    epoch: f'./saved_checkpoints/{args.prefix}-{args.data}-{epoch}' + '\\\n",
    "  node-classification.pth'\n",
    "\n",
    "### set up logger\n",
    "logging.basicConfig(level=logging.INFO)\n",
    "logger = logging.getLogger()\n",
    "logger.setLevel(logging.DEBUG)\n",
    "fh = logging.FileHandler('log/{}.log'.format(str(time.time())))\n",
    "fh.setLevel(logging.DEBUG)\n",
    "ch = logging.StreamHandler()\n",
    "ch.setLevel(logging.WARN)\n",
    "formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n",
    "fh.setFormatter(formatter)\n",
    "ch.setFormatter(formatter)\n",
    "logger.addHandler(fh)\n",
    "logger.addHandler(ch)\n",
    "logger.info(args)\n",
    "\n",
    "full_data, node_features, edge_features, train_data, val_data, test_data = \\\n",
    "  get_data_node_classification(DATA, use_validation=args.use_validation)\n",
    "\n",
    "max_idx = max(full_data.unique_nodes)\n",
    "\n",
    "train_ngh_finder = get_neighbor_finder(train_data, uniform=UNIFORM, max_node_idx=max_idx)\n",
    "\n",
    "# Set device\n",
    "device_string = 'cuda:{}'.format(GPU) if torch.cuda.is_available() else 'cpu'\n",
    "device = torch.device(device_string)\n",
    "\n",
    "# Compute time statistics\n",
    "mean_time_shift_src, std_time_shift_src, mean_time_shift_dst, std_time_shift_dst = \\\n",
    "  compute_time_statistics(full_data.sources, full_data.destinations, full_data.timestamps)\n",
    "\n",
    "# One run = train the decoder on frozen TGN embeddings, then evaluate.\n",
    "for i in range(args.n_runs):\n",
    "  results_path = \"results/{}_node_classification_{}.pkl\".format(args.prefix,\n",
    "                                                                i) if i > 0 else \"results/{}_node_classification.pkl\".format(\n",
    "    args.prefix)\n",
    "  Path(\"results/\").mkdir(parents=True, exist_ok=True)\n",
    "\n",
    "  # Initialize Model\n",
    "  tgn = TGN(neighbor_finder=train_ngh_finder, node_features=node_features,\n",
    "            edge_features=edge_features, device=device,\n",
    "            n_layers=NUM_LAYER,\n",
    "            n_heads=NUM_HEADS, dropout=DROP_OUT, use_memory=USE_MEMORY,\n",
    "            message_dimension=MESSAGE_DIM, memory_dimension=MEMORY_DIM,\n",
    "            memory_update_at_start=not args.memory_update_at_end,\n",
    "            embedding_module_type=args.embedding_module,\n",
    "            message_function=args.message_function,\n",
    "            aggregator_type=args.aggregator, n_neighbors=NUM_NEIGHBORS,\n",
    "            mean_time_shift_src=mean_time_shift_src, std_time_shift_src=std_time_shift_src,\n",
    "            mean_time_shift_dst=mean_time_shift_dst, std_time_shift_dst=std_time_shift_dst,\n",
    "            use_destination_embedding_in_message=args.use_destination_embedding_in_message,\n",
    "            use_source_embedding_in_message=args.use_source_embedding_in_message)\n",
    "\n",
    "  tgn = tgn.to(device)\n",
    "\n",
    "  num_instance = len(train_data.sources)\n",
    "  num_batch = math.ceil(num_instance / BATCH_SIZE)\n",
    "\n",
    "  logger.debug('Num of training instances: {}'.format(num_instance))\n",
    "  logger.debug('Num of batches per epoch: {}'.format(num_batch))\n",
    "\n",
    "  # The encoder is loaded from a pretrained checkpoint and kept frozen;\n",
    "  # only the decoder below is trained.\n",
    "  logger.info('Loading saved TGN model')\n",
    "  model_path = f'./saved_models/{args.prefix}-{DATA}.pth'\n",
    "  tgn.load_state_dict(torch.load(model_path))\n",
    "  tgn.eval()\n",
    "  logger.info('TGN models loaded')\n",
    "  logger.info('Start training node classification task')\n",
    "\n",
    "  decoder = Net(hidden_channels=node_features.shape[1], in_channel=172, out_channel=2)\n",
    "  decoder_optimizer = torch.optim.Adam(decoder.parameters(), lr=args.lr)\n",
    "  decoder = decoder.to(device)\n",
    "  decoder_loss_criterion = torch.nn.BCELoss()\n",
    "\n",
    "  val_aucs = []\n",
    "  train_losses = []\n",
    "\n",
    "  early_stopper = EarlyStopMonitor(max_round=args.patience)\n",
    "  for epoch in range(args.n_epoch):\n",
    "    start_epoch = time.time()\n",
    "\n",
    "    # Initialize memory of the model at each epoch\n",
    "    if USE_MEMORY:\n",
    "      tgn.memory.__init_memory__()\n",
    "\n",
    "    tgn = tgn.eval()\n",
    "    decoder = decoder.train()\n",
    "    loss = 0\n",
    "    for k in range(num_batch):\n",
    "      s_idx = k * BATCH_SIZE\n",
    "      e_idx = min(num_instance, s_idx + BATCH_SIZE)\n",
    "      sources_batch = train_data.sources[s_idx: e_idx]\n",
    "      destinations_batch = train_data.destinations[s_idx: e_idx]\n",
    "      timestamps_batch = train_data.timestamps[s_idx: e_idx]\n",
    "      # NOTE(review): only half of the batch's edge ids are sliced here\n",
    "      # (e_idx/2) while the other arrays use the full range. This appears\n",
    "      # intentional (the ids are later reshaped to (2, bs/2) by the\n",
    "      # decoder path) but should be confirmed.\n",
    "      edge_idxs_batch = full_data.edge_idxs[s_idx: int(e_idx/2)]\n",
    "      labels_batch = train_data.labels[s_idx: e_idx]\n",
    "      decoder_optimizer.zero_grad()\n",
    "      # The TGN encoder is frozen; gradients flow only through the decoder.\n",
    "      with torch.no_grad():\n",
    "           source_embedding, destination_embedding, _ = tgn.compute_temporal_embeddings(sources_batch,\n",
    "                                                                                     destinations_batch,\n",
    "                                                                                     destinations_batch,\n",
    "                                                                                     timestamps_batch,\n",
    "                                                                                     edge_idxs_batch,\n",
    "                                                                                     NUM_NEIGHBORS)\n",
    "      # Attach graph-style metadata so the embedding tensor can be fed\n",
    "      # through shuffleData/idNode (mixup-style relabeling).\n",
    "      node_id = np.arange(source_embedding.shape[0])\n",
    "      np.random.shuffle(node_id)\n",
    "      source_embedding.edge_idxs = torch.tensor(edge_idxs_batch, dtype=torch.int64)\n",
    "      # 60/20/20 train/val/test node split over the shuffled ids.\n",
    "      source_embedding.train_id = node_id[:int(source_embedding.shape[0] * 0.6)]\n",
    "      source_embedding.val_id = node_id[int(source_embedding.shape[0] * 0.6):int(source_embedding.shape[0] * 0.8)]\n",
    "      source_embedding.test_id = node_id[int(source_embedding.shape[0] * 0.8):]\n",
    "      source_embedding.y = torch.tensor(labels_batch, dtype=torch.int64)\n",
    "      # Mixup coefficient (was a redundant double assignment: lam = lam = ...).\n",
    "      lam = np.random.beta(4.0, 4.0)\n",
    "      labels_batch_torch = torch.from_numpy(labels_batch).float().to(device)\n",
    "      data_b, id_new_value_old = shuffleData(source_embedding)\n",
    "      pred = decoder(source_embedding, source_embedding.edge_idxs, data_b.edge_idxs, lam, id_new_value_old).sigmoid()\n",
    "      # Two-column (negative, positive) targets for BCELoss.\n",
    "      labels_batch_torch = torch.stack((1 - labels_batch_torch, labels_batch_torch), dim=-1)\n",
    "      decoder_loss = decoder_loss_criterion(pred, labels_batch_torch)\n",
    "      decoder_loss.backward()\n",
    "      decoder_optimizer.step()\n",
    "      loss += decoder_loss.item()\n",
    "    train_losses.append(loss / num_batch)\n",
    "    val_auc = eval_node_classification(tgn, decoder, val_data, full_data.edge_idxs, BATCH_SIZE,\n",
    "                                       n_neighbors=NUM_NEIGHBORS)\n",
    "    val_aucs.append(val_auc)\n",
    "\n",
    "    # Persist intermediate results; close the file handle explicitly\n",
    "    # (the previous open(...) inside pickle.dump leaked it).\n",
    "    with open(results_path, \"wb\") as results_file:\n",
    "      pickle.dump({\n",
    "        \"val_aps\": val_aucs,\n",
    "        \"train_losses\": train_losses,\n",
    "        \"epoch_times\": [0.0],\n",
    "        \"new_nodes_val_aps\": [],\n",
    "      }, results_file)\n",
    "\n",
    "    logger.info(f'Epoch {epoch}: train loss: {loss / num_batch}, val auc: {val_auc}, time: {time.time() - start_epoch}')\n",
    "\n",
    "  # NOTE(review): this early-stopping block sits outside the epoch loop,\n",
    "  # so it runs once per run with the final val_auc and the break exits\n",
    "  # the runs loop; in the upstream TGN script it lives inside the epoch\n",
    "  # loop. Left as-is to preserve current behavior.\n",
    "  if args.use_validation:\n",
    "    if early_stopper.early_stop_check(val_auc):\n",
    "      logger.info('No improvement over {} epochs, stop training'.format(early_stopper.max_round))\n",
    "      break\n",
    "    else:\n",
    "      torch.save(decoder.state_dict(), get_checkpoint_path(epoch))\n",
    "\n",
    "  if args.use_validation:\n",
    "    logger.info(f'Loading the best model at epoch {early_stopper.best_epoch}')\n",
    "    best_model_path = get_checkpoint_path(early_stopper.best_epoch)\n",
    "    decoder.load_state_dict(torch.load(best_model_path))\n",
    "    logger.info(f'Loaded the best model at epoch {early_stopper.best_epoch} for inference')\n",
    "    decoder.eval()\n",
    "\n",
    "    test_auc = eval_node_classification(tgn, decoder, test_data, full_data.edge_idxs, BATCH_SIZE,\n",
    "                                        n_neighbors=NUM_NEIGHBORS)\n",
    "  else:\n",
    "    # If we are not using a validation set, the test performance is just the performance computed\n",
    "    # in the last epoch\n",
    "    test_auc = val_aucs[-1]\n",
    "\n",
    "  with open(results_path, \"wb\") as results_file:\n",
    "    pickle.dump({\n",
    "      \"val_aps\": val_aucs,\n",
    "      \"test_ap\": test_auc,\n",
    "      \"train_losses\": train_losses,\n",
    "      \"epoch_times\": [0.0],\n",
    "      \"new_nodes_val_aps\": [],\n",
    "      \"new_node_test_ap\": 0,\n",
    "    }, results_file)\n",
    "\n",
    "  logger.info(f'test auc: {test_auc}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7981ce14",
   "metadata": {},
   "outputs": [],
   "source": [
    "import seaborn as sns\n",
    "import matplotlib.pyplot as plt\n",
    "# Use plot styling from seaborn.\n",
    "sns.set(style='darkgrid')\n",
    "\n",
    "# Increase the plot size and font size.\n",
    "sns.set(font_scale=1.5)\n",
    "plt.rcParams[\"figure.figsize\"] = (12,6)\n",
    "\n",
    "# Plot the training loss together with the validation AUC; the two\n",
    "# series are on different scales but share the epoch axis. (The old\n",
    "# labels called both curves a loss, which was misleading.)\n",
    "plt.plot(train_losses, 'b-o', label=\"Training loss\")\n",
    "\n",
    "plt.plot(val_aucs, 'g-o', label=\"Validation AUC\")\n",
    "\n",
    "\n",
    "# Label the plot; the y-axis mixes loss and AUC values.\n",
    "plt.title(\"Training Loss & Validation AUC\")\n",
    "plt.xlabel(\"Epoch\")\n",
    "plt.ylabel(\"Value\")\n",
    "plt.legend()\n",
    "# Tick every epoch actually plotted instead of a hard-coded 1..10\n",
    "# (the x-values are 0-based epoch indices).\n",
    "plt.xticks(range(len(train_losses)))\n",
    "\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5c53313f",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ad89af1c",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.15"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
