{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "import argparse, time, math\n",
    "import numpy as np\n",
    "import networkx as nx\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import dgl\n",
    "from dgl.data import register_data_args\n",
    "\n",
    "\n",
    "def gcn_msg(edge):\n",
    "    msg = edge.src['h'] * edge.src['norm']\n",
    "    return {'m': msg}\n",
    "\n",
    "\n",
    "def gcn_reduce(node):\n",
    "    accum = torch.sum(node.mailbox['m'], 1) * node.data['norm']\n",
    "    return {'h': accum}\n",
    "\n",
    "\n",
    "class NodeApplyModule(nn.Module):\n",
    "    def __init__(self, out_feats, activation=torch.sigmoid, bias=True):\n",
    "        super(NodeApplyModule, self).__init__()\n",
    "        if bias:\n",
    "            self.bias = nn.Parameter(torch.Tensor(out_feats))\n",
    "        else:\n",
    "            self.bias = None\n",
    "        self.activation = activation\n",
    "        self.reset_parameters()\n",
    "\n",
    "    def reset_parameters(self):\n",
    "        if self.bias is not None:\n",
    "            stdv = 1. / math.sqrt(self.bias.size(0))\n",
    "            self.bias.data.uniform_(-stdv, stdv)\n",
    "\n",
    "    def forward(self, nodes):\n",
    "        h = nodes.data['h']\n",
    "        if self.bias is not None:\n",
    "            h = h + self.bias\n",
    "        if self.activation:\n",
    "            h = self.activation(h)\n",
    "        return {'h': h}\n",
    "\n",
    "\n",
    "class GCNLayer(nn.Module):\n",
    "    \"\"\"One graph convolution: optional dropout, linear projection, then\n",
    "    normalized neighborhood aggregation on graph ``g``.\"\"\"\n",
    "\n",
    "    def __init__(self,\n",
    "                 g,\n",
    "                 in_feats,\n",
    "                 out_feats,\n",
    "                 activation,\n",
    "                 dropout,\n",
    "                 bias=True):\n",
    "        super(GCNLayer, self).__init__()\n",
    "        self.g = g\n",
    "        self.weight = nn.Parameter(torch.Tensor(in_feats, out_feats))\n",
    "        # A falsy dropout value (0 / None) disables dropout entirely.\n",
    "        self.dropout = nn.Dropout(p=dropout) if dropout else 0.\n",
    "        self.node_update = NodeApplyModule(out_feats, activation, bias)\n",
    "        self.reset_parameters()\n",
    "\n",
    "    def reset_parameters(self):\n",
    "        bound = 1. / math.sqrt(self.weight.size(1))\n",
    "        self.weight.data.uniform_(-bound, bound)\n",
    "\n",
    "    def forward(self, h):\n",
    "        if self.dropout:\n",
    "            h = self.dropout(h)\n",
    "        # Project features, run message passing, then read the result back out.\n",
    "        self.g.ndata['h'] = torch.mm(h, self.weight)\n",
    "        self.g.update_all(gcn_msg, gcn_reduce, self.node_update)\n",
    "        return self.g.ndata.pop('h')\n",
    "\n",
    "class GCN(nn.Module):\n",
    "    \"\"\"Stack of GCNLayers: input layer, (n_layers - 1) hidden layers, output layer.\"\"\"\n",
    "\n",
    "    def __init__(self,\n",
    "                 g,\n",
    "                 in_feats,\n",
    "                 n_hidden,\n",
    "                 n_classes,\n",
    "                 n_layers,\n",
    "                 activation,\n",
    "                 dropout):\n",
    "        super(GCN, self).__init__()\n",
    "        self.layers = nn.ModuleList()\n",
    "        # Input projection: in_feats -> n_hidden.\n",
    "        self.layers.append(GCNLayer(g, in_feats, n_hidden, activation, dropout))\n",
    "        # Hidden layers: n_hidden -> n_hidden, repeated (n_layers - 1) times.\n",
    "        for _ in range(n_layers - 1):\n",
    "            self.layers.append(GCNLayer(g, n_hidden, n_hidden, activation, dropout))\n",
    "        # Output layer: no activation so the logits feed CrossEntropyLoss directly.\n",
    "        self.layers.append(GCNLayer(g, n_hidden, n_classes, None, dropout))\n",
    "\n",
    "    def forward(self, features):\n",
    "        h = features\n",
    "        for layer in self.layers:\n",
    "            h = layer(h)\n",
    "        return h"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "def evaluate(model, features, labels, mask):\n",
    "    \"\"\"Run the model in eval mode and score predictions on the masked node subset.\n",
    "\n",
    "    Returns whatever ``metrics`` returns: the (y_true, y_pred, y_prob) arrays and\n",
    "    the tuple (accuracy, precision, recall, f1, roc_auc, aupr, pos_acc, neg_acc).\n",
    "    \"\"\"\n",
    "    model.eval()\n",
    "    with torch.no_grad():\n",
    "        logits = model(features)\n",
    "        logits = logits[mask]\n",
    "        labels = labels[mask]\n",
    "        _, indices = torch.max(logits, dim=1)\n",
    "        # Explicit dim=1: implicit-dim softmax is deprecated in PyTorch and the\n",
    "        # class axis should be stated for the 2-class logits.\n",
    "        probas = F.softmax(logits, dim=1)\n",
    "        # Column 1 = probability of the positive class, used for ROC-AUC / AUPR.\n",
    "        return metrics(labels.cpu().detach(), indices.cpu().detach(), probas.cpu().detach()[:,1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "def metrics(y_true, y_pred, y_prob):\n",
    "    \"\"\"Binary-classification scores from labels, hard predictions and\n",
    "    positive-class probabilities (all torch tensors on CPU).\n",
    "\n",
    "    Returns ((y_true, y_pred, y_prob) as numpy arrays,\n",
    "             (accuracy, precision, recall, f1, roc_auc, aupr, pos_acc, neg_acc)).\n",
    "    \"\"\"\n",
    "    y_true, y_pred, y_prob = (t.numpy() for t in (y_true, y_pred, y_prob))\n",
    "    tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()\n",
    "\n",
    "    pos_acc = tp / sum(y_true)                 # share of actual positives recovered\n",
    "    neg_acc = tn / (len(y_pred) - sum(y_pred)) # [y_true=0 & y_pred=0] / y_pred=0\n",
    "    total = tn + fp + fn + tp\n",
    "    accuracy = (tp + tn) / total\n",
    "\n",
    "    recall = tp / (tp + fn)\n",
    "    precision = tp / (tp + fp)\n",
    "    f1 = 2 * precision * recall / (precision + recall)\n",
    "\n",
    "    roc_auc = roc_auc_score(y_true, y_prob)\n",
    "    prec, reca, _ = precision_recall_curve(y_true, y_prob)\n",
    "    aupr = auc(reca, prec)  # area under the precision-recall curve\n",
    "\n",
    "    return (y_true, y_pred, y_prob), (accuracy, precision, recall, f1, roc_auc, aupr, pos_acc, neg_acc)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import pandas as pd\n",
    "from tqdm import tqdm\n",
    "import scipy.sparse as sp\n",
    "from copy import deepcopy\n",
    "import warnings \n",
    "import os\n",
    "from sklearn.model_selection import KFold\n",
    "import json\n",
    "warnings.filterwarnings(\"ignore\") \n",
    "\n",
    "from sklearn.metrics import confusion_matrix\n",
    "from sklearn.metrics import roc_auc_score, auc\n",
    "from sklearn.metrics import precision_recall_fscore_support\n",
    "from sklearn.metrics import precision_recall_curve\n",
    "from sklearn.metrics import classification_report\n",
    "from collections import Counter\n",
    "from sklearn.model_selection import KFold\n",
    "import argparse"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "def main(args, g, features, labels, train_idx):\n",
    "    \"\"\"Train a GCN on graph ``g`` and return last-epoch train/test results.\n",
    "\n",
    "    Nodes listed in ``train_idx`` form the train mask; every remaining node is\n",
    "    treated as test. Returns (ys_train, metrics_train, ys_test, metrics_test)\n",
    "    as produced by ``evaluate`` after the final epoch.\n",
    "    \"\"\"\n",
    "    if args.gpu >= 0:\n",
    "        cuda = True\n",
    "        device = torch.device('cuda:%d' % args.gpu)\n",
    "    else:\n",
    "        cuda = False\n",
    "        device = torch.device('cpu')\n",
    "\n",
    "    # Boolean masks: train nodes come from train_idx, everything else is test.\n",
    "    num_nodes = g.number_of_nodes()\n",
    "    train_mask = np.zeros(num_nodes, dtype = 'int64')\n",
    "    train_mask[train_idx] = 1\n",
    "    test_mask = 1 - train_mask\n",
    "    print(Counter(train_mask), Counter(test_mask))\n",
    "    train_mask = torch.BoolTensor(train_mask)\n",
    "    test_mask = torch.BoolTensor(test_mask)\n",
    "\n",
    "    g.ndata['feat'] = features\n",
    "    g.ndata['label'] = labels\n",
    "    g.ndata['train_mask'] = train_mask\n",
    "    g.ndata['test_mask'] = test_mask\n",
    "\n",
    "    g = g.to(device)\n",
    "\n",
    "    in_feats = features.shape[1]\n",
    "    n_classes = 2\n",
    "    n_edges = g.number_of_edges()\n",
    "\n",
    "    features, labels = features.to(device), labels.to(device)\n",
    "\n",
    "    print(\"\"\"----Data statistics------'\n",
    "      #Edges %d\n",
    "      #Classes %d\n",
    "      #Train samples %d\n",
    "      #Test samples %d\"\"\" %\n",
    "          (n_edges, n_classes,\n",
    "          train_mask.int().sum().item(),\n",
    "          test_mask.int().sum().item()))\n",
    "\n",
    "    # Symmetric normalization deg^-0.5, consumed by gcn_msg/gcn_reduce on both\n",
    "    # edge endpoints; isolated nodes (inf after pow) are zeroed out.\n",
    "    degs = g.in_degrees().float()\n",
    "    norm = torch.pow(degs, -0.5)\n",
    "    norm[torch.isinf(norm)] = 0\n",
    "    if cuda:\n",
    "        norm = norm.cuda()\n",
    "    g.ndata['norm'] = norm.unsqueeze(1)\n",
    "\n",
    "    # create GCN model\n",
    "    model = GCN(g,\n",
    "                in_feats,\n",
    "                args.n_hidden,\n",
    "                n_classes,\n",
    "                args.n_layers,\n",
    "                F.relu,\n",
    "                args.dropout)\n",
    "\n",
    "    if cuda:\n",
    "        model.cuda()\n",
    "    loss_fcn = torch.nn.CrossEntropyLoss()\n",
    "\n",
    "    # use optimizer\n",
    "    optimizer = torch.optim.Adamax(model.parameters(),\n",
    "                                 lr=args.lr,\n",
    "                                 weight_decay=args.weight_decay)\n",
    "\n",
    "    # training loop\n",
    "    dur = []\n",
    "    for epoch in range(args.n_epochs):\n",
    "        model.train()\n",
    "\n",
    "        t0 = time.time()\n",
    "        # forward/backward on the training nodes only\n",
    "        logits = model(features)\n",
    "        loss = loss_fcn(logits[train_mask], labels[train_mask])\n",
    "        # (removed redundant loss.requires_grad_(True): the loss already\n",
    "        # requires grad through the trainable model parameters)\n",
    "        optimizer.zero_grad()\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "\n",
    "        dur.append(time.time() - t0)\n",
    "\n",
    "        print('=====Epoch {} | Time(s) {:.4f} | Loss {:.4f} | ETputs(KTEPS) {:.2f}'.format(epoch, np.mean(dur), loss.item(), n_edges / np.mean(dur) / 1000))\n",
    "\n",
    "        # Evaluated every epoch; only the final epoch's results are returned.\n",
    "        ys_train, metrics_train = evaluate(model, features, labels, train_mask)\n",
    "        ys_test, metrics_test = evaluate(model, features, labels, test_mask)\n",
    "\n",
    "    return ys_train, metrics_train, ys_test, metrics_test"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "def run(args, task, isbalance, balance, n_neigh):\n",
    "    \"\"\"Load node features/labels and fold indices, build the kNN edge graph,\n",
    "    and train a GCN via ``main``.\n",
    "\n",
    "    NOTE(review): the loop below rebuilds the graph for every fold, but ``main``\n",
    "    is called only once AFTER the loop -- i.e. only the LAST fold is actually\n",
    "    trained and evaluated. Confirm whether the call belongs inside the loop.\n",
    "    \"\"\"\n",
    "    pwd = r'../../data/0_data/'\n",
    "\n",
    "    # NOTE(review): node_feature_label is only assigned when isbalance is True;\n",
    "    # calling with isbalance=False would raise NameError below. Confirm intent.\n",
    "    if isbalance:\n",
    "        node_feature_label = pd.read_csv(pwd + 'node_feature_label_.csv', index_col = 0)\n",
    "\n",
    "    train_test_id_idx = np.load(r'../../data/task_' + task + balance + '__testlabel0_knn_edge_train_test_index_all.npz', allow_pickle = True)\n",
    "    train_index_all = train_test_id_idx['train_index_all']\n",
    "    test_index_all = train_test_id_idx['test_index_all']\n",
    "    train_idx = train_index_all[0]\n",
    "    test_idx = test_index_all[0]  # fixed: was train_index_all[0] (copy-paste typo)\n",
    "\n",
    "    num_nodes = node_feature_label.shape[0]\n",
    "    features = torch.FloatTensor(np.array(node_feature_label.iloc[:, 3:]))\n",
    "    labels = torch.LongTensor(np.array(node_feature_label['label']))\n",
    "\n",
    "    fold = 0\n",
    "    for train_idx, test_idx in zip(train_index_all, test_index_all):\n",
    "\n",
    "        # Per-fold precomputed kNN adjacency (scipy sparse matrix on disk).\n",
    "        knn_graph_file = 'task_' + task + balance + '__testlabel0_knn' + str(n_neigh) + 'neighbors_edge__fold' + str(fold) + '.npz'\n",
    "        knn_neighbors_graph = sp.load_npz(pwd + knn_graph_file)\n",
    "\n",
    "        edge_src = knn_neighbors_graph.nonzero()[0]\n",
    "        edge_dst = knn_neighbors_graph.nonzero()[1]\n",
    "\n",
    "        g = dgl.DGLGraph()\n",
    "        g.add_nodes(num_nodes)\n",
    "        g.add_edges(edge_src, edge_dst)\n",
    "        g = dgl.add_self_loop(g)\n",
    "\n",
    "        fold += 1\n",
    "    ys_train, metrics_train, ys_test, metrics_test = main(args, g, features, labels, train_idx)\n",
    "\n",
    "    return node_feature_label, train_index_all, test_index_all, knn_neighbors_graph, g, ys_train, metrics_train, ys_test, metrics_test"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# RUN"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Namespace(dataset=None, dropout=0.3, gpu=-1, lr=0.003, n_epochs=250, n_hidden=512, n_layers=3, weight_decay=0.0005)\n",
      "************** isbalance = True | task = Tpe | n_neigh = 10\n",
      "=====Fold 4=============================================\n",
      "Graph(num_nodes=47116, num_edges=518276,\n",
      "      ndata_schemes={}\n",
      "      edata_schemes={})\n",
      "Counter({1: 41104, 0: 6012}) Counter({0: 41104, 1: 6012})\n",
      "----Data statistics------'\n",
      "      #Edges 518276\n",
      "      #Classes 2\n",
      "      #Train samples 41104\n",
      "      #Test samples 6012\n",
      "=====Epoch 0 | Time(s) 63.8850 | Loss 1.0194 | ETputs(KTEPS) 8.11\n",
      "=====Epoch 1 | Time(s) 61.5610 | Loss 31.4834 | ETputs(KTEPS) 8.42\n",
      "=====Epoch 2 | Time(s) 60.0910 | Loss 11.7897 | ETputs(KTEPS) 8.62\n",
      "=====Epoch 3 | Time(s) 59.5478 | Loss 3.2497 | ETputs(KTEPS) 8.70\n",
      "=====Epoch 4 | Time(s) 59.1316 | Loss 1.0843 | ETputs(KTEPS) 8.76\n",
      "=====Epoch 5 | Time(s) 59.0438 | Loss 0.7150 | ETputs(KTEPS) 8.78\n",
      "=====Epoch 6 | Time(s) 59.3420 | Loss 0.7163 | ETputs(KTEPS) 8.73\n",
      "=====Epoch 7 | Time(s) 59.5242 | Loss 0.7499 | ETputs(KTEPS) 8.71\n",
      "=====Epoch 8 | Time(s) 59.5324 | Loss 0.7531 | ETputs(KTEPS) 8.71\n",
      "=====Epoch 9 | Time(s) 59.2484 | Loss 0.7321 | ETputs(KTEPS) 8.75\n",
      "=====Epoch 10 | Time(s) 59.0839 | Loss 0.7113 | ETputs(KTEPS) 8.77\n",
      "=====Epoch 11 | Time(s) 61.1440 | Loss 0.7002 | ETputs(KTEPS) 8.48\n",
      "=====Epoch 12 | Time(s) 63.2430 | Loss 0.6951 | ETputs(KTEPS) 8.19\n",
      "=====Epoch 13 | Time(s) 64.9420 | Loss 0.6952 | ETputs(KTEPS) 7.98\n",
      "=====Epoch 14 | Time(s) 65.5834 | Loss 0.6972 | ETputs(KTEPS) 7.90\n",
      "=====Epoch 15 | Time(s) 65.2875 | Loss 0.6987 | ETputs(KTEPS) 7.94\n",
      "=====Epoch 16 | Time(s) 64.8276 | Loss 0.6989 | ETputs(KTEPS) 7.99\n",
      "=====Epoch 17 | Time(s) 64.5288 | Loss 0.6968 | ETputs(KTEPS) 8.03\n",
      "=====Epoch 18 | Time(s) 64.2391 | Loss 0.6932 | ETputs(KTEPS) 8.07\n",
      "=====Epoch 19 | Time(s) 63.8786 | Loss 0.6904 | ETputs(KTEPS) 8.11\n",
      "=====Epoch 20 | Time(s) 64.7805 | Loss 0.6883 | ETputs(KTEPS) 8.00\n",
      "=====Epoch 21 | Time(s) 64.7610 | Loss 0.6867 | ETputs(KTEPS) 8.00\n",
      "=====Epoch 22 | Time(s) 65.1044 | Loss 0.6879 | ETputs(KTEPS) 7.96\n",
      "=====Epoch 23 | Time(s) 65.4078 | Loss 0.6866 | ETputs(KTEPS) 7.92\n",
      "=====Epoch 24 | Time(s) 65.5838 | Loss 0.6858 | ETputs(KTEPS) 7.90\n",
      "=====Epoch 25 | Time(s) 65.7560 | Loss 0.6846 | ETputs(KTEPS) 7.88\n",
      "=====Epoch 26 | Time(s) 65.9912 | Loss 0.6845 | ETputs(KTEPS) 7.85\n",
      "=====Epoch 27 | Time(s) 66.5177 | Loss 0.6852 | ETputs(KTEPS) 7.79\n",
      "=====Epoch 28 | Time(s) 66.9140 | Loss 0.6832 | ETputs(KTEPS) 7.75\n",
      "=====Epoch 29 | Time(s) 67.0168 | Loss 0.6825 | ETputs(KTEPS) 7.73\n",
      "=====Epoch 30 | Time(s) 67.1900 | Loss 0.6823 | ETputs(KTEPS) 7.71\n",
      "=====Epoch 31 | Time(s) 67.4461 | Loss 0.6806 | ETputs(KTEPS) 7.68\n",
      "=====Epoch 32 | Time(s) 67.5810 | Loss 0.6806 | ETputs(KTEPS) 7.67\n",
      "=====Epoch 33 | Time(s) 67.5871 | Loss 0.6808 | ETputs(KTEPS) 7.67\n",
      "=====Epoch 34 | Time(s) 67.5231 | Loss 0.6787 | ETputs(KTEPS) 7.68\n",
      "=====Epoch 35 | Time(s) 67.4561 | Loss 0.6796 | ETputs(KTEPS) 7.68\n",
      "=====Epoch 36 | Time(s) 67.3848 | Loss 0.6785 | ETputs(KTEPS) 7.69\n",
      "=====Epoch 37 | Time(s) 67.2986 | Loss 0.6776 | ETputs(KTEPS) 7.70\n",
      "=====Epoch 38 | Time(s) 67.3220 | Loss 0.6780 | ETputs(KTEPS) 7.70\n",
      "=====Epoch 39 | Time(s) 67.4228 | Loss 0.6748 | ETputs(KTEPS) 7.69\n",
      "=====Epoch 40 | Time(s) 67.3840 | Loss 0.6750 | ETputs(KTEPS) 7.69\n",
      "=====Epoch 41 | Time(s) 67.4314 | Loss 0.6739 | ETputs(KTEPS) 7.69\n",
      "=====Epoch 42 | Time(s) 67.4933 | Loss 0.6750 | ETputs(KTEPS) 7.68\n",
      "=====Epoch 43 | Time(s) 67.5041 | Loss 0.6739 | ETputs(KTEPS) 7.68\n",
      "=====Epoch 44 | Time(s) 67.4775 | Loss 0.6736 | ETputs(KTEPS) 7.68\n",
      "=====Epoch 45 | Time(s) 67.4391 | Loss 0.6748 | ETputs(KTEPS) 7.69\n",
      "=====Epoch 46 | Time(s) 67.4230 | Loss 0.6740 | ETputs(KTEPS) 7.69\n",
      "=====Epoch 47 | Time(s) 67.4063 | Loss 0.6740 | ETputs(KTEPS) 7.69\n",
      "=====Epoch 48 | Time(s) 67.5210 | Loss 0.6741 | ETputs(KTEPS) 7.68\n",
      "=====Epoch 49 | Time(s) 67.6301 | Loss 0.6712 | ETputs(KTEPS) 7.66\n",
      "=====Epoch 50 | Time(s) 67.5988 | Loss 0.6726 | ETputs(KTEPS) 7.67\n",
      "=====Epoch 51 | Time(s) 67.5483 | Loss 0.6726 | ETputs(KTEPS) 7.67\n",
      "=====Epoch 52 | Time(s) 67.5209 | Loss 0.6711 | ETputs(KTEPS) 7.68\n",
      "=====Epoch 53 | Time(s) 67.5631 | Loss 0.6707 | ETputs(KTEPS) 7.67\n",
      "=====Epoch 54 | Time(s) 67.6410 | Loss 0.6719 | ETputs(KTEPS) 7.66\n",
      "=====Epoch 55 | Time(s) 67.6136 | Loss 0.6716 | ETputs(KTEPS) 7.67\n",
      "=====Epoch 56 | Time(s) 67.5700 | Loss 0.6700 | ETputs(KTEPS) 7.67\n",
      "=====Epoch 57 | Time(s) 67.5130 | Loss 0.6709 | ETputs(KTEPS) 7.68\n",
      "=====Epoch 58 | Time(s) 67.4568 | Loss 0.6708 | ETputs(KTEPS) 7.68\n",
      "=====Epoch 59 | Time(s) 67.4086 | Loss 0.6692 | ETputs(KTEPS) 7.69\n",
      "=====Epoch 60 | Time(s) 67.3732 | Loss 0.6687 | ETputs(KTEPS) 7.69\n",
      "=====Epoch 61 | Time(s) 67.3300 | Loss 0.6680 | ETputs(KTEPS) 7.70\n",
      "=====Epoch 62 | Time(s) 67.3363 | Loss 0.6687 | ETputs(KTEPS) 7.70\n",
      "=====Epoch 63 | Time(s) 67.4075 | Loss 0.6668 | ETputs(KTEPS) 7.69\n",
      "=====Epoch 64 | Time(s) 67.4268 | Loss 0.6677 | ETputs(KTEPS) 7.69\n",
      "=====Epoch 65 | Time(s) 67.3908 | Loss 0.6676 | ETputs(KTEPS) 7.69\n",
      "=====Epoch 66 | Time(s) 67.3481 | Loss 0.6673 | ETputs(KTEPS) 7.70\n",
      "=====Epoch 67 | Time(s) 67.3161 | Loss 0.6663 | ETputs(KTEPS) 7.70\n",
      "=====Epoch 68 | Time(s) 67.2797 | Loss 0.6675 | ETputs(KTEPS) 7.70\n",
      "=====Epoch 69 | Time(s) 67.2761 | Loss 0.6654 | ETputs(KTEPS) 7.70\n",
      "=====Epoch 70 | Time(s) 67.3682 | Loss 0.6642 | ETputs(KTEPS) 7.69\n",
      "=====Epoch 71 | Time(s) 67.4597 | Loss 0.6670 | ETputs(KTEPS) 7.68\n",
      "=====Epoch 72 | Time(s) 67.4975 | Loss 0.6666 | ETputs(KTEPS) 7.68\n",
      "=====Epoch 73 | Time(s) 67.4923 | Loss 0.6656 | ETputs(KTEPS) 7.68\n",
      "=====Epoch 74 | Time(s) 67.5355 | Loss 0.6640 | ETputs(KTEPS) 7.67\n",
      "=====Epoch 75 | Time(s) 67.5678 | Loss 0.6619 | ETputs(KTEPS) 7.67\n",
      "=====Epoch 76 | Time(s) 67.5790 | Loss 0.6628 | ETputs(KTEPS) 7.67\n",
      "=====Epoch 77 | Time(s) 67.5483 | Loss 0.6635 | ETputs(KTEPS) 7.67\n",
      "=====Epoch 78 | Time(s) 67.5311 | Loss 0.6638 | ETputs(KTEPS) 7.67\n",
      "=====Epoch 79 | Time(s) 67.5125 | Loss 0.6647 | ETputs(KTEPS) 7.68\n",
      "=====Epoch 80 | Time(s) 67.4738 | Loss 0.6626 | ETputs(KTEPS) 7.68\n",
      "=====Epoch 81 | Time(s) 67.4353 | Loss 0.6626 | ETputs(KTEPS) 7.69\n",
      "=====Epoch 82 | Time(s) 67.3993 | Loss 0.6619 | ETputs(KTEPS) 7.69\n",
      "=====Epoch 83 | Time(s) 67.3641 | Loss 0.6606 | ETputs(KTEPS) 7.69\n",
      "=====Epoch 84 | Time(s) 67.3766 | Loss 0.6595 | ETputs(KTEPS) 7.69\n",
      "=====Epoch 85 | Time(s) 67.4086 | Loss 0.6595 | ETputs(KTEPS) 7.69\n",
      "=====Epoch 86 | Time(s) 67.4375 | Loss 0.6566 | ETputs(KTEPS) 7.69\n",
      "=====Epoch 87 | Time(s) 67.4455 | Loss 0.6572 | ETputs(KTEPS) 7.68\n",
      "=====Epoch 88 | Time(s) 67.4597 | Loss 0.6570 | ETputs(KTEPS) 7.68\n",
      "=====Epoch 89 | Time(s) 67.4479 | Loss 0.6571 | ETputs(KTEPS) 7.68\n",
      "=====Epoch 90 | Time(s) 67.4419 | Loss 0.6557 | ETputs(KTEPS) 7.68\n",
      "=====Epoch 91 | Time(s) 67.4955 | Loss 0.6552 | ETputs(KTEPS) 7.68\n",
      "=====Epoch 92 | Time(s) 67.5623 | Loss 0.6539 | ETputs(KTEPS) 7.67\n",
      "=====Epoch 93 | Time(s) 67.6036 | Loss 0.6545 | ETputs(KTEPS) 7.67\n",
      "=====Epoch 94 | Time(s) 67.6093 | Loss 0.6511 | ETputs(KTEPS) 7.67\n",
      "=====Epoch 95 | Time(s) 67.6113 | Loss 0.6509 | ETputs(KTEPS) 7.67\n",
      "=====Epoch 96 | Time(s) 67.6568 | Loss 0.6528 | ETputs(KTEPS) 7.66\n",
      "=====Epoch 97 | Time(s) 67.7360 | Loss 0.6479 | ETputs(KTEPS) 7.65\n",
      "=====Epoch 98 | Time(s) 67.7453 | Loss 0.6497 | ETputs(KTEPS) 7.65\n",
      "=====Epoch 99 | Time(s) 67.7159 | Loss 0.6480 | ETputs(KTEPS) 7.65\n",
      "=====Epoch 100 | Time(s) 67.7094 | Loss 0.6478 | ETputs(KTEPS) 7.65\n",
      "=====Epoch 101 | Time(s) 67.6598 | Loss 0.6446 | ETputs(KTEPS) 7.66\n",
      "=====Epoch 102 | Time(s) 67.6263 | Loss 0.6458 | ETputs(KTEPS) 7.66\n",
      "=====Epoch 103 | Time(s) 67.5864 | Loss 0.6420 | ETputs(KTEPS) 7.67\n",
      "=====Epoch 104 | Time(s) 67.5340 | Loss 0.6413 | ETputs(KTEPS) 7.67\n",
      "=====Epoch 105 | Time(s) 67.5019 | Loss 0.6425 | ETputs(KTEPS) 7.68\n",
      "=====Epoch 106 | Time(s) 67.5179 | Loss 0.6419 | ETputs(KTEPS) 7.68\n",
      "=====Epoch 107 | Time(s) 67.6916 | Loss 0.6429 | ETputs(KTEPS) 7.66\n",
      "=====Epoch 108 | Time(s) 67.8642 | Loss 0.6386 | ETputs(KTEPS) 7.64\n",
      "=====Epoch 109 | Time(s) 68.0538 | Loss 0.6377 | ETputs(KTEPS) 7.62\n",
      "=====Epoch 110 | Time(s) 68.2620 | Loss 0.6385 | ETputs(KTEPS) 7.59\n",
      "=====Epoch 111 | Time(s) 68.4263 | Loss 0.6359 | ETputs(KTEPS) 7.57\n",
      "=====Epoch 112 | Time(s) 68.5915 | Loss 0.6342 | ETputs(KTEPS) 7.56\n",
      "=====Epoch 113 | Time(s) 68.5927 | Loss 0.6359 | ETputs(KTEPS) 7.56\n",
      "=====Epoch 114 | Time(s) 68.5592 | Loss 0.6362 | ETputs(KTEPS) 7.56\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "=====Epoch 115 | Time(s) 68.5172 | Loss 0.6343 | ETputs(KTEPS) 7.56\n",
      "=====Epoch 116 | Time(s) 68.5241 | Loss 0.6321 | ETputs(KTEPS) 7.56\n",
      "=====Epoch 117 | Time(s) 68.5549 | Loss 0.6332 | ETputs(KTEPS) 7.56\n",
      "=====Epoch 118 | Time(s) 68.5326 | Loss 0.6300 | ETputs(KTEPS) 7.56\n",
      "=====Epoch 119 | Time(s) 68.4916 | Loss 0.6310 | ETputs(KTEPS) 7.57\n",
      "=====Epoch 120 | Time(s) 68.4539 | Loss 0.6315 | ETputs(KTEPS) 7.57\n",
      "=====Epoch 121 | Time(s) 68.4118 | Loss 0.6297 | ETputs(KTEPS) 7.58\n",
      "=====Epoch 122 | Time(s) 68.3701 | Loss 0.6281 | ETputs(KTEPS) 7.58\n",
      "=====Epoch 123 | Time(s) 68.3276 | Loss 0.6246 | ETputs(KTEPS) 7.59\n",
      "=====Epoch 124 | Time(s) 68.2900 | Loss 0.6294 | ETputs(KTEPS) 7.59\n",
      "=====Epoch 125 | Time(s) 68.2851 | Loss 0.6245 | ETputs(KTEPS) 7.59\n",
      "=====Epoch 126 | Time(s) 68.2865 | Loss 0.6270 | ETputs(KTEPS) 7.59\n",
      "=====Epoch 127 | Time(s) 68.2919 | Loss 0.6234 | ETputs(KTEPS) 7.59\n",
      "=====Epoch 128 | Time(s) 68.2650 | Loss 0.6247 | ETputs(KTEPS) 7.59\n",
      "=====Epoch 129 | Time(s) 68.2297 | Loss 0.6235 | ETputs(KTEPS) 7.60\n",
      "=====Epoch 130 | Time(s) 68.1934 | Loss 0.6209 | ETputs(KTEPS) 7.60\n",
      "=====Epoch 131 | Time(s) 68.1656 | Loss 0.6229 | ETputs(KTEPS) 7.60\n",
      "=====Epoch 132 | Time(s) 68.1559 | Loss 0.6208 | ETputs(KTEPS) 7.60\n",
      "=====Epoch 133 | Time(s) 68.0850 | Loss 0.6205 | ETputs(KTEPS) 7.61\n",
      "=====Epoch 134 | Time(s) 68.0087 | Loss 0.6174 | ETputs(KTEPS) 7.62\n",
      "=====Epoch 135 | Time(s) 67.9223 | Loss 0.6163 | ETputs(KTEPS) 7.63\n",
      "=====Epoch 136 | Time(s) 67.8878 | Loss 0.6160 | ETputs(KTEPS) 7.63\n",
      "=====Epoch 137 | Time(s) 67.8520 | Loss 0.6167 | ETputs(KTEPS) 7.64\n",
      "=====Epoch 138 | Time(s) 67.7718 | Loss 0.6162 | ETputs(KTEPS) 7.65\n",
      "=====Epoch 139 | Time(s) 67.6956 | Loss 0.6170 | ETputs(KTEPS) 7.66\n",
      "=====Epoch 140 | Time(s) 67.6122 | Loss 0.6120 | ETputs(KTEPS) 7.67\n",
      "=====Epoch 141 | Time(s) 67.5328 | Loss 0.6109 | ETputs(KTEPS) 7.67\n",
      "=====Epoch 142 | Time(s) 67.4462 | Loss 0.6111 | ETputs(KTEPS) 7.68\n",
      "=====Epoch 143 | Time(s) 67.3718 | Loss 0.6097 | ETputs(KTEPS) 7.69\n",
      "=====Epoch 144 | Time(s) 67.2875 | Loss 0.6129 | ETputs(KTEPS) 7.70\n",
      "=====Epoch 145 | Time(s) 67.2024 | Loss 0.6104 | ETputs(KTEPS) 7.71\n",
      "=====Epoch 146 | Time(s) 67.1266 | Loss 0.6059 | ETputs(KTEPS) 7.72\n",
      "=====Epoch 147 | Time(s) 67.1013 | Loss 0.6107 | ETputs(KTEPS) 7.72\n",
      "=====Epoch 148 | Time(s) 67.2316 | Loss 0.6133 | ETputs(KTEPS) 7.71\n",
      "=====Epoch 149 | Time(s) 67.3614 | Loss 0.6064 | ETputs(KTEPS) 7.69\n",
      "=====Epoch 150 | Time(s) 67.3417 | Loss 0.6119 | ETputs(KTEPS) 7.70\n",
      "=====Epoch 151 | Time(s) 67.3443 | Loss 0.6098 | ETputs(KTEPS) 7.70\n",
      "=====Epoch 152 | Time(s) 67.2757 | Loss 0.6094 | ETputs(KTEPS) 7.70\n",
      "=====Epoch 153 | Time(s) 67.1997 | Loss 0.6062 | ETputs(KTEPS) 7.71\n",
      "=====Epoch 154 | Time(s) 67.1648 | Loss 0.6057 | ETputs(KTEPS) 7.72\n",
      "=====Epoch 155 | Time(s) 67.1552 | Loss 0.6087 | ETputs(KTEPS) 7.72\n",
      "=====Epoch 156 | Time(s) 67.1496 | Loss 0.6067 | ETputs(KTEPS) 7.72\n",
      "=====Epoch 157 | Time(s) 67.1947 | Loss 0.6068 | ETputs(KTEPS) 7.71\n",
      "=====Epoch 158 | Time(s) 67.2359 | Loss 0.6033 | ETputs(KTEPS) 7.71\n",
      "=====Epoch 159 | Time(s) 67.2518 | Loss 0.6038 | ETputs(KTEPS) 7.71\n",
      "=====Epoch 160 | Time(s) 67.2507 | Loss 0.6009 | ETputs(KTEPS) 7.71\n",
      "=====Epoch 161 | Time(s) 67.2537 | Loss 0.6003 | ETputs(KTEPS) 7.71\n",
      "=====Epoch 162 | Time(s) 67.2766 | Loss 0.6023 | ETputs(KTEPS) 7.70\n",
      "=====Epoch 163 | Time(s) 67.2989 | Loss 0.5982 | ETputs(KTEPS) 7.70\n",
      "=====Epoch 164 | Time(s) 67.2906 | Loss 0.6040 | ETputs(KTEPS) 7.70\n",
      "=====Epoch 165 | Time(s) 67.2811 | Loss 0.5987 | ETputs(KTEPS) 7.70\n",
      "=====Epoch 166 | Time(s) 67.2720 | Loss 0.6034 | ETputs(KTEPS) 7.70\n",
      "=====Epoch 167 | Time(s) 67.2745 | Loss 0.5974 | ETputs(KTEPS) 7.70\n",
      "=====Epoch 168 | Time(s) 67.2704 | Loss 0.5960 | ETputs(KTEPS) 7.70\n",
      "=====Epoch 169 | Time(s) 67.2655 | Loss 0.5942 | ETputs(KTEPS) 7.70\n",
      "=====Epoch 170 | Time(s) 67.2614 | Loss 0.5964 | ETputs(KTEPS) 7.71\n",
      "=====Epoch 171 | Time(s) 67.2851 | Loss 0.5978 | ETputs(KTEPS) 7.70\n",
      "=====Epoch 172 | Time(s) 67.3119 | Loss 0.5980 | ETputs(KTEPS) 7.70\n",
      "=====Epoch 173 | Time(s) 67.3262 | Loss 0.5938 | ETputs(KTEPS) 7.70\n",
      "=====Epoch 174 | Time(s) 67.3299 | Loss 0.5951 | ETputs(KTEPS) 7.70\n",
      "=====Epoch 175 | Time(s) 67.3456 | Loss 0.5917 | ETputs(KTEPS) 7.70\n",
      "=====Epoch 176 | Time(s) 67.3492 | Loss 0.5939 | ETputs(KTEPS) 7.70\n",
      "=====Epoch 177 | Time(s) 67.3536 | Loss 0.5932 | ETputs(KTEPS) 7.69\n",
      "=====Epoch 178 | Time(s) 67.3946 | Loss 0.5925 | ETputs(KTEPS) 7.69\n",
      "=====Epoch 179 | Time(s) 67.4455 | Loss 0.5969 | ETputs(KTEPS) 7.68\n",
      "=====Epoch 180 | Time(s) 67.4622 | Loss 0.5924 | ETputs(KTEPS) 7.68\n",
      "=====Epoch 181 | Time(s) 67.4662 | Loss 0.5954 | ETputs(KTEPS) 7.68\n",
      "=====Epoch 182 | Time(s) 67.4823 | Loss 0.5952 | ETputs(KTEPS) 7.68\n",
      "=====Epoch 183 | Time(s) 67.5314 | Loss 0.5988 | ETputs(KTEPS) 7.67\n",
      "=====Epoch 184 | Time(s) 67.5558 | Loss 0.5885 | ETputs(KTEPS) 7.67\n",
      "=====Epoch 185 | Time(s) 67.5698 | Loss 0.5902 | ETputs(KTEPS) 7.67\n",
      "=====Epoch 186 | Time(s) 67.5775 | Loss 0.5899 | ETputs(KTEPS) 7.67\n",
      "=====Epoch 187 | Time(s) 67.5789 | Loss 0.5923 | ETputs(KTEPS) 7.67\n",
      "=====Epoch 188 | Time(s) 67.5760 | Loss 0.5902 | ETputs(KTEPS) 7.67\n",
      "=====Epoch 189 | Time(s) 67.5817 | Loss 0.5917 | ETputs(KTEPS) 7.67\n",
      "=====Epoch 190 | Time(s) 67.5890 | Loss 0.5932 | ETputs(KTEPS) 7.67\n",
      "=====Epoch 191 | Time(s) 67.5980 | Loss 0.5885 | ETputs(KTEPS) 7.67\n",
      "=====Epoch 192 | Time(s) 67.6228 | Loss 0.5888 | ETputs(KTEPS) 7.66\n",
      "=====Epoch 193 | Time(s) 67.6495 | Loss 0.5880 | ETputs(KTEPS) 7.66\n",
      "=====Epoch 194 | Time(s) 67.6556 | Loss 0.5895 | ETputs(KTEPS) 7.66\n",
      "=====Epoch 195 | Time(s) 67.6551 | Loss 0.5854 | ETputs(KTEPS) 7.66\n",
      "=====Epoch 196 | Time(s) 67.6389 | Loss 0.5940 | ETputs(KTEPS) 7.66\n",
      "=====Epoch 197 | Time(s) 67.6238 | Loss 0.5862 | ETputs(KTEPS) 7.66\n",
      "=====Epoch 198 | Time(s) 67.6134 | Loss 0.5899 | ETputs(KTEPS) 7.67\n",
      "=====Epoch 199 | Time(s) 67.6670 | Loss 0.5905 | ETputs(KTEPS) 7.66\n",
      "=====Epoch 200 | Time(s) 67.7419 | Loss 0.5839 | ETputs(KTEPS) 7.65\n",
      "=====Epoch 201 | Time(s) 67.7304 | Loss 0.5909 | ETputs(KTEPS) 7.65\n",
      "=====Epoch 202 | Time(s) 67.7203 | Loss 0.5820 | ETputs(KTEPS) 7.65\n",
      "=====Epoch 203 | Time(s) 67.7202 | Loss 0.5881 | ETputs(KTEPS) 7.65\n",
      "=====Epoch 204 | Time(s) 67.7319 | Loss 0.5848 | ETputs(KTEPS) 7.65\n",
      "=====Epoch 205 | Time(s) 67.7445 | Loss 0.5806 | ETputs(KTEPS) 7.65\n",
      "=====Epoch 206 | Time(s) 67.7389 | Loss 0.5829 | ETputs(KTEPS) 7.65\n",
      "=====Epoch 207 | Time(s) 67.7193 | Loss 0.5812 | ETputs(KTEPS) 7.65\n",
      "=====Epoch 208 | Time(s) 67.7104 | Loss 0.5822 | ETputs(KTEPS) 7.65\n",
      "=====Epoch 209 | Time(s) 67.6983 | Loss 0.5806 | ETputs(KTEPS) 7.66\n",
      "=====Epoch 210 | Time(s) 67.6863 | Loss 0.5902 | ETputs(KTEPS) 7.66\n",
      "=====Epoch 211 | Time(s) 67.6782 | Loss 0.5790 | ETputs(KTEPS) 7.66\n",
      "=====Epoch 212 | Time(s) 67.6696 | Loss 0.5819 | ETputs(KTEPS) 7.66\n",
      "=====Epoch 213 | Time(s) 67.7375 | Loss 0.5795 | ETputs(KTEPS) 7.65\n",
      "=====Epoch 214 | Time(s) 67.7635 | Loss 0.5852 | ETputs(KTEPS) 7.65\n",
      "=====Epoch 215 | Time(s) 67.7641 | Loss 0.5838 | ETputs(KTEPS) 7.65\n",
      "=====Epoch 216 | Time(s) 67.7513 | Loss 0.5791 | ETputs(KTEPS) 7.65\n",
      "=====Epoch 217 | Time(s) 67.7418 | Loss 0.5839 | ETputs(KTEPS) 7.65\n",
      "=====Epoch 218 | Time(s) 67.8220 | Loss 0.5833 | ETputs(KTEPS) 7.64\n",
      "=====Epoch 219 | Time(s) 67.9254 | Loss 0.5817 | ETputs(KTEPS) 7.63\n",
      "=====Epoch 220 | Time(s) 68.0281 | Loss 0.5790 | ETputs(KTEPS) 7.62\n",
      "=====Epoch 221 | Time(s) 68.1525 | Loss 0.5849 | ETputs(KTEPS) 7.60\n",
      "=====Epoch 222 | Time(s) 68.2543 | Loss 0.5786 | ETputs(KTEPS) 7.59\n",
      "=====Epoch 223 | Time(s) 68.3604 | Loss 0.5789 | ETputs(KTEPS) 7.58\n",
      "=====Epoch 224 | Time(s) 68.4629 | Loss 0.5791 | ETputs(KTEPS) 7.57\n",
      "=====Epoch 225 | Time(s) 68.5586 | Loss 0.5795 | ETputs(KTEPS) 7.56\n",
      "=====Epoch 226 | Time(s) 68.6590 | Loss 0.5732 | ETputs(KTEPS) 7.55\n",
      "=====Epoch 227 | Time(s) 68.7814 | Loss 0.5784 | ETputs(KTEPS) 7.54\n",
      "=====Epoch 228 | Time(s) 68.8772 | Loss 0.5786 | ETputs(KTEPS) 7.52\n",
      "=====Epoch 229 | Time(s) 68.9714 | Loss 0.5759 | ETputs(KTEPS) 7.51\n",
      "=====Epoch 230 | Time(s) 69.1199 | Loss 0.5736 | ETputs(KTEPS) 7.50\n",
      "=====Epoch 231 | Time(s) 69.2137 | Loss 0.5816 | ETputs(KTEPS) 7.49\n",
      "=====Epoch 232 | Time(s) 69.3182 | Loss 0.5750 | ETputs(KTEPS) 7.48\n",
      "=====Epoch 233 | Time(s) 69.4291 | Loss 0.5780 | ETputs(KTEPS) 7.46\n",
      "=====Epoch 234 | Time(s) 69.5217 | Loss 0.5740 | ETputs(KTEPS) 7.45\n",
      "=====Epoch 235 | Time(s) 69.6162 | Loss 0.5766 | ETputs(KTEPS) 7.44\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "=====Epoch 236 | Time(s) 69.6540 | Loss 0.5753 | ETputs(KTEPS) 7.44\n",
      "=====Epoch 237 | Time(s) 69.6490 | Loss 0.5738 | ETputs(KTEPS) 7.44\n",
      "=====Epoch 238 | Time(s) 69.6256 | Loss 0.5733 | ETputs(KTEPS) 7.44\n",
      "=====Epoch 239 | Time(s) 69.6150 | Loss 0.5733 | ETputs(KTEPS) 7.44\n",
      "=====Epoch 240 | Time(s) 69.6270 | Loss 0.5726 | ETputs(KTEPS) 7.44\n",
      "=====Epoch 241 | Time(s) 69.6382 | Loss 0.5782 | ETputs(KTEPS) 7.44\n",
      "=====Epoch 242 | Time(s) 69.6405 | Loss 0.5753 | ETputs(KTEPS) 7.44\n",
      "=====Epoch 243 | Time(s) 69.6780 | Loss 0.5747 | ETputs(KTEPS) 7.44\n",
      "=====Epoch 244 | Time(s) 69.6594 | Loss 0.5742 | ETputs(KTEPS) 7.44\n",
      "=====Epoch 245 | Time(s) 69.6410 | Loss 0.5749 | ETputs(KTEPS) 7.44\n",
      "=====Epoch 246 | Time(s) 69.6253 | Loss 0.5737 | ETputs(KTEPS) 7.44\n",
      "=====Epoch 247 | Time(s) 69.6636 | Loss 0.5750 | ETputs(KTEPS) 7.44\n",
      "=====Epoch 248 | Time(s) 69.7438 | Loss 0.5736 | ETputs(KTEPS) 7.43\n",
      "=====Epoch 249 | Time(s) 69.7826 | Loss 0.5700 | ETputs(KTEPS) 7.43\n"
     ]
    }
   ],
   "source": [
    "# balance data\n",
    "# Driver cell: parse hyperparameters, then train one GCN per (task, n_neigh) pair.\n",
    "if __name__ == '__main__':\n",
    "    \n",
    "    parser = argparse.ArgumentParser(description='GCN')\n",
    "    register_data_args(parser)\n",
    "    parser.add_argument(\"--dropout\", type=float, default=0.3,\n",
    "            help=\"dropout probability\")\n",
    "    parser.add_argument(\"--gpu\", type=int, default=-1,\n",
    "            help=\"gpu\")\n",
    "    parser.add_argument(\"--lr\", type=float, default=0.003,\n",
    "            help=\"learning rate\")\n",
    "    # NOTE(review): default here is 500, but the saved output above shows\n",
    "    # n_epochs=250 -- the recorded run used a different default (stale output).\n",
    "    parser.add_argument(\"--n-epochs\", type=int, default=500,\n",
    "            help=\"number of training epochs\")\n",
    "    parser.add_argument(\"--n-hidden\", type=int, default=512,\n",
    "            help=\"number of hidden gcn units\")\n",
    "    parser.add_argument(\"--n-layers\", type=int, default=3,\n",
    "            help=\"number of hidden gcn layers\")\n",
    "    parser.add_argument(\"--weight-decay\", type=float, default=5e-4,\n",
    "            help=\"Weight for L2 loss\")\n",
    "    # args=[] so parsing works inside the notebook (ignores the kernel's argv).\n",
    "    args = parser.parse_args(args = [])\n",
    "    print(args)\n",
    "    \n",
    "    # NOTE(review): 'balance' is only assigned when isbalance is True; this is\n",
    "    # fine while the loop iterates over [True] only, but adding False would\n",
    "    # raise NameError at the run(...) call.\n",
    "    for isbalance in [True]:\n",
    "        \n",
    "        if isbalance:\n",
    "            balance = ''\n",
    "            \n",
    "        for task in ['Tp','Tg','Tpe']:\n",
    "            \n",
    "            for n_neigh in [1,3,5,7,10,15]:\n",
    "                \n",
    "                print('************** isbalance = {} | task = {} | n_neigh = {}'.format(isbalance, task, n_neigh))\n",
    "                node_feature_label, train_index_all, test_index_all, \\\n",
    "                knn_neighbors_graph, g, ys_train, metrics_train, ys_test, metrics_test = run(args,\n",
    "                                                                                              task, \n",
    "                                                                                              isbalance, \n",
    "                                                                                              balance, \n",
    "                                                                                              n_neigh)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
