{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2021-07-05T12:32:25.999738Z",
     "start_time": "2021-07-05T12:32:24.469966Z"
    }
   },
   "outputs": [],
   "source": [
    "from typing import Union, Tuple, Optional\n",
    "from torch_geometric.typing import (OptPairTensor, Adj, Size, NoneType, OptTensor)\n",
    "\n",
    "import torch\n",
    "from torch import Tensor\n",
    "import torch.nn.functional as F\n",
    "from torch.nn import Parameter, Linear\n",
    "from torch_sparse import SparseTensor, set_diag\n",
    "from torch_geometric.nn import GCNConv, GATConv\n",
    "from torch_geometric.nn.conv import MessagePassing\n",
    "from torch_geometric.utils import remove_self_loops, add_self_loops, softmax\n",
    "\n",
    "from torch_geometric.nn.inits  import glorot, zeros\n",
    "from torch_geometric.data import Data\n",
    "\n",
    "from torch_scatter import scatter"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2021-07-05T12:32:26.032499Z",
     "start_time": "2021-07-05T12:32:26.016392Z"
    }
   },
   "outputs": [],
   "source": [
    "class GGAT(MessagePassing):\n",
    "    def __init__(self, in_channels, out_channels, dropout = 0.0, add_self_loops = True, bias = True, **kwargs):\n",
    "        kwargs.setdefault('aggr', 'add')\n",
    "        super(GGAT, self).__init__(node_dim=0, **kwargs)\n",
    "\n",
    "        self.in_channels = in_channels\n",
    "        self.out_channels = out_channels\n",
    "        self.dropout = dropout\n",
    "        self.add_self_loops = add_self_loops\n",
    "        self.weight = Parameter(torch.Tensor(in_channels, out_channels))\n",
    "        self.att = Parameter(torch.Tensor(out_channels, 1))\n",
    "        self.edge_index = None\n",
    "        self.edge_weight = None\n",
    "        self.x = None\n",
    "        \n",
    "        if bias:\n",
    "            self.bias = Parameter(torch.Tensor(out_channels))\n",
    "        else:\n",
    "            self.register_parameter('bias', None)\n",
    "\n",
    "        self.reset_parameters()\n",
    "        \n",
    "    def reset_parameters(self):\n",
    "        glorot(self.att)\n",
    "        glorot(self.weight)\n",
    "        zeros(self.bias)\n",
    "        \n",
    "    def forward(self, x, edge_index, return_attention_weights=None):\n",
    "        x = x @ self.weight\n",
    "        \n",
    "        if(self.edge_index == None):\n",
    "            self.edge_index = edge_index\n",
    "            \n",
    "        assert self.edge_index != None\n",
    "        h, t = self.edge_index\n",
    "        x_h = x[self.edge_index[0]]\n",
    "        x_t = x[self.edge_index[1]]\n",
    "        x_ht = x_h * x_t\n",
    "        \n",
    "        edge_weight = F.sigmoid(x_ht @ self.att).t().squeeze(0)\n",
    "        sum_weight = scatter(edge_weight, t, reduce=\"sum\").squeeze(0)\n",
    "        \n",
    "        norm = sum_weight[t]\n",
    "        out = self.propagate(self.edge_index, x = x, edge_weight = edge_weight, norm=norm)\n",
    "        self.x = out\n",
    "        return out\n",
    "        \n",
    "    def process_edges(self, x, sample_num, gamma):\n",
    "        \n",
    "        n = 0\n",
    "        x = self.x\n",
    "        \n",
    "        head_index = torch.randperm(self.edge_index.shape[1])\n",
    "        tail_index = torch.randperm(self.edge_index.shape[1])\n",
    "        \n",
    "        new_edges = []\n",
    "        \n",
    "        for i in range(len(head_index)):\n",
    "\n",
    "            head_i = self.edge_index[0][head_index[i]]\n",
    "            tail_i = self.edge_index[1][tail_index[i]]\n",
    "            if(head_i == tail_i or head_index[i] == tail_index[i]):\n",
    "                continue\n",
    "        \n",
    "                \n",
    "            head_feature = x[head_i]\n",
    "            tail_feature = x[tail_i]\n",
    "            \n",
    "#             edge_alpha = F.sigmoid(((head_feature @ self.weight) * (tail_feature @ self.weight)) @ self.att)\n",
    "            edge_alpha = F.sigmoid((head_feature * tail_feature) @ self.att)\n",
    "\n",
    "            if(edge_alpha > gamma):\n",
    "                new_edges.append([head_i, tail_i])\n",
    "                new_edges.append([tail_i, head_i])\n",
    "                n += 1\n",
    "            \n",
    "            if(n == sample_num):\n",
    "                break\n",
    "            \n",
    "        self.edge_index = torch.cat((self.edge_index, torch.LongTensor(new_edges).t().to(device)), 1)\n",
    "        return new_edges\n",
    "        \n",
    "    def message(self, x_j, x_i, edge_weight, norm):\n",
    "        self.edge_weight = edge_weight\n",
    "        edge_weight = F.dropout(edge_weight, p = 0.5, training=self.training)\n",
    "        return x_j if edge_weight is None else edge_weight.div(norm).view(-1, 1) * x_j\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2021-07-05T12:32:27.282496Z",
     "start_time": "2021-07-05T12:32:26.047963Z"
    }
   },
   "outputs": [],
   "source": [
    "path = \"data/cora/\"\n",
    "cites = path + \"cora.cites\"\n",
    "content = path + \"cora.content\"\n",
    "\n",
    "# 索引字典，转换到从0开始编码\n",
    "index_dict = dict()\n",
    "# 标签字典\n",
    "label_to_index = dict()\n",
    "\n",
    "features = []\n",
    "labels = []\n",
    "edge_index = []\n",
    "\n",
    "draw_edge_index = []\n",
    "\n",
    "with open(content,\"r\") as f:\n",
    "    nodes = f.readlines()\n",
    "    for node in nodes:\n",
    "        node_info = node.split()\n",
    "        index_dict[int(node_info[0])] = len(index_dict)\n",
    "        features.append([int(i) for i in node_info[1:-1]])\n",
    "        \n",
    "        label_str = node_info[-1]\n",
    "        if(label_str not in label_to_index.keys()):\n",
    "            label_to_index[label_str] = len(label_to_index)\n",
    "        labels.append(label_to_index[label_str])\n",
    "\n",
    "with open(cites,\"r\") as f:\n",
    "    edges = f.readlines()\n",
    "    for edge in edges:\n",
    "        start, end = edge.split()\n",
    "        edge_index.append([index_dict[int(start)],index_dict[int(end)]])\n",
    "        draw_edge_index.append([index_dict[int(start)],index_dict[int(end)]])\n",
    "        edge_index.append([index_dict[int(end)],index_dict[int(start)]])\n",
    "    \n",
    "labels = torch.LongTensor(labels)\n",
    "features = torch.FloatTensor(features)\n",
    "# 行归一化\n",
    "# features = torch.nn.functional.normalize(features, p=1, dim=1)\n",
    "\n",
    "edge_index =  torch.LongTensor(edge_index).t()\n",
    "edge_index, _ = add_self_loops(edge_index,num_nodes=len(index_dict))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2021-07-05T12:32:27.385110Z",
     "start_time": "2021-07-05T12:32:27.378999Z"
    }
   },
   "outputs": [],
   "source": [
    "class GATNet(torch.nn.Module):\n",
    "    def __init__(self, num_feature, num_label):\n",
    "        super(GATNet,self).__init__()\n",
    "        self.GAT1 = GATConv(num_feature, 16, heads = 1, concat = True, dropout = 0.6)\n",
    "        self.GAT2 = GATConv(16, num_label, dropout = 0.6)  \n",
    "        \n",
    "    def forward(self, data):\n",
    "        x, edge_index = data.x, data.edge_index\n",
    "        \n",
    "        x = self.GAT1(x, edge_index)\n",
    "        x = F.relu(x)\n",
    "        x = self.GAT2(x, edge_index)\n",
    "        \n",
    "        return F.log_softmax(x, dim=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2021-07-05T12:32:27.479241Z",
     "start_time": "2021-07-05T12:32:27.473885Z"
    }
   },
   "outputs": [],
   "source": [
    "class GCNNet(torch.nn.Module):\n",
    "    def __init__(self, num_feature, num_label):\n",
    "        super(GCNNet,self).__init__()\n",
    "        self.GCN1 = GCNConv(num_feature, 16, cached=True)\n",
    "        self.GCN2 = GCNConv(16, num_label, cached=True)  \n",
    "        self.dropout = torch.nn.Dropout(p=0.5)\n",
    "        \n",
    "    def forward(self, data):\n",
    "        x, edge_index = data.x, data.edge_index\n",
    "        \n",
    "        x = self.GCN1(x, edge_index)\n",
    "        x = F.relu(x)\n",
    "        x = self.dropout(x)\n",
    "        x = self.GCN2(x, edge_index)\n",
    "        \n",
    "        return F.log_softmax(x, dim=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2021-07-05T12:32:27.577917Z",
     "start_time": "2021-07-05T12:32:27.572730Z"
    }
   },
   "outputs": [],
   "source": [
    "class GGNet(torch.nn.Module):\n",
    "    def __init__(self, num_feature, num_label):\n",
    "        super(GGNet,self).__init__()\n",
    "        self.GGAT1 = GGAT(num_feature, 16)\n",
    "        self.GGAT2 = GGAT(16, num_label)\n",
    "#         self.dropout = torch.nn.Dropout(p=0.5)\n",
    "\n",
    "        \n",
    "    def forward(self, data):\n",
    "        x, edge_index = data.x, data.edge_index\n",
    "\n",
    "        x = self.GGAT1(x, edge_index)\n",
    "        x = F.relu(x)\n",
    "#         x = self.dropout(x)\n",
    "        x = self.GGAT2(x, edge_index)\n",
    "        \n",
    "        return F.log_softmax(x, dim=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2021-07-05T12:32:27.671944Z",
     "start_time": "2021-07-05T12:32:27.666252Z"
    }
   },
   "outputs": [],
   "source": [
    "# 固定种子\n",
    "seed = 1234\n",
    "torch.manual_seed(seed)\n",
    "torch.cuda.manual_seed(seed)\n",
    "torch.cuda.manual_seed_all(seed)  \n",
    "# np.random.seed(seed)  # Numpy module.\n",
    "# random.seed(seed)  # Python random module.\n",
    "torch.manual_seed(seed)\n",
    "torch.backends.cudnn.benchmark = False\n",
    "torch.backends.cudnn.deterministic = True"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2021-07-05T12:32:31.123408Z",
     "start_time": "2021-07-05T12:32:27.760182Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Data(edge_index=[2, 13566], x=[2708, 1433], y=[2708])"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "mask = torch.randperm(len(index_dict))\n",
    "\n",
    "# train_mask = mask[:140]\n",
    "# val_mask = mask[140:640]\n",
    "# test_mask = mask[1708:2708]\n",
    "\n",
    "train_mask = mask[:1625]\n",
    "val_mask = mask[1626:2167]\n",
    "test_mask = mask[2167:]\n",
    "\n",
    "device = torch.device('cuda:2' if torch.cuda.is_available() else 'cpu')\n",
    "\n",
    "cora = Data(x = features, edge_index = edge_index.contiguous(), y = labels).to(device)\n",
    "cora"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2021-07-05T12:51:26.589370Z",
     "start_time": "2021-07-05T12:51:24.639901Z"
    },
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/songwenyu/anaconda3/envs/song/lib/python3.6/site-packages/torch/nn/functional.py:1709: UserWarning: nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\n",
      "  warnings.warn(\"nn.functional.sigmoid is deprecated. Use torch.sigmoid instead.\")\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 0 loss: 1.9479 train_accuracy: 0.1723\n",
      "epoch: 1 loss: 1.9251 train_accuracy: 0.2086\n",
      "epoch: 2 loss: 1.9033 train_accuracy: 0.2689\n",
      "epoch: 3 loss: 1.8830 train_accuracy: 0.3200\n",
      "epoch: 4 loss: 1.8623 train_accuracy: 0.3514\n",
      "epoch: 5 loss: 1.8394 train_accuracy: 0.3643\n",
      "epoch: 6 loss: 1.8217 train_accuracy: 0.3637\n",
      "epoch: 7 loss: 1.7970 train_accuracy: 0.3914\n",
      "epoch: 8 loss: 1.7777 train_accuracy: 0.3846\n",
      "epoch: 9 loss: 1.7573 train_accuracy: 0.3951\n",
      "val_accuracy: 0.4214\n",
      "epoch: 10 loss: 1.7293 train_accuracy: 0.4246\n",
      "epoch: 11 loss: 1.7097 train_accuracy: 0.4369\n",
      "epoch: 12 loss: 1.6854 train_accuracy: 0.4615\n",
      "epoch: 13 loss: 1.6599 train_accuracy: 0.4671\n",
      "epoch: 14 loss: 1.6388 train_accuracy: 0.4738\n",
      "epoch: 15 loss: 1.6110 train_accuracy: 0.4911\n",
      "epoch: 16 loss: 1.6027 train_accuracy: 0.4997\n",
      "epoch: 17 loss: 1.5563 train_accuracy: 0.5194\n",
      "epoch: 18 loss: 1.5451 train_accuracy: 0.5225\n",
      "epoch: 19 loss: 1.5205 train_accuracy: 0.5372\n",
      "val_accuracy: 0.5786\n",
      "epoch: 20 loss: 1.4901 train_accuracy: 0.5557\n",
      "epoch: 21 loss: 1.4843 train_accuracy: 0.5440\n",
      "epoch: 22 loss: 1.4578 train_accuracy: 0.5705\n",
      "epoch: 23 loss: 1.4398 train_accuracy: 0.5828\n",
      "epoch: 24 loss: 1.4285 train_accuracy: 0.5963\n",
      "epoch: 25 loss: 1.4088 train_accuracy: 0.6074\n",
      "epoch: 26 loss: 1.4118 train_accuracy: 0.6240\n",
      "epoch: 27 loss: 1.3790 train_accuracy: 0.6308\n",
      "epoch: 28 loss: 1.3480 train_accuracy: 0.6505\n",
      "epoch: 29 loss: 1.3478 train_accuracy: 0.6585\n",
      "val_accuracy: 0.7227\n",
      "epoch: 30 loss: 1.3229 train_accuracy: 0.6978\n",
      "epoch: 31 loss: 1.3106 train_accuracy: 0.6868\n",
      "epoch: 32 loss: 1.2822 train_accuracy: 0.7095\n",
      "epoch: 33 loss: 1.2543 train_accuracy: 0.7058\n",
      "epoch: 34 loss: 1.2532 train_accuracy: 0.7243\n",
      "epoch: 35 loss: 1.2390 train_accuracy: 0.7138\n",
      "epoch: 36 loss: 1.2074 train_accuracy: 0.7440\n",
      "epoch: 37 loss: 1.2304 train_accuracy: 0.7477\n",
      "epoch: 38 loss: 1.1932 train_accuracy: 0.7520\n",
      "epoch: 39 loss: 1.1939 train_accuracy: 0.7545\n",
      "val_accuracy: 0.8207\n",
      "epoch: 40 loss: 1.1469 train_accuracy: 0.7686\n",
      "epoch: 41 loss: 1.1343 train_accuracy: 0.7637\n",
      "epoch: 42 loss: 1.1269 train_accuracy: 0.7766\n",
      "epoch: 43 loss: 1.1126 train_accuracy: 0.7828\n",
      "epoch: 44 loss: 1.1128 train_accuracy: 0.7791\n",
      "epoch: 45 loss: 1.0851 train_accuracy: 0.7840\n",
      "epoch: 46 loss: 1.0496 train_accuracy: 0.7938\n",
      "epoch: 47 loss: 1.0730 train_accuracy: 0.7828\n",
      "epoch: 48 loss: 1.0460 train_accuracy: 0.7797\n",
      "epoch: 49 loss: 1.0320 train_accuracy: 0.7994\n",
      "val_accuracy: 0.8503\n",
      "epoch: 50 loss: 1.0062 train_accuracy: 0.7945\n",
      "epoch: 51 loss: 1.0066 train_accuracy: 0.7969\n",
      "epoch: 52 loss: 1.0148 train_accuracy: 0.8111\n",
      "epoch: 53 loss: 0.9688 train_accuracy: 0.8105\n",
      "epoch: 54 loss: 0.9845 train_accuracy: 0.8049\n",
      "epoch: 55 loss: 0.9423 train_accuracy: 0.8123\n",
      "epoch: 56 loss: 0.9241 train_accuracy: 0.8172\n",
      "epoch: 57 loss: 0.9103 train_accuracy: 0.8049\n",
      "epoch: 58 loss: 0.9369 train_accuracy: 0.8006\n",
      "epoch: 59 loss: 0.9217 train_accuracy: 0.8092\n",
      "val_accuracy: 0.8669\n",
      "epoch: 60 loss: 0.9106 train_accuracy: 0.8074\n",
      "epoch: 61 loss: 0.9115 train_accuracy: 0.8098\n",
      "epoch: 62 loss: 0.8665 train_accuracy: 0.8185\n",
      "epoch: 63 loss: 0.8836 train_accuracy: 0.8185\n",
      "epoch: 64 loss: 0.8812 train_accuracy: 0.8043\n",
      "epoch: 65 loss: 0.8691 train_accuracy: 0.8215\n",
      "epoch: 66 loss: 0.8516 train_accuracy: 0.8160\n",
      "epoch: 67 loss: 0.8380 train_accuracy: 0.8234\n",
      "epoch: 68 loss: 0.8422 train_accuracy: 0.8203\n",
      "epoch: 69 loss: 0.8267 train_accuracy: 0.8191\n",
      "val_accuracy: 0.8725\n",
      "epoch: 70 loss: 0.8289 train_accuracy: 0.8209\n",
      "epoch: 71 loss: 0.8073 train_accuracy: 0.8357\n",
      "epoch: 72 loss: 0.7948 train_accuracy: 0.8320\n",
      "epoch: 73 loss: 0.7854 train_accuracy: 0.8203\n",
      "epoch: 74 loss: 0.7994 train_accuracy: 0.8234\n",
      "epoch: 75 loss: 0.8025 train_accuracy: 0.8203\n",
      "epoch: 76 loss: 0.7830 train_accuracy: 0.8129\n",
      "epoch: 77 loss: 0.7774 train_accuracy: 0.8326\n",
      "epoch: 78 loss: 0.7589 train_accuracy: 0.8178\n",
      "epoch: 79 loss: 0.7725 train_accuracy: 0.8234\n",
      "val_accuracy: 0.8762\n",
      "epoch: 80 loss: 0.7400 train_accuracy: 0.8289\n",
      "epoch: 81 loss: 0.7488 train_accuracy: 0.8283\n",
      "epoch: 82 loss: 0.7288 train_accuracy: 0.8431\n",
      "epoch: 83 loss: 0.7573 train_accuracy: 0.8209\n",
      "epoch: 84 loss: 0.7201 train_accuracy: 0.8265\n",
      "epoch: 85 loss: 0.7381 train_accuracy: 0.8302\n",
      "epoch: 86 loss: 0.7274 train_accuracy: 0.8326\n",
      "epoch: 87 loss: 0.7309 train_accuracy: 0.8240\n",
      "epoch: 88 loss: 0.7015 train_accuracy: 0.8375\n",
      "epoch: 89 loss: 0.7060 train_accuracy: 0.8320\n",
      "val_accuracy: 0.8835\n",
      "epoch: 90 loss: 0.7192 train_accuracy: 0.8295\n",
      "epoch: 91 loss: 0.7252 train_accuracy: 0.8308\n",
      "epoch: 92 loss: 0.6774 train_accuracy: 0.8388\n",
      "epoch: 93 loss: 0.6620 train_accuracy: 0.8492\n",
      "epoch: 94 loss: 0.6504 train_accuracy: 0.8345\n",
      "epoch: 95 loss: 0.7025 train_accuracy: 0.8308\n",
      "epoch: 96 loss: 0.6738 train_accuracy: 0.8314\n",
      "epoch: 97 loss: 0.6637 train_accuracy: 0.8375\n",
      "epoch: 98 loss: 0.6574 train_accuracy: 0.8289\n",
      "epoch: 99 loss: 0.6329 train_accuracy: 0.8585\n",
      "val_accuracy: 0.8817\n",
      "epoch: 100 loss: 0.6534 train_accuracy: 0.8295\n",
      "epoch: 101 loss: 0.6529 train_accuracy: 0.8425\n",
      "epoch: 102 loss: 0.6407 train_accuracy: 0.8443\n",
      "epoch: 103 loss: 0.6151 train_accuracy: 0.8517\n",
      "epoch: 104 loss: 0.6563 train_accuracy: 0.8234\n",
      "epoch: 105 loss: 0.6450 train_accuracy: 0.8406\n",
      "epoch: 106 loss: 0.6379 train_accuracy: 0.8363\n",
      "epoch: 107 loss: 0.6504 train_accuracy: 0.8357\n",
      "epoch: 108 loss: 0.6147 train_accuracy: 0.8498\n",
      "epoch: 109 loss: 0.6659 train_accuracy: 0.8363\n",
      "val_accuracy: 0.8762\n",
      "epoch: 110 loss: 0.6267 train_accuracy: 0.8382\n",
      "epoch: 111 loss: 0.6428 train_accuracy: 0.8418\n",
      "epoch: 112 loss: 0.6586 train_accuracy: 0.8326\n",
      "epoch: 113 loss: 0.6206 train_accuracy: 0.8418\n",
      "epoch: 114 loss: 0.6642 train_accuracy: 0.8332\n",
      "epoch: 115 loss: 0.6442 train_accuracy: 0.8271\n",
      "epoch: 116 loss: 0.6369 train_accuracy: 0.8363\n",
      "epoch: 117 loss: 0.6094 train_accuracy: 0.8326\n",
      "epoch: 118 loss: 0.6438 train_accuracy: 0.8345\n",
      "epoch: 119 loss: 0.6258 train_accuracy: 0.8271\n",
      "val_accuracy: 0.8817\n",
      "epoch: 120 loss: 0.6071 train_accuracy: 0.8375\n",
      "epoch: 121 loss: 0.6042 train_accuracy: 0.8443\n",
      "epoch: 122 loss: 0.6250 train_accuracy: 0.8283\n",
      "epoch: 123 loss: 0.6145 train_accuracy: 0.8308\n",
      "epoch: 124 loss: 0.5953 train_accuracy: 0.8486\n",
      "epoch: 125 loss: 0.5871 train_accuracy: 0.8394\n",
      "epoch: 126 loss: 0.6347 train_accuracy: 0.8326\n",
      "epoch: 127 loss: 0.5658 train_accuracy: 0.8560\n",
      "epoch: 128 loss: 0.6080 train_accuracy: 0.8449\n",
      "epoch: 129 loss: 0.6080 train_accuracy: 0.8357\n",
      "val_accuracy: 0.8762\n",
      "epoch: 130 loss: 0.6226 train_accuracy: 0.8197\n",
      "epoch: 131 loss: 0.6053 train_accuracy: 0.8443\n",
      "epoch: 132 loss: 0.5933 train_accuracy: 0.8338\n",
      "epoch: 133 loss: 0.5784 train_accuracy: 0.8443\n",
      "epoch: 134 loss: 0.5763 train_accuracy: 0.8437\n",
      "epoch: 135 loss: 0.5556 train_accuracy: 0.8498\n",
      "epoch: 136 loss: 0.5790 train_accuracy: 0.8511\n",
      "epoch: 137 loss: 0.6207 train_accuracy: 0.8375\n",
      "epoch: 138 loss: 0.5948 train_accuracy: 0.8240\n",
      "epoch: 139 loss: 0.5739 train_accuracy: 0.8462\n",
      "val_accuracy: 0.8854\n",
      "epoch: 140 loss: 0.5718 train_accuracy: 0.8345\n",
      "epoch: 141 loss: 0.5887 train_accuracy: 0.8449\n",
      "epoch: 142 loss: 0.6131 train_accuracy: 0.8308\n",
      "epoch: 143 loss: 0.5719 train_accuracy: 0.8443\n",
      "epoch: 144 loss: 0.5814 train_accuracy: 0.8412\n",
      "epoch: 145 loss: 0.5876 train_accuracy: 0.8425\n",
      "epoch: 146 loss: 0.5639 train_accuracy: 0.8425\n",
      "epoch: 147 loss: 0.5715 train_accuracy: 0.8535\n",
      "epoch: 148 loss: 0.5673 train_accuracy: 0.8474\n",
      "epoch: 149 loss: 0.5837 train_accuracy: 0.8369\n",
      "val_accuracy: 0.8780\n",
      "epoch: 150 loss: 0.5581 train_accuracy: 0.8443\n",
      "epoch: 151 loss: 0.5638 train_accuracy: 0.8597\n",
      "epoch: 152 loss: 0.5680 train_accuracy: 0.8425\n",
      "epoch: 153 loss: 0.5482 train_accuracy: 0.8474\n",
      "epoch: 154 loss: 0.5529 train_accuracy: 0.8480\n",
      "epoch: 155 loss: 0.5744 train_accuracy: 0.8455\n",
      "epoch: 156 loss: 0.5460 train_accuracy: 0.8480\n",
      "epoch: 157 loss: 0.5371 train_accuracy: 0.8511\n",
      "epoch: 158 loss: 0.5768 train_accuracy: 0.8357\n",
      "epoch: 159 loss: 0.5337 train_accuracy: 0.8548\n",
      "val_accuracy: 0.8780\n",
      "epoch: 160 loss: 0.5716 train_accuracy: 0.8400\n",
      "epoch: 161 loss: 0.5564 train_accuracy: 0.8492\n",
      "epoch: 162 loss: 0.5708 train_accuracy: 0.8431\n",
      "epoch: 163 loss: 0.5470 train_accuracy: 0.8455\n",
      "epoch: 164 loss: 0.5789 train_accuracy: 0.8308\n",
      "epoch: 165 loss: 0.5476 train_accuracy: 0.8462\n",
      "epoch: 166 loss: 0.5602 train_accuracy: 0.8363\n",
      "epoch: 167 loss: 0.5508 train_accuracy: 0.8400\n",
      "epoch: 168 loss: 0.5403 train_accuracy: 0.8492\n",
      "epoch: 169 loss: 0.5332 train_accuracy: 0.8437\n",
      "val_accuracy: 0.8762\n",
      "epoch: 170 loss: 0.5302 train_accuracy: 0.8486\n",
      "epoch: 171 loss: 0.5476 train_accuracy: 0.8468\n",
      "epoch: 172 loss: 0.5804 train_accuracy: 0.8338\n",
      "epoch: 173 loss: 0.5302 train_accuracy: 0.8523\n",
      "epoch: 174 loss: 0.5576 train_accuracy: 0.8437\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 175 loss: 0.5419 train_accuracy: 0.8388\n",
      "epoch: 176 loss: 0.5428 train_accuracy: 0.8468\n",
      "epoch: 177 loss: 0.5556 train_accuracy: 0.8437\n",
      "epoch: 178 loss: 0.5535 train_accuracy: 0.8332\n",
      "epoch: 179 loss: 0.5324 train_accuracy: 0.8548\n",
      "val_accuracy: 0.8799\n",
      "epoch: 180 loss: 0.5274 train_accuracy: 0.8486\n",
      "epoch: 181 loss: 0.5100 train_accuracy: 0.8406\n",
      "epoch: 182 loss: 0.4990 train_accuracy: 0.8652\n",
      "epoch: 183 loss: 0.5580 train_accuracy: 0.8412\n",
      "epoch: 184 loss: 0.5244 train_accuracy: 0.8443\n",
      "epoch: 185 loss: 0.5181 train_accuracy: 0.8375\n",
      "epoch: 186 loss: 0.5325 train_accuracy: 0.8511\n",
      "epoch: 187 loss: 0.5234 train_accuracy: 0.8542\n",
      "epoch: 188 loss: 0.5548 train_accuracy: 0.8283\n",
      "epoch: 189 loss: 0.5390 train_accuracy: 0.8480\n",
      "val_accuracy: 0.8817\n",
      "epoch: 190 loss: 0.5549 train_accuracy: 0.8382\n",
      "epoch: 191 loss: 0.5588 train_accuracy: 0.8295\n",
      "epoch: 192 loss: 0.5465 train_accuracy: 0.8375\n",
      "epoch: 193 loss: 0.5118 train_accuracy: 0.8566\n",
      "epoch: 194 loss: 0.5529 train_accuracy: 0.8425\n",
      "epoch: 195 loss: 0.5203 train_accuracy: 0.8474\n",
      "epoch: 196 loss: 0.5060 train_accuracy: 0.8505\n",
      "epoch: 197 loss: 0.5278 train_accuracy: 0.8388\n",
      "epoch: 198 loss: 0.5273 train_accuracy: 0.8443\n",
      "epoch: 199 loss: 0.5038 train_accuracy: 0.8585\n",
      "val_accuracy: 0.8854\n",
      "0.88909426987061 140\n"
     ]
    }
   ],
   "source": [
    "model = GGNet(features.shape[1], len(label_to_index)).to(device)\n",
    "\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=2e-3, weight_decay=5e-4)\n",
    "\n",
    "val_best = 0\n",
    "test_best = 0\n",
    "epoch_best = 0\n",
    "\n",
    "draw_new_edge_index = []\n",
    "\n",
    "for epoch in range(200):\n",
    "    optimizer.zero_grad()\n",
    "    out = model(cora)\n",
    "    _, pred = out.max(dim=1)\n",
    "    correct = int(pred[train_mask].eq(cora.y[train_mask]).sum().item())\n",
    "    acc = correct / len(train_mask)\n",
    "    loss = F.nll_loss(out[train_mask], cora.y[train_mask])\n",
    "    print('epoch: %d loss: %.4f train_accuracy: %.4f' %(epoch, loss, acc))\n",
    "    loss.backward()\n",
    "    optimizer.step()\n",
    "    \n",
    "    if((epoch + 1)% 10 == 0):\n",
    "        model.eval()\n",
    "        _, pred = model(cora).max(dim=1)\n",
    "        correct = int(pred[val_mask].eq(cora.y[val_mask]).sum().item())\n",
    "        val_acc = correct / len(val_mask)\n",
    "        print('val_accuracy: {:.4f}'.format(val_acc))\n",
    "        \n",
    "        if(val_acc > val_best):\n",
    "            val_best = val_acc\n",
    "            epoch_best = epoch + 1\n",
    "            correct = int(pred[test_mask].eq(cora.y[test_mask]).sum().item())\n",
    "            test_acc = correct / len(test_mask)\n",
    "            test_best = test_acc\n",
    "    \n",
    "    if ((epoch+2) > 50 and (epoch + 1)% 25 == 0):\n",
    "        draw_new_edge_index.extend(model.GGAT1.process_edges(cora.x, 50, 0.93)) \n",
    "        model.GGAT2.edge_index = model.GGAT1.edge_index\n",
    "    \n",
    "    model.train()\n",
    "        \n",
    "print(test_best,epoch_best)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2021-07-05T12:33:31.732394Z",
     "start_time": "2021-07-05T12:33:25.703750Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'/home/songwenyu/gnnlab/ggat_pred.html'"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from sklearn.manifold import TSNE\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "import pyecharts.options as opts\n",
    "from pyecharts.charts import Graph\n",
    "\n",
    "from pyecharts.globals import CurrentConfig, NotebookType\n",
    "CurrentConfig.NOTEBOOK_TYPE = NotebookType.NTERACT\n",
    "\n",
    "ts = TSNE(n_components=2)\n",
    "ts.fit_transform(out.to('cpu').detach().numpy())\n",
    "\n",
    "x = ts.embedding_\n",
    "y = cora.y.to('cpu').detach().numpy()\n",
    "\n",
    "colors = ['#191970','#008000','#FF0000','#FFFF00', '#00FFFF', '#C71585', '#00FA9A']\n",
    "\n",
    "nodes = []\n",
    "\n",
    "for i in range(cora.x.shape[0]):\n",
    "    nodes.append(\n",
    "                {\"x\": int(x[i][0]),\n",
    "                 \"y\": int(x[i][1]),\n",
    "                 \"id\": str(i),\n",
    "                 \"name\": str(i),\n",
    "                 \"symbolSize\": 3,\n",
    "                 \"itemStyle\": {\"normal\": {\"color\": colors[y[i]]}}}\n",
    "                )\n",
    "    \n",
    "edges = [\n",
    "    {\"source\": str(edge[0].item()), \"target\": str(edge[1].item())} for edge in draw_new_edge_index\n",
    "]\n",
    "\n",
    "edges = [\n",
    "    {\"source\": str(edge[0]), \"target\": str(edge[1])} for edge in draw_edge_index\n",
    "]\n",
    "\n",
    "(\n",
    "    Graph(init_opts=opts.InitOpts(width=\"1600px\", height=\"800px\"))\n",
    "    .add(\n",
    "        series_name=\"\",\n",
    "        nodes=nodes,\n",
    "        links=edges,\n",
    "        layout=\"none\",\n",
    "        is_roam=True,\n",
    "        is_focusnode=True,\n",
    "        label_opts=opts.LabelOpts(is_show=False),\n",
    "        linestyle_opts=opts.LineStyleOpts(width=0.5, curve=0, opacity=0.7),\n",
    "    )\n",
    "    .set_global_opts(title_opts=opts.TitleOpts(title=\"NPM Dependencies\"))\n",
    "#     .render_notebook()\n",
    "    .render(\"ggat.html\")\n",
    ")\n",
    "\n",
    "nodes = []\n",
    "\n",
    "for i in range(cora.x.shape[0]):\n",
    "    nodes.append(\n",
    "                {\"x\": int(x[i][0]),\n",
    "                 \"y\": int(x[i][1]),\n",
    "                 \"id\": str(i),\n",
    "                 \"name\": str(i),\n",
    "                 \"symbolSize\": 3,\n",
    "                 \"itemStyle\": {\"normal\": {\"color\": colors[pred[i]]}}}\n",
    "                )\n",
    "    \n",
    "\n",
    "(\n",
    "    Graph(init_opts=opts.InitOpts(width=\"1600px\", height=\"800px\"))\n",
    "    .add(\n",
    "        series_name=\"\",\n",
    "        nodes=nodes,\n",
    "        links=edges,\n",
    "        layout=\"none\",\n",
    "        is_roam=True,\n",
    "        is_focusnode=True,\n",
    "        label_opts=opts.LabelOpts(is_show=False),\n",
    "        linestyle_opts=opts.LineStyleOpts(width=0.5, curve=0, opacity=0.7),\n",
    "    )\n",
    "    .set_global_opts(title_opts=opts.TitleOpts(title=\"NPM Dependencies\"))\n",
    "#     .render_notebook()\n",
    "    .render(\"ggat_pred.html\")\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2021-07-05T11:57:37.314752Z",
     "start_time": "2021-07-05T11:57:36.640017Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 0 loss: 1.9662 train_accuracy: 0.1151\n",
      "epoch: 1 loss: 1.8435 train_accuracy: 0.3643\n",
      "epoch: 2 loss: 1.7336 train_accuracy: 0.3705\n",
      "epoch: 3 loss: 1.6420 train_accuracy: 0.3920\n",
      "epoch: 4 loss: 1.5576 train_accuracy: 0.4535\n",
      "epoch: 5 loss: 1.4394 train_accuracy: 0.5397\n",
      "epoch: 6 loss: 1.3647 train_accuracy: 0.6049\n",
      "epoch: 7 loss: 1.2710 train_accuracy: 0.6492\n",
      "epoch: 8 loss: 1.1746 train_accuracy: 0.7077\n",
      "epoch: 9 loss: 1.1114 train_accuracy: 0.7188\n",
      "val_accuracy: 0.7394\n",
      "epoch: 10 loss: 1.0327 train_accuracy: 0.7317\n",
      "epoch: 11 loss: 0.9508 train_accuracy: 0.7569\n",
      "epoch: 12 loss: 0.9128 train_accuracy: 0.7563\n",
      "epoch: 13 loss: 0.8702 train_accuracy: 0.7625\n",
      "epoch: 14 loss: 0.8111 train_accuracy: 0.7846\n",
      "epoch: 15 loss: 0.7653 train_accuracy: 0.8000\n",
      "epoch: 16 loss: 0.7211 train_accuracy: 0.8154\n",
      "epoch: 17 loss: 0.6992 train_accuracy: 0.8357\n",
      "epoch: 18 loss: 0.6328 train_accuracy: 0.8505\n",
      "epoch: 19 loss: 0.6133 train_accuracy: 0.8492\n",
      "val_accuracy: 0.8614\n",
      "epoch: 20 loss: 0.5856 train_accuracy: 0.8591\n",
      "epoch: 21 loss: 0.5496 train_accuracy: 0.8658\n",
      "epoch: 22 loss: 0.5193 train_accuracy: 0.8769\n",
      "epoch: 23 loss: 0.5184 train_accuracy: 0.8708\n",
      "epoch: 24 loss: 0.4792 train_accuracy: 0.8843\n",
      "epoch: 25 loss: 0.4743 train_accuracy: 0.8794\n",
      "epoch: 26 loss: 0.4554 train_accuracy: 0.8843\n",
      "epoch: 27 loss: 0.4478 train_accuracy: 0.8825\n",
      "epoch: 28 loss: 0.4356 train_accuracy: 0.8831\n",
      "epoch: 29 loss: 0.4201 train_accuracy: 0.8880\n",
      "val_accuracy: 0.8872\n",
      "epoch: 30 loss: 0.4101 train_accuracy: 0.8892\n",
      "epoch: 31 loss: 0.3805 train_accuracy: 0.8991\n",
      "epoch: 32 loss: 0.3726 train_accuracy: 0.8942\n",
      "epoch: 33 loss: 0.3521 train_accuracy: 0.9077\n",
      "epoch: 34 loss: 0.3423 train_accuracy: 0.9028\n",
      "epoch: 35 loss: 0.3553 train_accuracy: 0.8966\n",
      "epoch: 36 loss: 0.3638 train_accuracy: 0.8929\n",
      "epoch: 37 loss: 0.3251 train_accuracy: 0.9102\n",
      "epoch: 38 loss: 0.3272 train_accuracy: 0.9108\n",
      "epoch: 39 loss: 0.3154 train_accuracy: 0.9089\n",
      "val_accuracy: 0.8854\n",
      "epoch: 40 loss: 0.3035 train_accuracy: 0.9138\n",
      "epoch: 41 loss: 0.2985 train_accuracy: 0.9188\n",
      "epoch: 42 loss: 0.2960 train_accuracy: 0.9132\n",
      "epoch: 43 loss: 0.3020 train_accuracy: 0.9138\n",
      "epoch: 44 loss: 0.2983 train_accuracy: 0.9065\n",
      "epoch: 45 loss: 0.2942 train_accuracy: 0.9249\n",
      "epoch: 46 loss: 0.2844 train_accuracy: 0.9095\n",
      "epoch: 47 loss: 0.2726 train_accuracy: 0.9169\n",
      "epoch: 48 loss: 0.2675 train_accuracy: 0.9194\n",
      "epoch: 49 loss: 0.2748 train_accuracy: 0.9188\n",
      "val_accuracy: 0.8872\n",
      "epoch: 50 loss: 0.2615 train_accuracy: 0.9243\n",
      "epoch: 51 loss: 0.2510 train_accuracy: 0.9292\n",
      "epoch: 52 loss: 0.2526 train_accuracy: 0.9311\n",
      "epoch: 53 loss: 0.2401 train_accuracy: 0.9378\n",
      "epoch: 54 loss: 0.2448 train_accuracy: 0.9280\n",
      "epoch: 55 loss: 0.2473 train_accuracy: 0.9262\n",
      "epoch: 56 loss: 0.2404 train_accuracy: 0.9311\n",
      "epoch: 57 loss: 0.2353 train_accuracy: 0.9280\n",
      "epoch: 58 loss: 0.2272 train_accuracy: 0.9372\n",
      "epoch: 59 loss: 0.2369 train_accuracy: 0.9329\n",
      "val_accuracy: 0.8928\n",
      "epoch: 60 loss: 0.2363 train_accuracy: 0.9366\n",
      "epoch: 61 loss: 0.2237 train_accuracy: 0.9366\n",
      "epoch: 62 loss: 0.2337 train_accuracy: 0.9311\n",
      "epoch: 63 loss: 0.2196 train_accuracy: 0.9360\n",
      "epoch: 64 loss: 0.2063 train_accuracy: 0.9329\n",
      "epoch: 65 loss: 0.2295 train_accuracy: 0.9292\n",
      "epoch: 66 loss: 0.2300 train_accuracy: 0.9298\n",
      "epoch: 67 loss: 0.2174 train_accuracy: 0.9360\n",
      "epoch: 68 loss: 0.2324 train_accuracy: 0.9372\n",
      "epoch: 69 loss: 0.2071 train_accuracy: 0.9428\n",
      "val_accuracy: 0.8891\n",
      "epoch: 70 loss: 0.1966 train_accuracy: 0.9440\n",
      "epoch: 71 loss: 0.1901 train_accuracy: 0.9495\n",
      "epoch: 72 loss: 0.2112 train_accuracy: 0.9403\n",
      "epoch: 73 loss: 0.2112 train_accuracy: 0.9378\n",
      "epoch: 74 loss: 0.2041 train_accuracy: 0.9428\n",
      "epoch: 75 loss: 0.2111 train_accuracy: 0.9360\n",
      "epoch: 76 loss: 0.2015 train_accuracy: 0.9415\n",
      "epoch: 77 loss: 0.2046 train_accuracy: 0.9323\n",
      "epoch: 78 loss: 0.2027 train_accuracy: 0.9422\n",
      "epoch: 79 loss: 0.2076 train_accuracy: 0.9477\n",
      "val_accuracy: 0.8854\n",
      "epoch: 80 loss: 0.2043 train_accuracy: 0.9372\n",
      "epoch: 81 loss: 0.1916 train_accuracy: 0.9483\n",
      "epoch: 82 loss: 0.2152 train_accuracy: 0.9280\n",
      "epoch: 83 loss: 0.1969 train_accuracy: 0.9458\n",
      "epoch: 84 loss: 0.1982 train_accuracy: 0.9440\n",
      "epoch: 85 loss: 0.1993 train_accuracy: 0.9428\n",
      "epoch: 86 loss: 0.1988 train_accuracy: 0.9440\n",
      "epoch: 87 loss: 0.1995 train_accuracy: 0.9434\n",
      "epoch: 88 loss: 0.1976 train_accuracy: 0.9366\n",
      "epoch: 89 loss: 0.1881 train_accuracy: 0.9502\n",
      "val_accuracy: 0.8835\n",
      "epoch: 90 loss: 0.1900 train_accuracy: 0.9397\n",
      "epoch: 91 loss: 0.1867 train_accuracy: 0.9508\n",
      "epoch: 92 loss: 0.1983 train_accuracy: 0.9415\n",
      "epoch: 93 loss: 0.1954 train_accuracy: 0.9428\n",
      "epoch: 94 loss: 0.1799 train_accuracy: 0.9569\n",
      "epoch: 95 loss: 0.1848 train_accuracy: 0.9458\n",
      "epoch: 96 loss: 0.1797 train_accuracy: 0.9422\n",
      "epoch: 97 loss: 0.1902 train_accuracy: 0.9489\n",
      "epoch: 98 loss: 0.1743 train_accuracy: 0.9520\n",
      "epoch: 99 loss: 0.1817 train_accuracy: 0.9483\n",
      "val_accuracy: 0.8817\n",
      "epoch: 100 loss: 0.1634 train_accuracy: 0.9575\n",
      "epoch: 101 loss: 0.1738 train_accuracy: 0.9557\n",
      "epoch: 102 loss: 0.1829 train_accuracy: 0.9434\n",
      "epoch: 103 loss: 0.1846 train_accuracy: 0.9422\n",
      "epoch: 104 loss: 0.1869 train_accuracy: 0.9458\n",
      "epoch: 105 loss: 0.1855 train_accuracy: 0.9440\n",
      "epoch: 106 loss: 0.1744 train_accuracy: 0.9538\n",
      "epoch: 107 loss: 0.1703 train_accuracy: 0.9489\n",
      "epoch: 108 loss: 0.1814 train_accuracy: 0.9458\n",
      "epoch: 109 loss: 0.1666 train_accuracy: 0.9569\n",
      "val_accuracy: 0.8780\n",
      "epoch: 110 loss: 0.1625 train_accuracy: 0.9551\n",
      "epoch: 111 loss: 0.1806 train_accuracy: 0.9465\n",
      "epoch: 112 loss: 0.1740 train_accuracy: 0.9495\n",
      "epoch: 113 loss: 0.1638 train_accuracy: 0.9526\n",
      "epoch: 114 loss: 0.1656 train_accuracy: 0.9508\n",
      "epoch: 115 loss: 0.1740 train_accuracy: 0.9526\n",
      "epoch: 116 loss: 0.1682 train_accuracy: 0.9477\n",
      "epoch: 117 loss: 0.1725 train_accuracy: 0.9520\n",
      "epoch: 118 loss: 0.1681 train_accuracy: 0.9489\n",
      "epoch: 119 loss: 0.1664 train_accuracy: 0.9563\n",
      "val_accuracy: 0.8799\n",
      "epoch: 120 loss: 0.1612 train_accuracy: 0.9495\n",
      "epoch: 121 loss: 0.1624 train_accuracy: 0.9538\n",
      "epoch: 122 loss: 0.1545 train_accuracy: 0.9575\n",
      "epoch: 123 loss: 0.1685 train_accuracy: 0.9502\n",
      "epoch: 124 loss: 0.1614 train_accuracy: 0.9532\n",
      "epoch: 125 loss: 0.1637 train_accuracy: 0.9551\n",
      "epoch: 126 loss: 0.1509 train_accuracy: 0.9575\n",
      "epoch: 127 loss: 0.1618 train_accuracy: 0.9538\n",
      "epoch: 128 loss: 0.1618 train_accuracy: 0.9551\n",
      "epoch: 129 loss: 0.1686 train_accuracy: 0.9502\n",
      "val_accuracy: 0.8799\n",
      "epoch: 130 loss: 0.1613 train_accuracy: 0.9538\n",
      "epoch: 131 loss: 0.1615 train_accuracy: 0.9557\n",
      "epoch: 132 loss: 0.1680 train_accuracy: 0.9489\n",
      "epoch: 133 loss: 0.1643 train_accuracy: 0.9557\n",
      "epoch: 134 loss: 0.1569 train_accuracy: 0.9489\n",
      "epoch: 135 loss: 0.1513 train_accuracy: 0.9545\n",
      "epoch: 136 loss: 0.1493 train_accuracy: 0.9563\n",
      "epoch: 137 loss: 0.1355 train_accuracy: 0.9643\n",
      "epoch: 138 loss: 0.1565 train_accuracy: 0.9538\n",
      "epoch: 139 loss: 0.1615 train_accuracy: 0.9508\n",
      "val_accuracy: 0.8854\n",
      "epoch: 140 loss: 0.1645 train_accuracy: 0.9557\n",
      "epoch: 141 loss: 0.1625 train_accuracy: 0.9557\n",
      "epoch: 142 loss: 0.1490 train_accuracy: 0.9545\n",
      "epoch: 143 loss: 0.1524 train_accuracy: 0.9575\n",
      "epoch: 144 loss: 0.1634 train_accuracy: 0.9563\n",
      "epoch: 145 loss: 0.1682 train_accuracy: 0.9446\n",
      "epoch: 146 loss: 0.1517 train_accuracy: 0.9582\n",
      "epoch: 147 loss: 0.1440 train_accuracy: 0.9588\n",
      "epoch: 148 loss: 0.1525 train_accuracy: 0.9569\n",
      "epoch: 149 loss: 0.1555 train_accuracy: 0.9538\n",
      "val_accuracy: 0.8817\n",
      "epoch: 150 loss: 0.1579 train_accuracy: 0.9532\n",
      "epoch: 151 loss: 0.1587 train_accuracy: 0.9563\n",
      "epoch: 152 loss: 0.1516 train_accuracy: 0.9489\n",
      "epoch: 153 loss: 0.1564 train_accuracy: 0.9557\n",
      "epoch: 154 loss: 0.1432 train_accuracy: 0.9649\n",
      "epoch: 155 loss: 0.1592 train_accuracy: 0.9532\n",
      "epoch: 156 loss: 0.1486 train_accuracy: 0.9532\n",
      "epoch: 157 loss: 0.1549 train_accuracy: 0.9532\n",
      "epoch: 158 loss: 0.1472 train_accuracy: 0.9655\n",
      "epoch: 159 loss: 0.1450 train_accuracy: 0.9600\n",
      "val_accuracy: 0.8835\n",
      "epoch: 160 loss: 0.1448 train_accuracy: 0.9637\n",
      "epoch: 161 loss: 0.1422 train_accuracy: 0.9588\n",
      "epoch: 162 loss: 0.1511 train_accuracy: 0.9625\n",
      "epoch: 163 loss: 0.1544 train_accuracy: 0.9569\n",
      "epoch: 164 loss: 0.1444 train_accuracy: 0.9582\n",
      "epoch: 165 loss: 0.1513 train_accuracy: 0.9557\n",
      "epoch: 166 loss: 0.1528 train_accuracy: 0.9526\n",
      "epoch: 167 loss: 0.1510 train_accuracy: 0.9606\n",
      "epoch: 168 loss: 0.1367 train_accuracy: 0.9600\n",
      "epoch: 169 loss: 0.1485 train_accuracy: 0.9538\n",
      "val_accuracy: 0.8780\n",
      "epoch: 170 loss: 0.1406 train_accuracy: 0.9612\n",
      "epoch: 171 loss: 0.1482 train_accuracy: 0.9575\n",
      "epoch: 172 loss: 0.1306 train_accuracy: 0.9662\n",
      "epoch: 173 loss: 0.1375 train_accuracy: 0.9637\n",
      "epoch: 174 loss: 0.1415 train_accuracy: 0.9637\n",
      "epoch: 175 loss: 0.1464 train_accuracy: 0.9483\n",
      "epoch: 176 loss: 0.1384 train_accuracy: 0.9612\n",
      "epoch: 177 loss: 0.1280 train_accuracy: 0.9692\n",
      "epoch: 178 loss: 0.1421 train_accuracy: 0.9569\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 179 loss: 0.1397 train_accuracy: 0.9600\n",
      "val_accuracy: 0.8817\n",
      "epoch: 180 loss: 0.1381 train_accuracy: 0.9526\n",
      "epoch: 181 loss: 0.1494 train_accuracy: 0.9520\n",
      "epoch: 182 loss: 0.1403 train_accuracy: 0.9594\n",
      "epoch: 183 loss: 0.1386 train_accuracy: 0.9588\n",
      "epoch: 184 loss: 0.1367 train_accuracy: 0.9649\n",
      "epoch: 185 loss: 0.1310 train_accuracy: 0.9594\n",
      "epoch: 186 loss: 0.1389 train_accuracy: 0.9606\n",
      "epoch: 187 loss: 0.1429 train_accuracy: 0.9606\n",
      "epoch: 188 loss: 0.1345 train_accuracy: 0.9582\n",
      "epoch: 189 loss: 0.1318 train_accuracy: 0.9625\n",
      "val_accuracy: 0.8780\n",
      "epoch: 190 loss: 0.1378 train_accuracy: 0.9625\n",
      "epoch: 191 loss: 0.1361 train_accuracy: 0.9631\n",
      "epoch: 192 loss: 0.1349 train_accuracy: 0.9655\n",
      "epoch: 193 loss: 0.1208 train_accuracy: 0.9668\n",
      "epoch: 194 loss: 0.1380 train_accuracy: 0.9612\n",
      "epoch: 195 loss: 0.1272 train_accuracy: 0.9637\n",
      "epoch: 196 loss: 0.1408 train_accuracy: 0.9594\n",
      "epoch: 197 loss: 0.1341 train_accuracy: 0.9662\n",
      "epoch: 198 loss: 0.1466 train_accuracy: 0.9588\n",
      "epoch: 199 loss: 0.1228 train_accuracy: 0.9637\n",
      "val_accuracy: 0.8725\n",
      "0.8853974121996303\n"
     ]
    }
   ],
   "source": [
    "# GCN: train for 200 epochs on Cora; validate every 10 epochs and keep the\n",
    "# test accuracy measured at the best-validation checkpoint.\n",
    "model = GCNNet(features.shape[1], len(label_to_index)).to(device)\n",
    "\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)\n",
    "\n",
    "val_best = 0\n",
    "test_best = 0\n",
    "\n",
    "for epoch in range(200):\n",
    "    optimizer.zero_grad()\n",
    "    out = model(cora)\n",
    "    _, pred = out.max(dim=1)\n",
    "    correct = int(pred[train_mask].eq(cora.y[train_mask]).sum().item())\n",
    "    # NOTE(review): len(train_mask) equals the train-set size only if\n",
    "    # train_mask is an index tensor; for a boolean mask this divides by the\n",
    "    # total node count — use int(train_mask.sum()) in that case. TODO confirm.\n",
    "    acc = correct / len(train_mask)\n",
    "    loss = F.nll_loss(out[train_mask], cora.y[train_mask])\n",
    "    # .item() extracts the Python float instead of %-formatting a tensor.\n",
    "    print('epoch: %d loss: %.4f train_accuracy: %.4f' % (epoch, loss.item(), acc))\n",
    "    loss.backward()\n",
    "    optimizer.step()\n",
    "\n",
    "    if (epoch + 1) % 10 == 0:\n",
    "        model.eval()\n",
    "        # no_grad: evaluation needs no autograd graph (saves memory/compute).\n",
    "        with torch.no_grad():\n",
    "            _, pred = model(cora).max(dim=1)\n",
    "        correct = int(pred[val_mask].eq(cora.y[val_mask]).sum().item())\n",
    "        val_acc = correct / len(val_mask)\n",
    "        print('val_accuracy: {:.4f}'.format(val_acc))\n",
    "\n",
    "        # Model selection: report the test accuracy of the epoch with the\n",
    "        # best validation accuracy, not the final epoch.\n",
    "        if val_acc > val_best:\n",
    "            val_best = val_acc\n",
    "            correct = int(pred[test_mask].eq(cora.y[test_mask]).sum().item())\n",
    "            test_acc = correct / len(test_mask)\n",
    "            test_best = test_acc\n",
    "        model.train()\n",
    "\n",
    "print(test_best)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2021-07-05T10:04:01.263365Z",
     "start_time": "2021-07-05T10:03:59.744526Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 0 loss: 1.9330 train_accuracy: 0.2462\n",
      "epoch: 1 loss: 1.8299 train_accuracy: 0.4388\n",
      "epoch: 2 loss: 1.7190 train_accuracy: 0.4142\n",
      "epoch: 3 loss: 1.5830 train_accuracy: 0.4677\n",
      "epoch: 4 loss: 1.4923 train_accuracy: 0.4585\n",
      "epoch: 5 loss: 1.4227 train_accuracy: 0.5083\n",
      "epoch: 6 loss: 1.3640 train_accuracy: 0.5483\n",
      "epoch: 7 loss: 1.3090 train_accuracy: 0.5840\n",
      "epoch: 8 loss: 1.2825 train_accuracy: 0.6018\n",
      "epoch: 9 loss: 1.2106 train_accuracy: 0.6517\n",
      "val_accuracy: 0.8189\n",
      "epoch: 10 loss: 1.1971 train_accuracy: 0.6498\n",
      "epoch: 11 loss: 1.1439 train_accuracy: 0.6671\n",
      "epoch: 12 loss: 1.1155 train_accuracy: 0.7052\n",
      "epoch: 13 loss: 1.0902 train_accuracy: 0.6966\n",
      "epoch: 14 loss: 1.0251 train_accuracy: 0.7329\n",
      "epoch: 15 loss: 1.0031 train_accuracy: 0.7262\n",
      "epoch: 16 loss: 1.0000 train_accuracy: 0.7305\n",
      "epoch: 17 loss: 0.9591 train_accuracy: 0.7520\n",
      "epoch: 18 loss: 0.9439 train_accuracy: 0.7434\n",
      "epoch: 19 loss: 0.9383 train_accuracy: 0.7409\n",
      "val_accuracy: 0.8725\n",
      "epoch: 20 loss: 0.8942 train_accuracy: 0.7582\n",
      "epoch: 21 loss: 0.9239 train_accuracy: 0.7391\n",
      "epoch: 22 loss: 0.8837 train_accuracy: 0.7489\n",
      "epoch: 23 loss: 0.8549 train_accuracy: 0.7545\n",
      "epoch: 24 loss: 0.8234 train_accuracy: 0.7655\n",
      "epoch: 25 loss: 0.8543 train_accuracy: 0.7514\n",
      "epoch: 26 loss: 0.8538 train_accuracy: 0.7502\n",
      "epoch: 27 loss: 0.8282 train_accuracy: 0.7545\n",
      "epoch: 28 loss: 0.8194 train_accuracy: 0.7686\n",
      "epoch: 29 loss: 0.8262 train_accuracy: 0.7637\n",
      "val_accuracy: 0.8743\n",
      "epoch: 30 loss: 0.8026 train_accuracy: 0.7575\n",
      "epoch: 31 loss: 0.7925 train_accuracy: 0.7625\n",
      "epoch: 32 loss: 0.8073 train_accuracy: 0.7532\n",
      "epoch: 33 loss: 0.7897 train_accuracy: 0.7575\n",
      "epoch: 34 loss: 0.7720 train_accuracy: 0.7575\n",
      "epoch: 35 loss: 0.8102 train_accuracy: 0.7643\n",
      "epoch: 36 loss: 0.7740 train_accuracy: 0.7649\n",
      "epoch: 37 loss: 0.7416 train_accuracy: 0.7649\n",
      "epoch: 38 loss: 0.7485 train_accuracy: 0.7754\n",
      "epoch: 39 loss: 0.7564 train_accuracy: 0.7631\n",
      "val_accuracy: 0.8743\n",
      "epoch: 40 loss: 0.7173 train_accuracy: 0.7766\n",
      "epoch: 41 loss: 0.7721 train_accuracy: 0.7545\n",
      "epoch: 42 loss: 0.6999 train_accuracy: 0.7840\n",
      "epoch: 43 loss: 0.7258 train_accuracy: 0.7815\n",
      "epoch: 44 loss: 0.7444 train_accuracy: 0.7594\n",
      "epoch: 45 loss: 0.7561 train_accuracy: 0.7600\n",
      "epoch: 46 loss: 0.7478 train_accuracy: 0.7538\n",
      "epoch: 47 loss: 0.7298 train_accuracy: 0.7563\n",
      "epoch: 48 loss: 0.7060 train_accuracy: 0.7760\n",
      "epoch: 49 loss: 0.6993 train_accuracy: 0.7778\n",
      "val_accuracy: 0.8725\n",
      "epoch: 50 loss: 0.7356 train_accuracy: 0.7686\n",
      "epoch: 51 loss: 0.7204 train_accuracy: 0.7655\n",
      "epoch: 52 loss: 0.6744 train_accuracy: 0.7858\n",
      "epoch: 53 loss: 0.6861 train_accuracy: 0.7766\n",
      "epoch: 54 loss: 0.6862 train_accuracy: 0.7822\n",
      "epoch: 55 loss: 0.6696 train_accuracy: 0.7840\n",
      "epoch: 56 loss: 0.6725 train_accuracy: 0.7852\n",
      "epoch: 57 loss: 0.7171 train_accuracy: 0.7668\n",
      "epoch: 58 loss: 0.7115 train_accuracy: 0.7655\n",
      "epoch: 59 loss: 0.6872 train_accuracy: 0.7877\n",
      "val_accuracy: 0.8725\n",
      "epoch: 60 loss: 0.6717 train_accuracy: 0.7871\n",
      "epoch: 61 loss: 0.7006 train_accuracy: 0.7735\n",
      "epoch: 62 loss: 0.6682 train_accuracy: 0.7785\n",
      "epoch: 63 loss: 0.6631 train_accuracy: 0.7822\n",
      "epoch: 64 loss: 0.6611 train_accuracy: 0.7822\n",
      "epoch: 65 loss: 0.6802 train_accuracy: 0.7791\n",
      "epoch: 66 loss: 0.6569 train_accuracy: 0.7803\n",
      "epoch: 67 loss: 0.6860 train_accuracy: 0.7803\n",
      "epoch: 68 loss: 0.6466 train_accuracy: 0.7926\n",
      "epoch: 69 loss: 0.6900 train_accuracy: 0.7649\n",
      "val_accuracy: 0.8799\n",
      "epoch: 70 loss: 0.6831 train_accuracy: 0.7766\n",
      "epoch: 71 loss: 0.6781 train_accuracy: 0.7692\n",
      "epoch: 72 loss: 0.6392 train_accuracy: 0.7858\n",
      "epoch: 73 loss: 0.6685 train_accuracy: 0.7742\n",
      "epoch: 74 loss: 0.6538 train_accuracy: 0.7877\n",
      "epoch: 75 loss: 0.6746 train_accuracy: 0.7766\n",
      "epoch: 76 loss: 0.6484 train_accuracy: 0.7815\n",
      "epoch: 77 loss: 0.6781 train_accuracy: 0.7717\n",
      "epoch: 78 loss: 0.6532 train_accuracy: 0.7822\n",
      "epoch: 79 loss: 0.6864 train_accuracy: 0.7748\n",
      "val_accuracy: 0.8743\n",
      "epoch: 80 loss: 0.6557 train_accuracy: 0.7748\n",
      "epoch: 81 loss: 0.6592 train_accuracy: 0.7803\n",
      "epoch: 82 loss: 0.6431 train_accuracy: 0.7717\n",
      "epoch: 83 loss: 0.6243 train_accuracy: 0.7926\n",
      "epoch: 84 loss: 0.6820 train_accuracy: 0.7569\n",
      "epoch: 85 loss: 0.6407 train_accuracy: 0.7840\n",
      "epoch: 86 loss: 0.6547 train_accuracy: 0.7797\n",
      "epoch: 87 loss: 0.6704 train_accuracy: 0.7791\n",
      "epoch: 88 loss: 0.6402 train_accuracy: 0.7698\n",
      "epoch: 89 loss: 0.6609 train_accuracy: 0.7834\n",
      "val_accuracy: 0.8780\n",
      "epoch: 90 loss: 0.6183 train_accuracy: 0.7914\n",
      "epoch: 91 loss: 0.6684 train_accuracy: 0.7785\n",
      "epoch: 92 loss: 0.6553 train_accuracy: 0.7815\n",
      "epoch: 93 loss: 0.6570 train_accuracy: 0.7809\n",
      "epoch: 94 loss: 0.6112 train_accuracy: 0.7994\n",
      "epoch: 95 loss: 0.6434 train_accuracy: 0.7791\n",
      "epoch: 96 loss: 0.6080 train_accuracy: 0.7951\n",
      "epoch: 97 loss: 0.5935 train_accuracy: 0.8049\n",
      "epoch: 98 loss: 0.5907 train_accuracy: 0.8000\n",
      "epoch: 99 loss: 0.6538 train_accuracy: 0.7785\n",
      "val_accuracy: 0.8872\n",
      "epoch: 100 loss: 0.6508 train_accuracy: 0.7754\n",
      "epoch: 101 loss: 0.6222 train_accuracy: 0.7926\n",
      "epoch: 102 loss: 0.6031 train_accuracy: 0.8018\n",
      "epoch: 103 loss: 0.6322 train_accuracy: 0.7895\n",
      "epoch: 104 loss: 0.6724 train_accuracy: 0.7772\n",
      "epoch: 105 loss: 0.6660 train_accuracy: 0.7735\n",
      "epoch: 106 loss: 0.6324 train_accuracy: 0.7938\n",
      "epoch: 107 loss: 0.6279 train_accuracy: 0.7822\n",
      "epoch: 108 loss: 0.6271 train_accuracy: 0.7871\n",
      "epoch: 109 loss: 0.6203 train_accuracy: 0.7852\n",
      "val_accuracy: 0.8854\n",
      "epoch: 110 loss: 0.6092 train_accuracy: 0.7865\n",
      "epoch: 111 loss: 0.6555 train_accuracy: 0.7729\n",
      "epoch: 112 loss: 0.6212 train_accuracy: 0.7889\n",
      "epoch: 113 loss: 0.6060 train_accuracy: 0.7982\n",
      "epoch: 114 loss: 0.6349 train_accuracy: 0.7871\n",
      "epoch: 115 loss: 0.6153 train_accuracy: 0.7926\n",
      "epoch: 116 loss: 0.6276 train_accuracy: 0.7963\n",
      "epoch: 117 loss: 0.6237 train_accuracy: 0.7840\n",
      "epoch: 118 loss: 0.6199 train_accuracy: 0.7975\n",
      "epoch: 119 loss: 0.5845 train_accuracy: 0.8055\n",
      "val_accuracy: 0.8725\n",
      "epoch: 120 loss: 0.5719 train_accuracy: 0.8037\n",
      "epoch: 121 loss: 0.6364 train_accuracy: 0.7840\n",
      "epoch: 122 loss: 0.5879 train_accuracy: 0.8098\n",
      "epoch: 123 loss: 0.6136 train_accuracy: 0.7858\n",
      "epoch: 124 loss: 0.6434 train_accuracy: 0.7785\n",
      "epoch: 125 loss: 0.6389 train_accuracy: 0.7791\n",
      "epoch: 126 loss: 0.6096 train_accuracy: 0.7865\n",
      "epoch: 127 loss: 0.5987 train_accuracy: 0.7938\n",
      "epoch: 128 loss: 0.6256 train_accuracy: 0.7858\n",
      "epoch: 129 loss: 0.6265 train_accuracy: 0.7877\n",
      "val_accuracy: 0.8688\n",
      "epoch: 130 loss: 0.6081 train_accuracy: 0.7902\n",
      "epoch: 131 loss: 0.6155 train_accuracy: 0.7963\n",
      "epoch: 132 loss: 0.6160 train_accuracy: 0.7902\n",
      "epoch: 133 loss: 0.6106 train_accuracy: 0.7932\n",
      "epoch: 134 loss: 0.6057 train_accuracy: 0.7883\n",
      "epoch: 135 loss: 0.6196 train_accuracy: 0.7865\n",
      "epoch: 136 loss: 0.5861 train_accuracy: 0.7982\n",
      "epoch: 137 loss: 0.5842 train_accuracy: 0.8049\n",
      "epoch: 138 loss: 0.6240 train_accuracy: 0.7803\n",
      "epoch: 139 loss: 0.6250 train_accuracy: 0.7858\n",
      "val_accuracy: 0.8762\n",
      "epoch: 140 loss: 0.6195 train_accuracy: 0.7945\n",
      "epoch: 141 loss: 0.6394 train_accuracy: 0.7809\n",
      "epoch: 142 loss: 0.6113 train_accuracy: 0.7957\n",
      "epoch: 143 loss: 0.5899 train_accuracy: 0.8018\n",
      "epoch: 144 loss: 0.6020 train_accuracy: 0.7938\n",
      "epoch: 145 loss: 0.5707 train_accuracy: 0.8068\n",
      "epoch: 146 loss: 0.5908 train_accuracy: 0.7969\n",
      "epoch: 147 loss: 0.5866 train_accuracy: 0.8012\n",
      "epoch: 148 loss: 0.6268 train_accuracy: 0.7858\n",
      "epoch: 149 loss: 0.6065 train_accuracy: 0.7828\n",
      "val_accuracy: 0.8835\n",
      "epoch: 150 loss: 0.6086 train_accuracy: 0.7895\n",
      "epoch: 151 loss: 0.5934 train_accuracy: 0.7914\n",
      "epoch: 152 loss: 0.5888 train_accuracy: 0.7932\n",
      "epoch: 153 loss: 0.5920 train_accuracy: 0.7938\n",
      "epoch: 154 loss: 0.6043 train_accuracy: 0.7938\n",
      "epoch: 155 loss: 0.5862 train_accuracy: 0.8031\n",
      "epoch: 156 loss: 0.5998 train_accuracy: 0.8031\n",
      "epoch: 157 loss: 0.5978 train_accuracy: 0.7969\n",
      "epoch: 158 loss: 0.6051 train_accuracy: 0.7975\n",
      "epoch: 159 loss: 0.5830 train_accuracy: 0.8025\n",
      "val_accuracy: 0.8799\n",
      "epoch: 160 loss: 0.6019 train_accuracy: 0.7938\n",
      "epoch: 161 loss: 0.5957 train_accuracy: 0.7920\n",
      "epoch: 162 loss: 0.6494 train_accuracy: 0.7735\n",
      "epoch: 163 loss: 0.6133 train_accuracy: 0.7852\n",
      "epoch: 164 loss: 0.6405 train_accuracy: 0.7785\n",
      "epoch: 165 loss: 0.6188 train_accuracy: 0.7846\n",
      "epoch: 166 loss: 0.6141 train_accuracy: 0.8000\n",
      "epoch: 167 loss: 0.5920 train_accuracy: 0.8012\n",
      "epoch: 168 loss: 0.5859 train_accuracy: 0.8012\n",
      "epoch: 169 loss: 0.6271 train_accuracy: 0.7803\n",
      "val_accuracy: 0.8725\n",
      "epoch: 170 loss: 0.6337 train_accuracy: 0.7723\n",
      "epoch: 171 loss: 0.6025 train_accuracy: 0.7932\n",
      "epoch: 172 loss: 0.6122 train_accuracy: 0.7957\n",
      "epoch: 173 loss: 0.6027 train_accuracy: 0.7908\n",
      "epoch: 174 loss: 0.5845 train_accuracy: 0.7994\n",
      "epoch: 175 loss: 0.6154 train_accuracy: 0.7858\n",
      "epoch: 176 loss: 0.6178 train_accuracy: 0.7877\n",
      "epoch: 177 loss: 0.5812 train_accuracy: 0.7957\n",
      "epoch: 178 loss: 0.6232 train_accuracy: 0.7883\n",
      "epoch: 179 loss: 0.5899 train_accuracy: 0.7926\n",
      "val_accuracy: 0.8688\n",
      "epoch: 180 loss: 0.6111 train_accuracy: 0.7908\n",
      "epoch: 181 loss: 0.5784 train_accuracy: 0.8037\n",
      "epoch: 182 loss: 0.5987 train_accuracy: 0.7957\n",
      "epoch: 183 loss: 0.5866 train_accuracy: 0.7914\n",
      "epoch: 184 loss: 0.5934 train_accuracy: 0.7988\n",
      "epoch: 185 loss: 0.6403 train_accuracy: 0.7711\n",
      "epoch: 186 loss: 0.6078 train_accuracy: 0.7871\n",
      "epoch: 187 loss: 0.6059 train_accuracy: 0.7889\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 188 loss: 0.6133 train_accuracy: 0.7926\n",
      "epoch: 189 loss: 0.6185 train_accuracy: 0.7815\n",
      "val_accuracy: 0.8743\n",
      "epoch: 190 loss: 0.5868 train_accuracy: 0.7994\n",
      "epoch: 191 loss: 0.6040 train_accuracy: 0.7871\n",
      "epoch: 192 loss: 0.6174 train_accuracy: 0.7828\n",
      "epoch: 193 loss: 0.5894 train_accuracy: 0.7932\n",
      "epoch: 194 loss: 0.5950 train_accuracy: 0.7840\n",
      "epoch: 195 loss: 0.5942 train_accuracy: 0.7883\n",
      "epoch: 196 loss: 0.5993 train_accuracy: 0.7975\n",
      "epoch: 197 loss: 0.5973 train_accuracy: 0.7957\n",
      "epoch: 198 loss: 0.5961 train_accuracy: 0.7957\n",
      "epoch: 199 loss: 0.6363 train_accuracy: 0.7791\n",
      "val_accuracy: 0.8780\n",
      "0.8817005545286506\n"
     ]
    }
   ],
   "source": [
    "# GAT: same 200-epoch training/validation protocol as the GCN cell; keeps\n",
    "# the test accuracy measured at the best-validation checkpoint.\n",
    "model = GATNet(features.shape[1], len(label_to_index)).to(device)\n",
    "\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)\n",
    "\n",
    "val_best = 0\n",
    "test_best = 0\n",
    "\n",
    "for epoch in range(200):\n",
    "    optimizer.zero_grad()\n",
    "    out = model(cora)\n",
    "    _, pred = out.max(dim=1)\n",
    "    correct = int(pred[train_mask].eq(cora.y[train_mask]).sum().item())\n",
    "    # NOTE(review): len(train_mask) equals the train-set size only if\n",
    "    # train_mask is an index tensor; for a boolean mask this divides by the\n",
    "    # total node count — use int(train_mask.sum()) in that case. TODO confirm.\n",
    "    acc = correct / len(train_mask)\n",
    "    loss = F.nll_loss(out[train_mask], cora.y[train_mask])\n",
    "    # .item() extracts the Python float instead of %-formatting a tensor.\n",
    "    print('epoch: %d loss: %.4f train_accuracy: %.4f' % (epoch, loss.item(), acc))\n",
    "    loss.backward()\n",
    "    optimizer.step()\n",
    "\n",
    "    if (epoch + 1) % 10 == 0:\n",
    "        model.eval()\n",
    "        # no_grad: evaluation needs no autograd graph (saves memory/compute).\n",
    "        with torch.no_grad():\n",
    "            _, pred = model(cora).max(dim=1)\n",
    "        correct = int(pred[val_mask].eq(cora.y[val_mask]).sum().item())\n",
    "        val_acc = correct / len(val_mask)\n",
    "        print('val_accuracy: {:.4f}'.format(val_acc))\n",
    "\n",
    "        # Model selection: report the test accuracy of the epoch with the\n",
    "        # best validation accuracy, not the final epoch.\n",
    "        if val_acc > val_best:\n",
    "            val_best = val_acc\n",
    "            correct = int(pred[test_mask].eq(cora.y[test_mask]).sum().item())\n",
    "            test_acc = correct / len(test_mask)\n",
    "            test_best = test_acc\n",
    "        model.train()\n",
    "\n",
    "print(test_best)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "song",
   "language": "python",
   "name": "song"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.13"
  },
  "latex_envs": {
   "LaTeX_envs_menu_present": true,
   "autoclose": false,
   "autocomplete": true,
   "bibliofile": "biblio.bib",
   "cite_by": "apalike",
   "current_citInitial": 1,
   "eqLabelWithNumbers": true,
   "eqNumInitial": 1,
   "hotkeys": {
    "equation": "Ctrl-E",
    "itemize": "Ctrl-I"
   },
   "labels_anchors": false,
   "latex_user_defs": false,
   "report_style_numbering": false,
   "user_envs_cfg": false
  },
  "varInspector": {
   "cols": {
    "lenName": 16,
    "lenType": 16,
    "lenVar": 40
   },
   "kernels_config": {
    "python": {
     "delete_cmd_postfix": "",
     "delete_cmd_prefix": "del ",
     "library": "var_list.py",
     "varRefreshCmd": "print(var_dic_list())"
    },
    "r": {
     "delete_cmd_postfix": ") ",
     "delete_cmd_prefix": "rm(",
     "library": "var_list.r",
     "varRefreshCmd": "cat(var_dic_list()) "
    }
   },
   "types_to_exclude": [
    "module",
    "function",
    "builtin_function_or_method",
    "instance",
    "_Feature"
   ],
   "window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
