{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 介绍"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "使用原生Pytorch实现GNN模型，在论文引用数据集Cora上进行训练和测试，实现的模型有论文[《The Graph Neural Network Model》](https://persagen.com/files/misc/scarselli2009graph.pdf)的Linear GNN，以及GCN模型(使用Pytorch实现PyG库的GCN模型)。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 导入库"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import numpy as np\n",
    "from tqdm import tqdm\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 数据处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "'''\n",
    "Load the Cora citation dataset and build the tensors used by the models:\n",
    "node_num, feat_dim, stat_dim, num_class, T\n",
    "feat_Matrix, X_Node, X_Neis, dg_list\n",
    "'''\n",
    "content_path = \"./cora/cora.content\"\n",
    "cite_path = \"./cora/cora.cites\"\n",
    "\n",
    "# Read the raw node-content and citation files\n",
    "with open(content_path, \"r\") as fp:\n",
    "    contents = fp.readlines()\n",
    "with open(cite_path, \"r\") as fp:\n",
    "    cites = fp.readlines()\n",
    "\n",
    "# Each content row: <paper id> <tab-separated 0/1 word features> <class label>\n",
    "contents = np.array([np.array(l.strip().split(\"\\t\")) for l in contents])\n",
    "paper_list, feat_list, label_list = np.split(contents, [1,-1], axis=1)\n",
    "paper_list, label_list = np.squeeze(paper_list), np.squeeze(label_list)\n",
    "# Paper -> Index dict\n",
    "paper_dict = dict([(key, val) for val, key in enumerate(paper_list)])\n",
    "# Label -> Index dict\n",
    "labels = list(set(label_list))\n",
    "label_dict = dict([(key, val) for val, key in enumerate(labels)])\n",
    "# Edge index: translate paper ids to node indices\n",
    "cites = [i.strip().split(\"\\t\") for i in cites]\n",
    "cites = np.array([[paper_dict[i[0]], paper_dict[i[1]]] for i in cites], \n",
    "                 np.int64).T   # (2, edge)\n",
    "# Append reversed edges so the graph is treated as undirected\n",
    "cites = np.concatenate((cites, cites[::-1, :]), axis=1)  # (2, 2*edge) or (2, E)\n",
    "# Degree of each source node.\n",
    "# NOTE(review): assumes every node appears at least once in cites[0];\n",
    "# an isolated node would shift the alignment of degree_list - confirm for Cora\n",
    "_, degree_list = np.unique(cites[0,:], return_counts=True)\n",
    "\n",
    "# Model inputs and hyper-parameters\n",
    "node_num = len(paper_list)\n",
    "feat_dim = feat_list.shape[1]\n",
    "stat_dim = 32\n",
    "num_class = len(labels)\n",
    "T = 2\n",
    "feat_Matrix = torch.Tensor(feat_list.astype(np.float32))\n",
    "X_Node, X_Neis = np.split(cites, 2, axis=0)\n",
    "X_Node, X_Neis = torch.from_numpy(np.squeeze(X_Node)), \\\n",
    "                 torch.from_numpy(np.squeeze(X_Neis))\n",
    "dg_list = degree_list[X_Node]\n",
    "label_list = np.array([label_dict[i] for i in label_list])\n",
    "label_list = torch.from_numpy(label_list)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 显示处理结果"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "********************Data Process Info********************\n",
      "==> Number of node : 2708\n",
      "==> Number of edges : 10858/2=5429\n",
      "==> Number of classes : 7\n",
      "==> Dimension of node features : 1433\n",
      "==> Dimension of node state : 32\n",
      "==> T : 2\n",
      "==> Shape of feat_Matrix : torch.Size([2708, 1433])\n",
      "==> Shape of X_Node : torch.Size([10858])\n",
      "==> Shape of X_Neis : torch.Size([10858])\n",
      "==> Length of dg_list : 10858\n"
     ]
    }
   ],
   "source": [
    "# Summary of the processed dataset (sanity check before training)\n",
    "print(\"{}Data Process Info{}\".format(\"*\"*20, \"*\"*20))\n",
    "print(\"==> Number of node : {}\".format(node_num))\n",
    "print(\"==> Number of edges : {}/2={}\".format(cites.shape[1], int(cites.shape[1]/2)))\n",
    "print(\"==> Number of classes : {}\".format(num_class))\n",
    "print(\"==> Dimension of node features : {}\".format(feat_dim))\n",
    "print(\"==> Dimension of node state : {}\".format(stat_dim))\n",
    "print(\"==> T : {}\".format(T))\n",
    "print(\"==> Shape of feat_Matrix : {}\".format(feat_Matrix.shape))\n",
    "print(\"==> Shape of X_Node : {}\".format(X_Node.shape))\n",
    "print(\"==> Shape of X_Neis : {}\".format(X_Neis.shape))\n",
    "print(\"==> Length of dg_list : {}\".format(len(dg_list)))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Linear GNN模型"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Linear GNN模型使用的是论文《The Graph Neural Network Model》中提到的Linear GNN模型，原论文将该模型使用在子图匹配问题上(本质上也是节点分类问题)，该实现将模型应用在Cora的数据集上。  \n",
    "模型的观点大致如下：\n",
    "+ 每个节点持有两个向量$\\mathbf{x}_i$和$\\mathbf{h}^t_i$，前者表示节点的特征向量，后者表示节点的状态向量，最初所有节点的初始化状态向量为0，即$\\mathbf{h}^0_i=0,i=1,2...,N$。\n",
    "+ 通过模型对节点的状态向量进行迭代更新，迭代次数为$T$次，直到达到不动点。\n",
    "+ 使用$T$时刻(即不动点处)节点的状态向量和特征向量来得到节点的输出。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "'''\n",
    "Implements the Xi function of the paper: builds the transition matrix A\n",
    "used inside Hw from the concatenated feature vectors of an edge's node\n",
    "pair (i, j). ln is the feature dimension, s the state dimension.\n",
    "Initialization :\n",
    "Input :\n",
    "    ln : (int) node feature dimension\n",
    "    s : (int) node state dimension\n",
    "Forward :\n",
    "Input :\n",
    "    X : (Tensor) concatenated node-pair feature vectors, shape (N, 2*ln)\n",
    "Output :\n",
    "    out : (Tensor) the A matrices, shape (N, s, s)\n",
    "'''\n",
    "class Xi(nn.Module):\n",
    "    def __init__(self, ln, s):\n",
    "        super(Xi, self).__init__()\n",
    "        self.ln = ln   # node feature dimension\n",
    "        self.s = s     # node state dimension (original comment wrongly said node count)\n",
    "        \n",
    "        # Linear layer mapping (2*ln) -> (s*s)\n",
    "        self.linear = nn.Linear(in_features=2 * ln,\n",
    "                                out_features=s ** 2,\n",
    "                                bias=True)\n",
    "        # Activation\n",
    "        self.tanh = nn.Tanh()\n",
    "        \n",
    "    def forward(self, X):\n",
    "        bs = X.size()[0]  # N, one row per edge\n",
    "        out = self.linear(X)\n",
    "        out = self.tanh(out)\n",
    "        # Reshape each row into an (s, s) matrix\n",
    "        return out.view(bs, self.s, self.s)\n",
    "\n",
    "\n",
    "'''\n",
    "Implements the Rou (rho) function of the paper: produces the bias term b\n",
    "used inside Hw from a node's feature vector.\n",
    "Initialization :\n",
    "Input :\n",
    "    ln : (int) node feature dimension\n",
    "    s : (int) node state dimension\n",
    "Forward :\n",
    "Input :\n",
    "    X : (Tensor) node feature matrix, shape (N, ln)\n",
    "Output :\n",
    "    out : (Tensor) bias matrix, shape (N, s)\n",
    "'''\n",
    "class Rou(nn.Module):\n",
    "    def __init__(self, ln, s):\n",
    "        super(Rou, self).__init__()\n",
    "        self.linear = nn.Linear(in_features=ln,\n",
    "                                out_features=s,\n",
    "                                bias=True)\n",
    "        self.tanh = nn.Tanh()\n",
    "    def forward(self, X):\n",
    "        return self.tanh(self.linear(X))\n",
    "\n",
    "'''\n",
    "Implements the Hw message function: for each edge it produces the message\n",
    "sent to the source node, computed as A @ h + b, where A comes from Xi and\n",
    "b from Rou.\n",
    "Initialize :\n",
    "Input :\n",
    "    ln : (int) node feature dimension\n",
    "    s : (int) node state dimension\n",
    "    mu : (float) contraction coefficient of the state-update map\n",
    "Forward :\n",
    "Input :\n",
    "    X : (Tensor) one row per edge, the two node feature vectors concatenated, shape (N, 2*ln)\n",
    "    H : (Tensor) state vector of the source node of each row of X\n",
    "    dg_list : (list or Tensor) degree of the source node of each row of X\n",
    "Output :\n",
    "    out : (Tensor) the Hw output, shape (N, s)\n",
    "'''\n",
    "class Hw(nn.Module):\n",
    "    def __init__(self, ln, s, mu=0.9):\n",
    "        super(Hw, self).__init__()\n",
    "        self.ln = ln\n",
    "        self.s = s\n",
    "        self.mu = mu\n",
    "        \n",
    "        # Sub-networks producing A (transition matrix) and b (bias)\n",
    "        self.Xi = Xi(ln, s)\n",
    "        self.Rou = Rou(ln, s)\n",
    "    \n",
    "    def forward(self, X, H, dg_list):\n",
    "        # Accept degrees as list/ndarray (converted here) or as a tensor\n",
    "        if isinstance(dg_list, list) or isinstance(dg_list, np.ndarray):\n",
    "            dg_list = torch.Tensor(dg_list).to(X.device)\n",
    "        elif isinstance(dg_list, torch.Tensor):\n",
    "            pass\n",
    "        else:\n",
    "            raise TypeError(\"==> dg_list should be list or tensor, not {}\".format(type(dg_list)))\n",
    "        # Scale A by mu/s and the source degree; presumably to keep the\n",
    "        # update a contraction map as in the paper - TODO confirm\n",
    "        A = (self.Xi(X) * self.mu / self.s) / dg_list.view(-1, 1, 1)# (N, S, S)\n",
    "        # b is built from the source-node half of the concatenated features\n",
    "        b = self.Rou(torch.chunk(X, chunks=2, dim=1)[0])# (N, S)\n",
    "        out = torch.squeeze(torch.matmul(A, torch.unsqueeze(H, 2)),-1) + b  # (N, s, s) @ (N, s, 1) -> (N, s), plus (N, s)\n",
    "        return out    # (N, s)\n",
    "\n",
    "'''\n",
    "Aggregation layer: sums the per-edge messages produced by Hw grouped by\n",
    "their source-node index, yielding the new state vector of every node.\n",
    "Initialize :\n",
    "Input :\n",
    "    node_num : (int) number of nodes V\n",
    "Forward :\n",
    "Input :\n",
    "    H : (Tensor) output of Hw, shape (N, s)\n",
    "    X_node : (LongTensor) source-node index of each row of H, shape (N, )\n",
    "Output :\n",
    "    out : (Tensor) aggregated node state vectors, shape (V, s)\n",
    "'''\n",
    "class AggrSum(nn.Module):\n",
    "    def __init__(self, node_num):\n",
    "        super(AggrSum, self).__init__()\n",
    "        self.V = node_num\n",
    "    \n",
    "    def forward(self, H, X_node):\n",
    "        # H : (N, s), X_node : (N, ) -> (V, s)\n",
    "        # Scatter-add each message row into its source node's slot.\n",
    "        # Replaces the old dense (V, N) mask + matmul, which used the\n",
    "        # deprecated torch.range, allocated O(V*N) memory and was built\n",
    "        # on CPU regardless of H's device.\n",
    "        out = torch.zeros(self.V, H.size(1), dtype=H.dtype, device=H.device)\n",
    "        out.index_add_(0, X_node, H)\n",
    "        return out\n",
    "\n",
    "'''\n",
    "The Linear GNN model: iterates the state update T times (toward the fixed\n",
    "point), then classifies every node from its final state vector with a\n",
    "linear output layer.\n",
    "Initialize :\n",
    "Input :\n",
    "    node_num : (int) number of nodes\n",
    "    feat_dim : (int) node feature dimension\n",
    "    stat_dim : (int) node state dimension\n",
    "    T : (int) number of update iterations\n",
    "Forward :\n",
    "Input :\n",
    "    feat_Matrix : (Tensor) node feature matrix, shape (V, ln)\n",
    "    X_Node : (Tensor) source-node index of every edge, shape (N, ); for edge i->j the source is node i\n",
    "    X_Neis : (Tensor) target-node index of every edge, shape (N, ); for edge i->j the target is node j\n",
    "    dg_list : (list or Tensor) degrees of the X_Node nodes, shape (N, )\n",
    "Output :\n",
    "    out : (Tensor) per-node log class probabilities, shape (V, num_class)\n",
    "'''\n",
    "class OriLinearGNN(nn.Module):\n",
    "    def __init__(self, node_num, feat_dim, stat_dim, num_class, T):\n",
    "        super(OriLinearGNN, self).__init__()\n",
    "        self.embed_dim = feat_dim\n",
    "        self.stat_dim = stat_dim\n",
    "        self.T = T\n",
    "        # Output layer (a commented-out MLP variant is kept below for reference)\n",
    "        '''\n",
    "        self.out_layer = nn.Sequential(\n",
    "            nn.Linear(stat_dim, 16),   # ln+s -> hidden_layer\n",
    "            nn.Tanh(),\n",
    "            nn.Dropout(p=0.5),\n",
    "            nn.Linear(16, num_class)   # hidden_layer -> logits\n",
    "        )\n",
    "        '''\n",
    "        self.out_layer = nn.Linear(stat_dim, num_class)\n",
    "        self.dropout = nn.Dropout()\n",
    "        self.log_softmax = nn.LogSoftmax(dim=-1)\n",
    "        # Implements the Hw message function\n",
    "        self.Hw = Hw(feat_dim, stat_dim)\n",
    "        # Grouped sum of messages per source node\n",
    "        self.Aggr = AggrSum(node_num)\n",
    "        \n",
    "    def forward(self, feat_Matrix, X_Node, X_Neis, dg_list):\n",
    "        node_embeds = torch.index_select(input=feat_Matrix,\n",
    "                                         dim=0,\n",
    "                                         index=X_Node)  # (N, ln)\n",
    "        neis_embeds = torch.index_select(input=feat_Matrix,\n",
    "                                         dim=0,\n",
    "                                         index=X_Neis)  # (N, ln)\n",
    "        X = torch.cat((node_embeds, neis_embeds), 1)    # (N, 2 * ln)\n",
    "        # Initial states are all zeros, as prescribed by the paper\n",
    "        H = torch.zeros((feat_Matrix.shape[0], self.stat_dim), dtype=torch.float32)  # (V, s)\n",
    "        H = H.to(feat_Matrix.device)\n",
    "        # Run the state update T times\n",
    "        for t in range(self.T):\n",
    "            # (V, s) -> (N, s)\n",
    "            H = torch.index_select(H, 0, X_Node)\n",
    "            # (N, s) -> (N, s)\n",
    "            H = self.Hw(X, H, dg_list)\n",
    "            # (N, s) -> (V, s)\n",
    "            H = self.Aggr(H, X_Node)\n",
    "        # out = torch.cat((feat_Matrix, H), 1)   # (V, ln+s)\n",
    "        # NOTE(review): dropout is applied after the output layer; it is\n",
    "        # usually placed before it - confirm this is intended\n",
    "        out = self.log_softmax(self.dropout(self.out_layer(H)))\n",
    "        return out  # (V, num_class)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 模型训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Split dataset: ~1708 training nodes, last 500 test nodes, no validation\n",
    "# Bool masks: uint8 mask indexing is deprecated (removed in newer PyTorch)\n",
    "train_mask = torch.zeros(node_num, dtype=torch.bool)\n",
    "train_mask[:node_num - 1000] = True\n",
    "val_mask = None\n",
    "test_mask = torch.zeros(node_num, dtype=torch.bool)\n",
    "test_mask[node_num - 500:] = True\n",
    "\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "model = OriLinearGNN(node_num, feat_dim, stat_dim, num_class, T).to(device)\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=1e-3)\n",
    "feat_Matrix = feat_Matrix.to(device)\n",
    "X_Node = X_Node.to(device)\n",
    "X_Neis = X_Neis.to(device)\n",
    "# Targets must live on the same device as the model output for nll_loss\n",
    "label_list = label_list.to(device)\n",
    "\n",
    "for epoch in range(200):\n",
    "    model.train()\n",
    "    optimizer.zero_grad()\n",
    "    \n",
    "    # Get output\n",
    "    out = model(feat_Matrix, X_Node, X_Neis, dg_list)\n",
    "    \n",
    "    # Loss on the training nodes only\n",
    "    loss = F.nll_loss(out[train_mask], label_list[train_mask])\n",
    "    _, pred = out.max(dim=1)\n",
    "    \n",
    "    # Get predictions and calculate training accuracy\n",
    "    correct = float(pred[train_mask].eq(label_list[train_mask]).sum().item())\n",
    "    acc = correct / train_mask.sum().item()\n",
    "    print('[Epoch {}/200] Loss {:.4f}, train acc {:.4f}'.format(epoch, loss.cpu().detach().data.item(), acc))\n",
    "    \n",
    "    # Backward\n",
    "    loss.backward()\n",
    "    optimizer.step()\n",
    "    \n",
    "    # Evaluation on test data every 10 epochs\n",
    "    if (epoch+1) % 10 == 0:\n",
    "        model.eval()\n",
    "        with torch.no_grad():  # no autograd bookkeeping during evaluation\n",
    "            _, pred = model(feat_Matrix, X_Node, X_Neis, dg_list).max(dim=1)\n",
    "        correct = float(pred[test_mask].eq(label_list[test_mask]).sum().item())\n",
    "        acc = correct / test_mask.sum().item()\n",
    "        print('Accuracy: {:.4f}'.format(acc))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### GCN模型"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "GCN模型使用简单的两层GCN，实现方式使用原生Pytorch实现，模型和PyG的GCN模型的实现相同，只是框架使用Pytorch实现。  \n",
    "关键要点如下：\n",
    "+ 每个节点只保存一个特征向量$\\mathbf{x}_i$，该特征向量的维度称为in_channel。\n",
    "+ 每个节点向周围传递信息，对每个节点进行线性变换以得到该节点需要传递出去的信息。\n",
    "+ 使用求和的方式来聚合信息。"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "#### 准备数据(提前运行前面的“数据处理”部分)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Split dataset (bool masks; uint8 mask indexing is deprecated in PyTorch)\n",
    "train_mask = torch.zeros(node_num, dtype=torch.bool)\n",
    "train_mask[:node_num - 1000] = True              # ~1708 training nodes\n",
    "val_mask = None                                  # no validation split\n",
    "test_mask = torch.zeros(node_num, dtype=torch.bool)\n",
    "test_mask[node_num - 500:] = True                # 500 test nodes\n",
    "x = feat_Matrix\n",
    "edge_index = torch.from_numpy(cites)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 75,
   "metadata": {},
   "outputs": [],
   "source": [
    "'''\n",
    "Same aggregation as in the Linear GNN section: sums per-edge rows of H\n",
    "grouped by their source-node index.\n",
    "'''\n",
    "class AggrSum(nn.Module):\n",
    "    def __init__(self, node_num):\n",
    "        super(AggrSum, self).__init__()\n",
    "        self.V = node_num\n",
    "    \n",
    "    def forward(self, H, X_node):\n",
    "        # H : (N, s), X_node : (N, ) -> (V, s)\n",
    "        # Scatter-add replaces the old dense (V, N) mask + matmul, which\n",
    "        # used the deprecated torch.range, needed O(V*N) memory and was\n",
    "        # built on CPU regardless of H's device.\n",
    "        out = torch.zeros(self.V, H.size(1), dtype=H.dtype, device=H.device)\n",
    "        out.index_add_(0, X_node, H)\n",
    "        return out\n",
    "\n",
    "'''\n",
    "One GCN convolution block (same propagation rule as PyG's GCNConv).\n",
    "Initialize :\n",
    "Input :\n",
    "    in_channel : (int) input node feature dimension\n",
    "    out_channel : (int) output node feature dimension\n",
    "Forward :\n",
    "Input :\n",
    "    x : (Tensor) node feature matrix, shape (N, in_channel), N = number of nodes\n",
    "    edge_index : (Tensor) edge matrix, shape (2, E), E = number of edges\n",
    "Output :\n",
    "    out : (Tensor) new feature matrix, shape (N, out_channel)\n",
    "'''\n",
    "class GCNConv(nn.Module):\n",
    "    def __init__(self, in_channel, out_channel, node_num):\n",
    "        super(GCNConv, self).__init__()\n",
    "        self.linear = nn.Linear(in_channel, out_channel)\n",
    "        self.aggregation = AggrSum(node_num)\n",
    "        \n",
    "    def forward(self, x, edge_index):\n",
    "        # Add self-connect edges so every node keeps its own features\n",
    "        edge_index = self.addSelfConnect(edge_index, x.shape[0])\n",
    "        \n",
    "        # Apply linear transform\n",
    "        x = self.linear(x)\n",
    "        \n",
    "        # Symmetric normalization: 1 / sqrt(deg_i * deg_j) per edge.\n",
    "        # Self-loops guarantee every degree is >= 1, so pow(-0.5) is finite.\n",
    "        row, col = edge_index\n",
    "        deg = self.calDegree(row, x.shape[0]).float()\n",
    "        deg_inv_sqrt = deg.pow(-0.5)  # (N, ); renamed from deg_sqrt, which was misleading\n",
    "        norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]\n",
    "        \n",
    "        # Gather the target-node features of every edge and normalize\n",
    "        tar_matrix = torch.index_select(x, dim=0, index=col)\n",
    "        tar_matrix = norm.view(-1, 1) * tar_matrix  # (E, out_channel)\n",
    "        # Aggregate information per source node\n",
    "        aggr = self.aggregation(tar_matrix, row)  # (N, out_channel)\n",
    "        return aggr\n",
    "    \n",
    "    def calDegree(self, edges, num_nodes):\n",
    "        # Degree of each node counted over the source row; nodes that\n",
    "        # never appear keep degree 0.\n",
    "        ind, deg = np.unique(edges.cpu().numpy(), return_counts=True)\n",
    "        deg_tensor = torch.zeros((num_nodes, ), dtype=torch.long)\n",
    "        deg_tensor[ind] = torch.from_numpy(deg)\n",
    "        return deg_tensor.to(edges.device)\n",
    "    \n",
    "    def addSelfConnect(self, edge_index, num_nodes):\n",
    "        # torch.arange replaces the deprecated (and since removed) torch.range\n",
    "        selfconn = torch.stack([torch.arange(num_nodes, dtype=torch.long)]*2,\n",
    "                               dim=0).to(edge_index.device)\n",
    "        return torch.cat(tensors=[edge_index, selfconn],\n",
    "                         dim=1)\n",
    "    \n",
    "        \n",
    "'''\n",
    "The full model: two GCN layers. The first maps the node features\n",
    "    (N, in_channel) -> (N, 16)\n",
    "the second outputs the class scores\n",
    "    (N, 16) -> (N, num_class)\n",
    "ReLU activation in between; log-softmax normalizes the final scores.\n",
    "'''\n",
    "class Net(nn.Module):\n",
    "    def __init__(self, feat_dim, num_class, num_node):\n",
    "        super(Net, self).__init__()\n",
    "        self.conv1 = GCNConv(feat_dim, 16, num_node)\n",
    "        self.conv2 = GCNConv(16, num_class, num_node)\n",
    "    \n",
    "    def forward(self, x, edge_index):\n",
    "        x = self.conv1(x, edge_index)\n",
    "        x = F.relu(x)\n",
    "        # Dropout is only active in training mode\n",
    "        x = F.dropout(x, training=self.training)\n",
    "        x = self.conv2(x, edge_index)\n",
    "        \n",
    "        return F.log_softmax(x, dim=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "'''\n",
    "Train and evaluate the two-layer GCN.\n",
    "'''\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "model = Net(feat_dim, num_class, node_num).to(device)\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=0.01, weight_decay=5e-4)\n",
    "x = x.to(device)\n",
    "edge_index = edge_index.to(device)\n",
    "# Targets must live on the same device as the model output for nll_loss\n",
    "label_list = label_list.to(device)\n",
    "\n",
    "for epoch in range(200):\n",
    "    model.train()\n",
    "    optimizer.zero_grad()\n",
    "    \n",
    "    # Get output\n",
    "    out = model(x, edge_index)\n",
    "    \n",
    "    # Loss on the training nodes only\n",
    "    loss = F.nll_loss(out[train_mask], label_list[train_mask])\n",
    "    _, pred = out.max(dim=1)\n",
    "    \n",
    "    # Get predictions and calculate training accuracy\n",
    "    correct = float(pred[train_mask].eq(label_list[train_mask]).sum().item())\n",
    "    acc = correct / train_mask.sum().item()\n",
    "    print('[Epoch {}/200] Loss {:.4f}, train acc {:.4f}'.format(epoch, loss.cpu().detach().data.item(), acc))\n",
    "    \n",
    "    # Backward\n",
    "    loss.backward()\n",
    "    optimizer.step()\n",
    "    \n",
    "    # Evaluation on test data every 10 epochs\n",
    "    if (epoch+1) % 10 == 0:\n",
    "        model.eval()\n",
    "        with torch.no_grad():  # no autograd bookkeeping during evaluation\n",
    "            _, pred = model(x, edge_index).max(dim=1)\n",
    "        correct = float(pred[test_mask].eq(label_list[test_mask]).sum().item())\n",
    "        acc = correct / test_mask.sum().item()\n",
    "        print('Accuracy: {:.4f}'.format(acc))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.4"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
