{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 介绍\n",
    "\n",
    "使用原生的PyTorch实现GNN模型, 数据使用的是Cora"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 导入库"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import numpy as np\n",
    "from matplotlib import pyplot as plt\n",
    "\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "from torch.nn import functional as F"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "from utils import data"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 数据处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"\n",
    "node_num, feat_num, stat_dim, num_classes, T\n",
    "feat_matrix, x_node, x_neis, dg_list\n",
    "\"\"\"\n",
    "\n",
    "content_path = \"./data/cora/cora.content\"\n",
    "cite_path = \"./data/cora/cora.cites\"\n",
    "\n",
    "# 读取文本内容\n",
    "with open(content_path, \"r\") as fp:\n",
    "    contents = fp.readlines()\n",
    "with open(cite_path, \"r\") as fp:\n",
    "    cites = fp.readlines()\n",
    "    \n",
    "# vetices\n",
    "contents = np.array([np.array(l.strip().split(\"\\t\")) for l in contents])\n",
    "papers, feat_arr, labels = np.split(contents, [1,-1], axis= 1)\n",
    "papers, labels = np.squeeze(papers), np.squeeze(labels)\n",
    "paper_list = list(set(papers))\n",
    "label_list = list(set(labels))\n",
    "index2paper = dict(enumerate(paper_list))\n",
    "index2label = dict(enumerate(label_list))\n",
    "paper2index = {v:k for k,v in index2paper.items()}\n",
    "label2index = {v:k for k,v in index2label.items()}\n",
    "\n",
    "# edges\n",
    "cites = [i.strip().split(\"\\t\") for i in cites]\n",
    "cites = np.array([\n",
    "    [paper2index[cite[0]], paper2index[cite[1]]] for cite in cites\n",
    "], np.int64).T\n",
    "cites = np.concatenate([cites, cites[::-1, :]], axis= 1)\n",
    "\n",
    "# degree\n",
    "degree_dict = {}\n",
    "degree_count = {}\n",
    "edge_num = cites.shape[1]\n",
    "for i in range(edge_num):\n",
    "    vertex = cites[0, i]\n",
    "    count = degree_count.get(vertex, 0)\n",
    "    neighbors = degree_dict.get(vertex, [])\n",
    "    count += 1\n",
    "    neighbors.append(cites[1, i])\n",
    "    degree_count[vertex] = count\n",
    "    degree_dict[vertex] = neighbors\n",
    "degree_list = []\n",
    "for i in range(node_num):\n",
    "    degree_list.append(degree_dict[i])\n",
    "degree_list = np.array(degree_list)\n",
    "    \n",
    "# parameters\n",
    "node_num = len(paper2index)\n",
    "feat_dim = feat_arr.shape[1]\n",
    "stat_dim = 32 # 状态的dim\n",
    "num_classes = len(label2index)\n",
    "T = 2\n",
    "feat_matrix = torch.from_numpy(feat_arr.astype(np.float32))\n",
    "x_node, x_neis = np.split(cites, 2, axis= 0)\n",
    "x_node, x_neis = np.squeeze(x_node), np.squeeze(x_neis)\n",
    "x_node, x_neis = torch.from_numpy(x_node), torch.from_numpy(x_neis)\n",
    "dg_list = degree_list[x_node]\n",
    "labels = np.array([label2index[label] for label in labels])\n",
    "labels = torch.from_numpy(labels)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 数据处理的结果"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 59,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "******************** Data process info ********************\n",
      "==> Number of nodes: 2708\n",
      "==> Number of edges: 5429.0\n",
      "==> Number of classes: 7\n",
      "==> Dimension of node features: 1433\n",
      "==> Dimension of node state: 32\n",
      "==> T: 2\n",
      "==> Shape of feature matrix:torch.Size([2708, 1433])\n",
      "==> Shape of x_node: torch.Size([10858])\n",
      "==> Shape of x_neis: torch.Size([10858])\n",
      "==> Length of dg_list: 10858\n"
     ]
    }
   ],
   "source": [
    "print(\"{} Data process info {}\".format(\"*\"*20, \"*\" * 20))\n",
    "print(\"==> Number of nodes: {}\".format(node_num))\n",
    "print(\"==> Number of edges: {}\".format(edge_num / 2))\n",
    "print(\"==> Number of classes: {}\".format(num_classes))\n",
    "print(\"==> Dimension of node features: {}\".format(feat_dim))\n",
    "print(\"==> Dimension of node state: {}\".format(stat_dim))\n",
    "print(\"==> T: {}\".format(T))\n",
    "print(\"==> Shape of feature matrix:{}\".format(feat_matrix.shape))\n",
    "print(\"==> Shape of x_node: {}\".format(x_node.shape))\n",
    "print(\"==> Shape of x_neis: {}\".format(x_neis.shape))\n",
    "print(\"==> Length of dg_list: {}\".format(len(dg_list)))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Linear GNN 模型\n",
     "`Linear GNN`模型使用的是论文《The Graph Neural Network Model》提到的线性图神经网络的模型, 原论文将该模型应用在子图匹配问题上(本质也是节点分类的问题), 该实现将模型应用在`Cora`数据集上.\n",
    "\n",
    "模型的观点大致如下:\n",
    "\n",
    "* 每个节点持有两个向量$x_i$和$h^t_i$, 前者表示节点的特征向量, 后者表示节点的状态向量, 最初所有节点的初始状态向量为0, 即$h_i^0=\\mathbf{0},i\\ =\\ 1,2,\\cdots,N$\n",
    "* 通过模型对节点的状态向量进行迭代更新, 迭代次数为$T$次, 直到达到不动点.\n",
    "* 通过$T$时刻(即不动点处)节点的状态向量和特征向量来得到节点的输出"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 60,
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"\n",
    "实现论文中的Xi函数, 最为Hw函数的转换矩阵A, 根据结点对(i.j)的特征向量生成A矩阵, 其中ln是特征向量维度, s为状态向量维度.\n",
    "Initialization:\n",
    "Input:\n",
    "    ln: (int) 特征向量维度\n",
    "    s:  (int) 状态向量维度\n",
    "Forward:\n",
    "Input:\n",
    "    x: (Tensor) 结点对(i,j)的特征向量拼接起来, shape为(N,2*ln)\n",
    "Output:\n",
    "    out:(Tensor) A矩阵, shape为(N, s, s)\n",
    "\"\"\"\n",
    "class Xi(nn.Module):\n",
    "    def __init__(self, ln, s):\n",
    "        super(Xi, self).__init__()\n",
    "        self.ln = ln\n",
    "        self.s = s\n",
    "        \n",
    "        # 线性网络层\n",
    "        self.linear = nn.Linear(in_features= 2 * self.ln,\\\n",
    "                                out_features= self.s**2,\\\n",
    "                                bias= True)\n",
    "        # 激活函数\n",
    "        self.tanh = nn.Tanh()\n",
    "    \n",
    "    def forward(self, X):\n",
    "        bs = X.size()[0]\n",
    "        out = self.linear(X)\n",
    "        out = self.tanh(out)\n",
    "        return out.view(bs, self.s, self,s)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"\n",
    "实现论文中的Rou函数, 作为Hw函数的偏置项b\n",
    "Initialization :\n",
    "Input :\n",
    "    ln : (int)特征向量维度\n",
    "    s : (int)状态向量维度\n",
    "Forward :\n",
    "Input :\n",
    "    x : (Tensor)节点的特征向量矩阵，shape(N, ln)\n",
    "Output :\n",
    "    out : (Tensor)偏置矩阵，shape(N, s)\n",
    "\"\"\"\n",
    "class Rou(nn.Module):\n",
    "    def __init__(self, ln, s):\n",
    "        super(Rou, self).__init__()\n",
    "        self.linear = nn.Linear(in_features= ln,\\\n",
    "                               out_features = s,\\\n",
    "                               bias= True)\n",
    "        self.tanh = nn.Tanh()\n",
    "    \n",
    "    def forward(self, X):\n",
    "        return self.tanh(self.linear(X))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "\"\"\"\n",
     "Implements the Hw function, i.e. the message/state-transition function.\n",
     "Initialize :\n",
     "Input :\n",
     "    ln : (int) dimension of the node feature vector\n",
     "    s : (int) dimension of the node state vector\n",
     "    mu : (float) contraction coefficient chosen for the contraction map\n",
     "Forward :\n",
     "Input :\n",
     "    X : (Tensor) each row is the concatenation of the feature vectors of the two nodes of one edge, shape (N, 2*ln)\n",
     "    H : (Tensor) state vector of the source node corresponding to each row of X\n",
     "    dg_list : (list or Tensor) degree of the source node corresponding to each row of X\n",
     "Output :\n",
     "    out : (Tensor) output of the Hw function\n",
     "\"\"\"\n",
     "class Hw(nn.Module):\n",
     "    def __init__(self, ln, s, mu= 0.9):\n",
     "        super(Hw, self).__init__()\n",
     "        self.ln = ln\n",
     "        self.s = s\n",
     "        self.mu = mu\n",
     "        \n",
     "        # init net layers: Xi builds the transition matrix A, Rou the bias b\n",
     "        self.Xi = Xi(self.ln, self.s)\n",
     "        self.Rou = Rou(self.ln, self.s)\n",
     "        \n",
     "    def forward(self, X, H, dg_list):\n",
     "        # normalize dg_list to a tensor on the same device as X\n",
     "        if isinstance(dg_list, list) or isinstance(dg_list, np.ndarray):\n",
     "            dg_list = torch.Tensor(dg_list).to(X.device)\n",
     "        elif isinstance(dg_list, torch.Tensor):\n",
     "            pass\n",
     "        else:\n",
     "            raise TypeError(\"==> dg_list should be list, ndarray or tensor, not {}\".format(type(dg_list)))\n",
     "        # scale A by mu/s and by the source-node degree (contraction scaling per the paper)\n",
     "        A = (self.Xi(X) * self.mu / self.s) / dg_list.view(-1, 1, 1) # (N, S, S)\n",
     "        # first chunk of X is the source node's own feature vector\n",
     "        b = self.Rou(torch.chunk(X, chunks= 2, dim= 1)[0]) # (N, S)\n",
     "        # TODO: figure out exactly what shape/layout the input data has\n",
     "        out = torch.squeeze(torch.matmul(A, torch.unsqueeze(H, 2)), -1) + b # (N, S, S) * (N, S, 1) \n",
     "        return out"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 62,
   "metadata": {},
   "outputs": [],
   "source": [
    "?torch.chunk"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.7.0 64-bit ('base': conda)",
   "language": "python",
   "name": "python37064bitbasecondaabf376cf925543ae9ea1556fbd81947b"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
