{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2021-07-05T12:15:34.418496Z",
     "start_time": "2021-07-05T12:15:34.408705Z"
    }
   },
   "outputs": [],
   "source": [
    "import torch\n",
    "from torch_geometric.data import Data\n",
    "import torch.nn.functional as F\n",
    "from torch_geometric.nn import GCNConv, GATConv\n",
    "from torch_geometric.utils import add_self_loops\n",
    "from sklearn.manifold import TSNE\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2021-07-05T12:15:35.812715Z",
     "start_time": "2021-07-05T12:15:34.540885Z"
    }
   },
   "outputs": [],
   "source": [
    "path = \"data/cora/\"\n",
    "cites = path + \"cora.cites\"\n",
    "content = path + \"cora.content\"\n",
    "\n",
    "# 索引字典，转换到从0开始编码\n",
    "index_dict = dict()\n",
    "# 标签字典\n",
    "label_to_index = dict()\n",
    "\n",
    "features = []\n",
    "labels = []\n",
    "edge_index = []\n",
    "\n",
    "draw_edge_index = []\n",
    "\n",
    "with open(content,\"r\") as f:\n",
    "    nodes = f.readlines()\n",
    "    for node in nodes:\n",
    "        node_info = node.split()\n",
    "        index_dict[int(node_info[0])] = len(index_dict)\n",
    "        features.append([int(i) for i in node_info[1:-1]])\n",
    "        \n",
    "        label_str = node_info[-1]\n",
    "        if(label_str not in label_to_index.keys()):\n",
    "            label_to_index[label_str] = len(label_to_index)\n",
    "        labels.append(label_to_index[label_str])\n",
    "\n",
    "with open(cites,\"r\") as f:\n",
    "    edges = f.readlines()\n",
    "    for edge in edges:\n",
    "        start, end = edge.split()\n",
    "        edge_index.append([index_dict[int(start)],index_dict[int(end)]])\n",
    "        draw_edge_index.append([index_dict[int(start)],index_dict[int(end)]])\n",
    "        edge_index.append([index_dict[int(end)],index_dict[int(start)]])\n",
    "    \n",
    "labels = torch.LongTensor(labels)\n",
    "features = torch.FloatTensor(features)\n",
    "# 行归一化\n",
    "# features = torch.nn.functional.normalize(features, p=1, dim=1)\n",
    "\n",
    "edge_index =  torch.LongTensor(edge_index).t()\n",
    "# 添加自环, GCN会默认添加自环, 这里不需要额外处理\n",
    "# edge_index, _ = add_self_loops(edge_index,num_nodes=len(index_dict))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2021-07-05T12:15:35.938055Z",
     "start_time": "2021-07-05T12:15:35.930804Z"
    }
   },
   "outputs": [],
   "source": [
    "# 固定种子\n",
    "seed = 1234\n",
    "torch.manual_seed(seed)\n",
    "torch.cuda.manual_seed(seed)\n",
    "torch.cuda.manual_seed_all(seed)  \n",
    "np.random.seed(seed)  # Numpy module.\n",
    "# random.seed(seed)  # Python random module.\n",
    "torch.manual_seed(seed)\n",
    "torch.backends.cudnn.benchmark = False\n",
    "torch.backends.cudnn.deterministic = True"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2021-07-05T12:15:36.055747Z",
     "start_time": "2021-07-05T12:15:36.045737Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Data(edge_index=[2, 10858], x=[2708, 1433], y=[2708])"
      ]
     },
     "execution_count": 25,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Random permutation of node indices used to build the data splits\n",
     "mask = torch.randperm(len(index_dict))\n",
     "\n",
     "# train_mask = mask[:140]\n",
     "# val_mask = mask[140:640]\n",
     "# test_mask = mask[1708:2708]\n",
     "\n",
     "# NOTE(review): val_mask starts at 1626, so mask[1625] belongs to\n",
     "# unlabeled_mask but to neither val nor test -- looks like an off-by-one\n",
     "# (val_mask = mask[1625:2167]?); confirm intent before changing.\n",
     "train_mask = mask[:1625]\n",
     "unlabeled_mask = mask[1625:]\n",
     "val_mask = mask[1626:2167]\n",
     "test_mask = mask[2167:]\n",
     "\n",
     "device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n",
     "\n",
     "cora = Data(x = features, edge_index = edge_index.contiguous(), y = labels).to(device)\n",
     "cora"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2021-07-05T12:15:36.168806Z",
     "start_time": "2021-07-05T12:15:36.163094Z"
    }
   },
   "outputs": [],
   "source": [
     "class MLP(torch.nn.Module):\n",
     "    \"\"\"Two-layer MLP baseline: input features -> 64 hidden units -> log-softmax over classes.\"\"\"\n",
     "    def __init__(self, num_feature, num_label):\n",
     "        super(MLP,self).__init__()\n",
     "        self.LP1 = torch.nn.Linear(num_feature, 64)\n",
     "        self.LP2 = torch.nn.Linear(64, num_label)\n",
     "        \n",
     "        # arcface (disabled): class-weight matrix for the angular-margin variant\n",
     "#         self.W = torch.nn.Parameter(torch.Tensor(16, num_label))\n",
     "#         torch.nn.init.kaiming_uniform_(self.W)\n",
     "        \n",
     "        self.dropout = torch.nn.Dropout(p=0.5)\n",
     "        \n",
     "    def forward(self, x):\n",
     "        # x: (num_nodes, num_feature); returns per-node class log-probabilities\n",
     "        x = self.LP1(x)\n",
     "        x = F.relu(x)\n",
     "        x = self.dropout(x)\n",
     "        x = self.LP2(x)\n",
     "        \n",
     "        # arcface (disabled): would return cosine scores for arcface_loss instead\n",
     "#         x = F.relu(x)\n",
     "#         x_norm = F.normalize(x)\n",
     "#         W_norm = F.normalize(self.W, dim=0)\n",
     "#         x = x @ self.W\n",
     "#         return x\n",
     "        \n",
     "        return F.log_softmax(x, dim=1)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2021-07-05T12:15:36.279907Z",
     "start_time": "2021-07-05T12:15:36.275178Z"
    }
   },
   "outputs": [],
   "source": [
    "#计算arcface损失\n",
    "def arcface_loss(cosine, targ, m=.4):\n",
    "    # m = 0.4, 相当于20度\n",
    "\n",
    "    # this prevents nan when a value slightly crosses 1.0 due to numerical error\n",
    "    # 防止数值错误，余弦值大于1\n",
    "    cosine = cosine.clip(-1+1e-7, 1-1e-7) \n",
    "    # Step 3:\n",
    "    # 计算arccos值\n",
    "    arcosine = cosine.arccos()\n",
    "    # Step 4:\n",
    "    # 给真标签角度添加一个常数项m\n",
    "    arcosine += F.one_hot(targ, num_classes = len(label_to_index)) * m\n",
    "    # Step 5:\n",
    "    # 变换回cos值\n",
    "    cosine2 = arcosine.cos()\n",
    "    # Step 6:\n",
    "    # 使用交叉熵损失计算\n",
    "    return F.cross_entropy(cosine2, targ)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2021-07-05T12:18:05.475205Z",
     "start_time": "2021-07-05T12:18:04.952174Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 0 loss: 1.9164\n",
      "epoch: 1 loss: 1.8069\n",
      "epoch: 2 loss: 1.6676\n",
      "epoch: 3 loss: 1.5286\n",
      "epoch: 4 loss: 1.3673\n",
      "epoch: 5 loss: 1.2136\n",
      "epoch: 6 loss: 1.0709\n",
      "epoch: 7 loss: 0.9199\n",
      "epoch: 8 loss: 0.8066\n",
      "epoch: 9 loss: 0.6937\n",
      "Accuracy: 0.7431\n",
      "epoch: 10 loss: 0.6130\n",
      "epoch: 11 loss: 0.5221\n",
      "epoch: 12 loss: 0.4600\n",
      "epoch: 13 loss: 0.4084\n",
      "epoch: 14 loss: 0.3684\n",
      "epoch: 15 loss: 0.3187\n",
      "epoch: 16 loss: 0.2977\n",
      "epoch: 17 loss: 0.2478\n",
      "epoch: 18 loss: 0.2241\n",
      "epoch: 19 loss: 0.2135\n",
      "Accuracy: 0.7616\n",
      "epoch: 20 loss: 0.1861\n",
      "epoch: 21 loss: 0.1659\n",
      "epoch: 22 loss: 0.1491\n",
      "epoch: 23 loss: 0.1400\n",
      "epoch: 24 loss: 0.1311\n",
      "epoch: 25 loss: 0.1195\n",
      "epoch: 26 loss: 0.1060\n",
      "epoch: 27 loss: 0.1069\n",
      "epoch: 28 loss: 0.1020\n",
      "epoch: 29 loss: 0.0884\n",
      "Accuracy: 0.7523\n",
      "epoch: 30 loss: 0.0896\n",
      "epoch: 31 loss: 0.0853\n",
      "epoch: 32 loss: 0.0762\n",
      "epoch: 33 loss: 0.0768\n",
      "epoch: 34 loss: 0.0684\n",
      "epoch: 35 loss: 0.0738\n",
      "epoch: 36 loss: 0.0690\n",
      "epoch: 37 loss: 0.0701\n",
      "epoch: 38 loss: 0.0665\n",
      "epoch: 39 loss: 0.0640\n",
      "Accuracy: 0.7431\n",
      "epoch: 40 loss: 0.0638\n",
      "epoch: 41 loss: 0.0611\n",
      "epoch: 42 loss: 0.0684\n",
      "epoch: 43 loss: 0.0626\n",
      "epoch: 44 loss: 0.0613\n",
      "epoch: 45 loss: 0.0620\n",
      "epoch: 46 loss: 0.0604\n",
      "epoch: 47 loss: 0.0606\n",
      "epoch: 48 loss: 0.0655\n",
      "epoch: 49 loss: 0.0592\n",
      "Accuracy: 0.7412\n",
      "epoch: 50 loss: 0.0616\n",
      "epoch: 51 loss: 0.0599\n",
      "epoch: 52 loss: 0.0572\n",
      "epoch: 53 loss: 0.0604\n",
      "epoch: 54 loss: 0.0560\n",
      "epoch: 55 loss: 0.0590\n",
      "epoch: 56 loss: 0.0633\n",
      "epoch: 57 loss: 0.0613\n",
      "epoch: 58 loss: 0.0628\n",
      "epoch: 59 loss: 0.0573\n",
      "Accuracy: 0.7560\n",
      "epoch: 60 loss: 0.0588\n",
      "epoch: 61 loss: 0.0583\n",
      "epoch: 62 loss: 0.0590\n",
      "epoch: 63 loss: 0.0593\n",
      "epoch: 64 loss: 0.0583\n",
      "epoch: 65 loss: 0.0632\n",
      "epoch: 66 loss: 0.0621\n",
      "epoch: 67 loss: 0.0565\n",
      "epoch: 68 loss: 0.0608\n",
      "epoch: 69 loss: 0.0563\n",
      "Accuracy: 0.7431\n",
      "epoch: 70 loss: 0.0523\n",
      "epoch: 71 loss: 0.0571\n",
      "epoch: 72 loss: 0.0575\n",
      "epoch: 73 loss: 0.0559\n",
      "epoch: 74 loss: 0.0568\n",
      "epoch: 75 loss: 0.0537\n",
      "epoch: 76 loss: 0.0521\n",
      "epoch: 77 loss: 0.0556\n",
      "epoch: 78 loss: 0.0559\n",
      "epoch: 79 loss: 0.0530\n",
      "Accuracy: 0.7320\n",
      "epoch: 80 loss: 0.0513\n",
      "epoch: 81 loss: 0.0513\n",
      "epoch: 82 loss: 0.0558\n",
      "epoch: 83 loss: 0.0541\n",
      "epoch: 84 loss: 0.0535\n",
      "epoch: 85 loss: 0.0478\n",
      "epoch: 86 loss: 0.0477\n",
      "epoch: 87 loss: 0.0554\n",
      "epoch: 88 loss: 0.0521\n",
      "epoch: 89 loss: 0.0459\n",
      "Accuracy: 0.7394\n",
      "epoch: 90 loss: 0.0490\n",
      "epoch: 91 loss: 0.0510\n",
      "epoch: 92 loss: 0.0530\n",
      "epoch: 93 loss: 0.0477\n",
      "epoch: 94 loss: 0.0468\n",
      "epoch: 95 loss: 0.0528\n",
      "epoch: 96 loss: 0.0453\n",
      "epoch: 97 loss: 0.0504\n",
      "epoch: 98 loss: 0.0471\n",
      "epoch: 99 loss: 0.0511\n",
      "Accuracy: 0.7486\n",
      "epoch: 100 loss: 0.0486\n",
      "epoch: 101 loss: 0.0465\n",
      "epoch: 102 loss: 0.0488\n",
      "epoch: 103 loss: 0.0500\n",
      "epoch: 104 loss: 0.0456\n",
      "epoch: 105 loss: 0.0449\n",
      "epoch: 106 loss: 0.0468\n",
      "epoch: 107 loss: 0.0454\n",
      "epoch: 108 loss: 0.0421\n",
      "epoch: 109 loss: 0.0465\n",
      "Accuracy: 0.7468\n",
      "epoch: 110 loss: 0.0455\n",
      "epoch: 111 loss: 0.0481\n",
      "epoch: 112 loss: 0.0452\n",
      "epoch: 113 loss: 0.0452\n",
      "epoch: 114 loss: 0.0416\n",
      "epoch: 115 loss: 0.0427\n",
      "epoch: 116 loss: 0.0443\n",
      "epoch: 117 loss: 0.0417\n",
      "epoch: 118 loss: 0.0425\n",
      "epoch: 119 loss: 0.0420\n",
      "Accuracy: 0.7431\n",
      "epoch: 120 loss: 0.0444\n",
      "epoch: 121 loss: 0.0451\n",
      "epoch: 122 loss: 0.0488\n",
      "epoch: 123 loss: 0.0423\n",
      "epoch: 124 loss: 0.0470\n",
      "epoch: 125 loss: 0.0431\n",
      "epoch: 126 loss: 0.0438\n",
      "epoch: 127 loss: 0.0422\n",
      "epoch: 128 loss: 0.0430\n",
      "epoch: 129 loss: 0.0408\n",
      "Accuracy: 0.7338\n",
      "epoch: 130 loss: 0.0445\n",
      "epoch: 131 loss: 0.0407\n",
      "epoch: 132 loss: 0.0418\n",
      "epoch: 133 loss: 0.0400\n",
      "epoch: 134 loss: 0.0373\n",
      "epoch: 135 loss: 0.0406\n",
      "epoch: 136 loss: 0.0394\n",
      "epoch: 137 loss: 0.0427\n",
      "epoch: 138 loss: 0.0390\n",
      "epoch: 139 loss: 0.0446\n",
      "Accuracy: 0.7468\n",
      "epoch: 140 loss: 0.0392\n",
      "epoch: 141 loss: 0.0387\n",
      "epoch: 142 loss: 0.0427\n",
      "epoch: 143 loss: 0.0415\n",
      "epoch: 144 loss: 0.0400\n",
      "epoch: 145 loss: 0.0361\n",
      "epoch: 146 loss: 0.0418\n",
      "epoch: 147 loss: 0.0396\n",
      "epoch: 148 loss: 0.0401\n",
      "epoch: 149 loss: 0.0399\n",
      "Accuracy: 0.7338\n",
      "epoch: 150 loss: 0.0432\n",
      "epoch: 151 loss: 0.0395\n",
      "epoch: 152 loss: 0.0374\n",
      "epoch: 153 loss: 0.0431\n",
      "epoch: 154 loss: 0.0372\n",
      "epoch: 155 loss: 0.0398\n",
      "epoch: 156 loss: 0.0382\n",
      "epoch: 157 loss: 0.0405\n",
      "epoch: 158 loss: 0.0395\n",
      "epoch: 159 loss: 0.0389\n",
      "Accuracy: 0.7375\n",
      "epoch: 160 loss: 0.0334\n",
      "epoch: 161 loss: 0.0418\n",
      "epoch: 162 loss: 0.0369\n",
      "epoch: 163 loss: 0.0378\n",
      "epoch: 164 loss: 0.0372\n",
      "epoch: 165 loss: 0.0361\n",
      "epoch: 166 loss: 0.0358\n",
      "epoch: 167 loss: 0.0413\n",
      "epoch: 168 loss: 0.0385\n",
      "epoch: 169 loss: 0.0362\n",
      "Accuracy: 0.7449\n",
      "epoch: 170 loss: 0.0399\n",
      "epoch: 171 loss: 0.0348\n",
      "epoch: 172 loss: 0.0346\n",
      "epoch: 173 loss: 0.0396\n",
      "epoch: 174 loss: 0.0383\n",
      "epoch: 175 loss: 0.0404\n",
      "epoch: 176 loss: 0.0372\n",
      "epoch: 177 loss: 0.0380\n",
      "epoch: 178 loss: 0.0381\n",
      "epoch: 179 loss: 0.0367\n",
      "Accuracy: 0.7449\n",
      "epoch: 180 loss: 0.0391\n",
      "epoch: 181 loss: 0.0387\n",
      "epoch: 182 loss: 0.0379\n",
      "epoch: 183 loss: 0.0372\n",
      "epoch: 184 loss: 0.0333\n",
      "epoch: 185 loss: 0.0363\n",
      "epoch: 186 loss: 0.0377\n",
      "epoch: 187 loss: 0.0360\n",
      "epoch: 188 loss: 0.0374\n",
      "epoch: 189 loss: 0.0349\n",
      "Accuracy: 0.7375\n",
      "epoch: 190 loss: 0.0416\n",
      "epoch: 191 loss: 0.0375\n",
      "epoch: 192 loss: 0.0343\n",
      "epoch: 193 loss: 0.0367\n",
      "epoch: 194 loss: 0.0366\n",
      "epoch: 195 loss: 0.0354\n",
      "epoch: 196 loss: 0.0371\n",
      "epoch: 197 loss: 0.0338\n",
      "epoch: 198 loss: 0.0332\n",
      "epoch: 199 loss: 0.0382\n",
      "Accuracy: 0.7449\n"
     ]
    }
   ],
   "source": [
     "# Train the MLP on the labeled split for 200 full-batch epochs\n",
     "model1 = MLP(features.shape[1], len(label_to_index)).to(device)\n",
     "\n",
     "optimizer = torch.optim.Adam(model1.parameters(), lr=0.01, weight_decay=5e-4)\n",
     "\n",
     "for epoch in range(200):\n",
     "    optimizer.zero_grad()\n",
     "    out = model1(cora.x[train_mask])\n",
     "    loss = F.nll_loss(out, cora.y[train_mask])\n",
     "#     loss = arcface_loss(out, cora.y[train_mask])\n",
     "    print('epoch: %d loss: %.4f' %(epoch, loss))\n",
     "    loss.backward()\n",
     "    optimizer.step()\n",
     "    \n",
     "    # Evaluate every 10 epochs.\n",
     "    # NOTE(review): this monitors accuracy on test_mask during training\n",
     "    # (test-set peeking); val_mask would be the conventional choice. Also\n",
     "    # consider torch.no_grad() here to avoid building an autograd graph.\n",
     "    if((epoch + 1)% 10 == 0):\n",
     "        model1.eval()\n",
     "        _, pred = model1(cora.x[test_mask]).max(dim=1)\n",
     "        correct = int(pred.eq(cora.y[test_mask]).sum().item())\n",
     "        acc = correct / len(test_mask)\n",
     "        print('Accuracy: {:.4f}'.format(acc))\n",
     "        model1.train()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2021-07-05T12:18:08.650310Z",
     "start_time": "2021-07-05T12:18:08.639520Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Accuracy: 0.7449\n"
     ]
    }
   ],
   "source": [
    "model1.eval()\n",
    "out1 = model1(cora.x)\n",
    "_, pred = out1.max(dim=1)\n",
    "correct = int(pred[test_mask].eq(cora.y[test_mask]).sum().item())\n",
    "acc = correct / len(test_mask)\n",
    "print('Accuracy: {:.4f}'.format(acc))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2021-07-05T12:18:14.908001Z",
     "start_time": "2021-07-05T12:18:09.324079Z"
    }
   },
   "outputs": [],
   "source": [
    "ts = TSNE(n_components=2)\n",
    "ts.fit_transform(out1.to('cpu').detach().numpy())\n",
    "\n",
    "x1 = ts.embedding_\n",
    "y1 = cora.y.to('cpu').detach().numpy()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2021-07-05T12:18:15.303150Z",
     "start_time": "2021-07-05T12:18:15.011992Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'/home/songwenyu/gnnlab/mlp.html'"
      ]
     },
     "execution_count": 36,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import pyecharts.options as opts\n",
    "from pyecharts.charts import Graph\n",
    "\n",
    "from pyecharts.globals import CurrentConfig, NotebookType\n",
    "CurrentConfig.NOTEBOOK_TYPE = NotebookType.NTERACT\n",
    "\n",
    "colors = ['#191970','#008000','#FF0000','#FFFF00', '#00FFFF', '#C71585', '#00FA9A']\n",
    "\n",
    "nodes = []\n",
    "\n",
    "for i in range(cora.x.shape[0]):\n",
    "    nodes.append(\n",
    "                {\"x\": int(x1[i][0]),\n",
    "                 \"y\": int(x1[i][1]),\n",
    "                 \"id\": str(i),\n",
    "                 \"name\": str(i),\n",
    "                 \"symbolSize\": 3,\n",
    "                 \"itemStyle\": {\"normal\": {\"color\": colors[y1[i]]}}}\n",
    "                )\n",
    "    \n",
    "edges = [\n",
    "    {\"source\": str(edge[0]), \"target\": str(edge[1])} for edge in draw_edge_index\n",
    "]\n",
    "\n",
    "(\n",
    "    Graph(init_opts=opts.InitOpts(width=\"1600px\", height=\"800px\"))\n",
    "    .add(\n",
    "        series_name=\"\",\n",
    "        nodes=nodes,\n",
    "        links=edges,\n",
    "        layout=\"none\",\n",
    "        is_roam=True,\n",
    "        is_focusnode=True,\n",
    "        label_opts=opts.LabelOpts(is_show=False),\n",
    "        linestyle_opts=opts.LineStyleOpts(width=0.5, curve=0, opacity=0.7),\n",
    "    )\n",
    "    .set_global_opts(title_opts=opts.TitleOpts(title=\"NPM Dependencies\"))\n",
    "#     .render_notebook()\n",
    "    .render(\"mlp.html\")\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2021-07-05T12:18:15.637950Z",
     "start_time": "2021-07-05T12:18:15.364640Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'/home/songwenyu/gnnlab/mlp_pred.html'"
      ]
     },
     "execution_count": 37,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import pyecharts.options as opts\n",
    "from pyecharts.charts import Graph\n",
    "\n",
    "from pyecharts.globals import CurrentConfig, NotebookType\n",
    "CurrentConfig.NOTEBOOK_TYPE = NotebookType.NTERACT\n",
    "\n",
    "colors = ['#191970','#008000','#FF0000','#FFFF00', '#00FFFF', '#C71585', '#00FA9A']\n",
    "\n",
    "nodes = []\n",
    "\n",
    "for i in range(cora.x.shape[0]):\n",
    "    nodes.append(\n",
    "                {\"x\": int(x1[i][0]),\n",
    "                 \"y\": int(x1[i][1]),\n",
    "                 \"id\": str(i),\n",
    "                 \"name\": str(i),\n",
    "                 \"symbolSize\": 3,\n",
    "                 \"itemStyle\": {\"normal\": {\"color\": colors[pred[i]]}}}\n",
    "                )\n",
    "    \n",
    "edges = [\n",
    "    {\"source\": str(edge[0]), \"target\": str(edge[1])} for edge in draw_edge_index\n",
    "]\n",
    "\n",
    "(\n",
    "    Graph(init_opts=opts.InitOpts(width=\"1600px\", height=\"800px\"))\n",
    "    .add(\n",
    "        series_name=\"\",\n",
    "        nodes=nodes,\n",
    "        links=edges,\n",
    "        layout=\"none\",\n",
    "        is_roam=True,\n",
    "        is_focusnode=True,\n",
    "        label_opts=opts.LabelOpts(is_show=False),\n",
    "        linestyle_opts=opts.LineStyleOpts(width=0.5, curve=0, opacity=0.7),\n",
    "    )\n",
    "    .set_global_opts(title_opts=opts.TitleOpts(title=\"NPM Dependencies\"))\n",
    "#     .render_notebook()\n",
    "    .render(\"mlp_pred.html\")\n",
    ")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "song",
   "language": "python",
   "name": "song"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.13"
  },
  "latex_envs": {
   "LaTeX_envs_menu_present": true,
   "autoclose": false,
   "autocomplete": true,
   "bibliofile": "biblio.bib",
   "cite_by": "apalike",
   "current_citInitial": 1,
   "eqLabelWithNumbers": true,
   "eqNumInitial": 1,
   "hotkeys": {
    "equation": "Ctrl-E",
    "itemize": "Ctrl-I"
   },
   "labels_anchors": false,
   "latex_user_defs": false,
   "report_style_numbering": false,
   "user_envs_cfg": false
  },
  "varInspector": {
   "cols": {
    "lenName": 16,
    "lenType": 16,
    "lenVar": 40
   },
   "kernels_config": {
    "python": {
     "delete_cmd_postfix": "",
     "delete_cmd_prefix": "del ",
     "library": "var_list.py",
     "varRefreshCmd": "print(var_dic_list())"
    },
    "r": {
     "delete_cmd_postfix": ") ",
     "delete_cmd_prefix": "rm(",
     "library": "var_list.r",
     "varRefreshCmd": "cat(var_dic_list()) "
    }
   },
   "types_to_exclude": [
    "module",
    "function",
    "builtin_function_or_method",
    "instance",
    "_Feature"
   ],
   "window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
