{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "bfa27898-6359-4b99-8733-5901e4724e5a",
   "metadata": {},
   "source": [
    "#### PyTorch lib"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "58682398-56c2-4b38-8205-41d844c8e7bc",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch  \n",
    "from torch.nn import functional as F  \n",
    "from torch_geometric.nn import GCNConv  \n",
    "from torch_geometric.data import Data  "
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5b287113-0b3d-4172-a006-c84d6c2c302b",
   "metadata": {},
   "source": [
    "#### NLP lib"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "52b71a52-6dee-4cfc-99d1-8c27fc73daf9",
   "metadata": {},
   "outputs": [],
   "source": [
    "import spacy"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d4f076d7-ba1f-46b3-9031-d43bbe835da0",
   "metadata": {},
   "source": [
     "#### Math & graph libraries"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "10b9bc67-bdd5-43cb-9ca8-ab52ffe1a335",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np  \n",
    "import networkx as nx   # 图结构"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "ec05bb05-7ee5-457b-bacc-172ab0695cc8",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1.12.1+cu102\n"
     ]
    }
   ],
   "source": [
    "import torch  \n",
    "print(torch.__version__)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "09dfa598",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 定义一个简单的GCN模型  \n",
    "class GCN(torch.nn.Module):  \n",
    "    def __init__(self, num_features, hidden_channels, num_classes):  \n",
    "        super(GCN, self).__init__()  \n",
    "        self.conv1 = GCNConv(num_features, hidden_channels)  \n",
    "        self.conv2 = GCNConv(hidden_channels, num_classes)  \n",
    "  \n",
    "    def forward(self, x, edge_index):  \n",
    "        x = self.conv1(x, edge_index)  \n",
    "        x = F.relu(x)  \n",
    "        x = F.dropout(x, p=0.5, training=self.training)  \n",
    "        x = self.conv2(x, edge_index)  \n",
    "        return F.log_softmax(x, dim=1)  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f1699fed",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 生成模拟数据  \n",
    "num_nodes = 10  # 节点数量  \n",
    "num_features = 5  # 每个节点的特征数  \n",
    "num_classes = 2  # 类别数  \n",
    "edge_index = torch.tensor([[0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9],  \n",
    "                           [1, 0, 2, 1, 3, 2, 4, 3, 5, 4, 6, 5, 7, 6, 8, 7, 9, 8]], dtype=torch.long)  # 模拟的边索引  \n",
    "x = torch.rand((num_nodes, num_features))  # 随机生成节点特征矩阵  \n",
    "y = torch.randint(0, num_classes, (num_nodes,))  # 随机生成节点标签  \n",
    "  \n",
    "# 创建Data对象  \n",
    "data = Data(x=x, edge_index=edge_index, y=y)  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4e7c0c18",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Instantiate the model, optimizer, and loss function.  \n",
     "model = GCN(num_features, hidden_channels=16, num_classes=num_classes)  \n",
     "optimizer = torch.optim.Adam(model.parameters(), lr=0.01)  \n",
     "criterion = torch.nn.NLLLoss()  # negative log-likelihood; pairs with the model's log_softmax output  \n",
     "  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "4bc09d91-3f37-42b6-bd18-846250ad90f1",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch: 1, Loss: 0.6310\n",
      "Epoch: 2, Loss: 0.8111\n",
      "Epoch: 3, Loss: 0.6794\n",
      "Epoch: 4, Loss: 0.6991\n",
      "Epoch: 5, Loss: 0.7164\n",
      "Epoch: 6, Loss: 0.7150\n",
      "Epoch: 7, Loss: 0.5979\n",
      "Epoch: 8, Loss: 0.7228\n",
      "Epoch: 9, Loss: 0.7167\n",
      "Epoch: 10, Loss: 0.7302\n",
      "Epoch: 11, Loss: 0.6159\n",
      "Epoch: 12, Loss: 0.7528\n",
      "Epoch: 13, Loss: 0.7977\n",
      "Epoch: 14, Loss: 0.7000\n",
      "Epoch: 15, Loss: 0.7067\n",
      "Epoch: 16, Loss: 0.7275\n",
      "Epoch: 17, Loss: 0.6564\n",
      "Epoch: 18, Loss: 0.6587\n",
      "Epoch: 19, Loss: 0.6703\n",
      "Epoch: 20, Loss: 0.6354\n",
      "Epoch: 21, Loss: 0.6554\n",
      "Epoch: 22, Loss: 0.6742\n",
      "Epoch: 23, Loss: 0.6525\n",
      "Epoch: 24, Loss: 0.6577\n",
      "Epoch: 25, Loss: 0.6715\n",
      "Epoch: 26, Loss: 0.6805\n",
      "Epoch: 27, Loss: 0.6593\n",
      "Epoch: 28, Loss: 0.6350\n",
      "Epoch: 29, Loss: 0.6347\n",
      "Epoch: 30, Loss: 0.6300\n",
      "Epoch: 31, Loss: 0.6780\n",
      "Epoch: 32, Loss: 0.6575\n",
      "Epoch: 33, Loss: 0.6288\n",
      "Epoch: 34, Loss: 0.6294\n",
      "Epoch: 35, Loss: 0.6492\n",
      "Epoch: 36, Loss: 0.6471\n",
      "Epoch: 37, Loss: 0.5712\n",
      "Epoch: 38, Loss: 0.6402\n",
      "Epoch: 39, Loss: 0.6073\n",
      "Epoch: 40, Loss: 0.6231\n",
      "Epoch: 41, Loss: 0.6473\n",
      "Epoch: 42, Loss: 0.5677\n",
      "Epoch: 43, Loss: 0.6068\n",
      "Epoch: 44, Loss: 0.5975\n",
      "Epoch: 45, Loss: 0.6318\n",
      "Epoch: 46, Loss: 0.6434\n",
      "Epoch: 47, Loss: 0.6498\n",
      "Epoch: 48, Loss: 0.6652\n",
      "Epoch: 49, Loss: 0.5891\n",
      "Epoch: 50, Loss: 0.6149\n",
      "Epoch: 51, Loss: 0.6188\n",
      "Epoch: 52, Loss: 0.6127\n",
      "Epoch: 53, Loss: 0.6077\n",
      "Epoch: 54, Loss: 0.6192\n",
      "Epoch: 55, Loss: 0.6172\n",
      "Epoch: 56, Loss: 0.6708\n",
      "Epoch: 57, Loss: 0.6491\n",
      "Epoch: 58, Loss: 0.6007\n",
      "Epoch: 59, Loss: 0.6454\n",
      "Epoch: 60, Loss: 0.6013\n",
      "Epoch: 61, Loss: 0.6033\n",
      "Epoch: 62, Loss: 0.6076\n",
      "Epoch: 63, Loss: 0.5949\n",
      "Epoch: 64, Loss: 0.5915\n",
      "Epoch: 65, Loss: 0.6360\n",
      "Epoch: 66, Loss: 0.6207\n",
      "Epoch: 67, Loss: 0.5643\n",
      "Epoch: 68, Loss: 0.6166\n",
      "Epoch: 69, Loss: 0.5453\n",
      "Epoch: 70, Loss: 0.5813\n",
      "Epoch: 71, Loss: 0.6522\n",
      "Epoch: 72, Loss: 0.6607\n",
      "Epoch: 73, Loss: 0.5843\n",
      "Epoch: 74, Loss: 0.6000\n",
      "Epoch: 75, Loss: 0.6265\n",
      "Epoch: 76, Loss: 0.5833\n",
      "Epoch: 77, Loss: 0.6205\n",
      "Epoch: 78, Loss: 0.5551\n",
      "Epoch: 79, Loss: 0.5543\n",
      "Epoch: 80, Loss: 0.5988\n",
      "Epoch: 81, Loss: 0.5464\n",
      "Epoch: 82, Loss: 0.5817\n",
      "Epoch: 83, Loss: 0.5823\n",
      "Epoch: 84, Loss: 0.5739\n",
      "Epoch: 85, Loss: 0.5918\n",
      "Epoch: 86, Loss: 0.6592\n",
      "Epoch: 87, Loss: 0.5869\n",
      "Epoch: 88, Loss: 0.5515\n",
      "Epoch: 89, Loss: 0.6240\n",
      "Epoch: 90, Loss: 0.6107\n",
      "Epoch: 91, Loss: 0.5476\n",
      "Epoch: 92, Loss: 0.5666\n",
      "Epoch: 93, Loss: 0.5869\n",
      "Epoch: 94, Loss: 0.6249\n",
      "Epoch: 95, Loss: 0.5912\n",
      "Epoch: 96, Loss: 0.5630\n",
      "Epoch: 97, Loss: 0.5254\n",
      "Epoch: 98, Loss: 0.5908\n",
      "Epoch: 99, Loss: 0.6105\n",
      "Epoch: 100, Loss: 0.5920\n",
      "Epoch: 101, Loss: 0.5770\n",
      "Epoch: 102, Loss: 0.6026\n",
      "Epoch: 103, Loss: 0.5116\n",
      "Epoch: 104, Loss: 0.5150\n",
      "Epoch: 105, Loss: 0.6269\n",
      "Epoch: 106, Loss: 0.5581\n",
      "Epoch: 107, Loss: 0.6138\n",
      "Epoch: 108, Loss: 0.5466\n",
      "Epoch: 109, Loss: 0.5384\n",
      "Epoch: 110, Loss: 0.6627\n",
      "Epoch: 111, Loss: 0.5404\n",
      "Epoch: 112, Loss: 0.5001\n",
      "Epoch: 113, Loss: 0.5618\n",
      "Epoch: 114, Loss: 0.5822\n",
      "Epoch: 115, Loss: 0.5964\n",
      "Epoch: 116, Loss: 0.4924\n",
      "Epoch: 117, Loss: 0.5712\n",
      "Epoch: 118, Loss: 0.5743\n",
      "Epoch: 119, Loss: 0.6086\n",
      "Epoch: 120, Loss: 0.5643\n",
      "Epoch: 121, Loss: 0.6745\n",
      "Epoch: 122, Loss: 0.6209\n",
      "Epoch: 123, Loss: 0.5575\n",
      "Epoch: 124, Loss: 0.6332\n",
      "Epoch: 125, Loss: 0.5328\n",
      "Epoch: 126, Loss: 0.5819\n",
      "Epoch: 127, Loss: 0.5252\n",
      "Epoch: 128, Loss: 0.5766\n",
      "Epoch: 129, Loss: 0.5394\n",
      "Epoch: 130, Loss: 0.5620\n",
      "Epoch: 131, Loss: 0.5615\n",
      "Epoch: 132, Loss: 0.5076\n",
      "Epoch: 133, Loss: 0.5269\n",
      "Epoch: 134, Loss: 0.6378\n",
      "Epoch: 135, Loss: 0.4696\n",
      "Epoch: 136, Loss: 0.6390\n",
      "Epoch: 137, Loss: 0.5453\n",
      "Epoch: 138, Loss: 0.6134\n",
      "Epoch: 139, Loss: 0.5124\n",
      "Epoch: 140, Loss: 0.6200\n",
      "Epoch: 141, Loss: 0.6257\n",
      "Epoch: 142, Loss: 0.5044\n",
      "Epoch: 143, Loss: 0.4806\n",
      "Epoch: 144, Loss: 0.5525\n",
      "Epoch: 145, Loss: 0.5868\n",
      "Epoch: 146, Loss: 0.5152\n",
      "Epoch: 147, Loss: 0.5037\n",
      "Epoch: 148, Loss: 0.5627\n",
      "Epoch: 149, Loss: 0.5363\n",
      "Epoch: 150, Loss: 0.5652\n",
      "Epoch: 151, Loss: 0.5165\n",
      "Epoch: 152, Loss: 0.6302\n",
      "Epoch: 153, Loss: 0.5254\n",
      "Epoch: 154, Loss: 0.6389\n",
      "Epoch: 155, Loss: 0.5445\n",
      "Epoch: 156, Loss: 0.5654\n",
      "Epoch: 157, Loss: 0.5458\n",
      "Epoch: 158, Loss: 0.5330\n",
      "Epoch: 159, Loss: 0.5707\n",
      "Epoch: 160, Loss: 0.5263\n",
      "Epoch: 161, Loss: 0.6549\n",
      "Epoch: 162, Loss: 0.5670\n",
      "Epoch: 163, Loss: 0.5441\n",
      "Epoch: 164, Loss: 0.6227\n",
      "Epoch: 165, Loss: 0.5584\n",
      "Epoch: 166, Loss: 0.5559\n",
      "Epoch: 167, Loss: 0.5612\n",
      "Epoch: 168, Loss: 0.5274\n",
      "Epoch: 169, Loss: 0.5379\n",
      "Epoch: 170, Loss: 0.5823\n",
      "Epoch: 171, Loss: 0.4795\n",
      "Epoch: 172, Loss: 0.4943\n",
      "Epoch: 173, Loss: 0.5291\n",
      "Epoch: 174, Loss: 0.5334\n",
      "Epoch: 175, Loss: 0.5545\n",
      "Epoch: 176, Loss: 0.5179\n",
      "Epoch: 177, Loss: 0.5041\n",
      "Epoch: 178, Loss: 0.5227\n",
      "Epoch: 179, Loss: 0.5640\n",
      "Epoch: 180, Loss: 0.4907\n",
      "Epoch: 181, Loss: 0.5230\n",
      "Epoch: 182, Loss: 0.4882\n",
      "Epoch: 183, Loss: 0.5992\n",
      "Epoch: 184, Loss: 0.5776\n",
      "Epoch: 185, Loss: 0.5506\n",
      "Epoch: 186, Loss: 0.5397\n",
      "Epoch: 187, Loss: 0.5714\n",
      "Epoch: 188, Loss: 0.5655\n",
      "Epoch: 189, Loss: 0.5501\n",
      "Epoch: 190, Loss: 0.5809\n",
      "Epoch: 191, Loss: 0.5024\n",
      "Epoch: 192, Loss: 0.5444\n",
      "Epoch: 193, Loss: 0.5335\n",
      "Epoch: 194, Loss: 0.5415\n",
      "Epoch: 195, Loss: 0.5718\n",
      "Epoch: 196, Loss: 0.5627\n",
      "Epoch: 197, Loss: 0.4876\n",
      "Epoch: 198, Loss: 0.5816\n",
      "Epoch: 199, Loss: 0.5355\n",
      "Epoch: 200, Loss: 0.4864\n",
      "Accuracy: 0.7000\n"
     ]
    }
   ],
   "source": [
    "# 训练模型  \n",
    "def train():  \n",
    "    model.train()  \n",
    "    optimizer.zero_grad()  \n",
    "    out = model(data.x, data.edge_index)  \n",
    "    loss = criterion(out[data.train_mask], data.y[data.train_mask])  # 只对训练集中的节点计算损失  \n",
    "    loss.backward()  \n",
    "    optimizer.step()  \n",
    "    return loss.item()  \n",
    "  \n",
    "# 这里我们还没有定义train_mask，因此需要先划分数据集（训练集、验证集、测试集）  \n",
    "# 但为了简化示例，我们将在这里对整个数据集进行训练（这不是一个好的实践）  \n",
    "data.train_mask = torch.ones(num_nodes, dtype=torch.bool)  # 在实际应用中，你应该使用适当的数据划分  \n",
    "  \n",
    "# 进行训练  \n",
    "for epoch in range(1, 201):  \n",
    "    loss = train()  \n",
    "    print(f'Epoch: {epoch}, Loss: {loss:.4f}')  \n",
    "      \n",
    "# 测试模型（在这个简化示例中，我们没有单独的测试集，所以只是演示如何调用模型）  \n",
    "model.eval()  \n",
    "_, pred = model(data.x, data.edge_index).max(dim=1)  \n",
    "correct = pred.eq(data.y).sum().item()  \n",
    "acc = correct / num_nodes  \n",
    "print(f'Accuracy: {acc:.4f}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9846bd29-ab35-40f4-870c-258bad639225",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
