{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "99155c26-15b1-4e7b-88d0-3fe028e53fb1",
   "metadata": {},
   "outputs": [
    {
     "ename": "ImportError",
     "evalue": "/opt/anaconda3/envs/science39/lib/python3.9/site-packages/torch/lib/libtorch_cuda.so: undefined symbol: ncclCommRegister",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mImportError\u001b[0m                               Traceback (most recent call last)",
      "Cell \u001b[0;32mIn[1], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mtorch\u001b[39;00m  \n\u001b[1;32m      2\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mtorch\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mnn\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m \u001b[38;5;21;01mnn\u001b[39;00m  \n\u001b[1;32m      3\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mtorch\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01moptim\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m \u001b[38;5;21;01moptim\u001b[39;00m  \n",
      "File \u001b[0;32m/opt/anaconda3/envs/science39/lib/python3.9/site-packages/torch/__init__.py:237\u001b[0m\n\u001b[1;32m    235\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m USE_GLOBAL_DEPS:\n\u001b[1;32m    236\u001b[0m         _load_global_deps()\n\u001b[0;32m--> 237\u001b[0m     \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mtorch\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01m_C\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;241m*\u001b[39m  \u001b[38;5;66;03m# noqa: F403\u001b[39;00m\n\u001b[1;32m    239\u001b[0m \u001b[38;5;66;03m# Appease the type checker; ordinarily this binding is inserted by the\u001b[39;00m\n\u001b[1;32m    240\u001b[0m \u001b[38;5;66;03m# torch._C module initialization code in C\u001b[39;00m\n\u001b[1;32m    241\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m TYPE_CHECKING:\n",
      "\u001b[0;31mImportError\u001b[0m: /opt/anaconda3/envs/science39/lib/python3.9/site-packages/torch/lib/libtorch_cuda.so: undefined symbol: ncclCommRegister"
     ]
    }
   ],
   "source": [
    "import torch  \n",
    "import torch.nn as nn  \n",
    "import torch.optim as optim  \n",
    "import torchvision  \n",
    "import torchvision.transforms as transforms  \n",
    "import torch.nn.functional as F  \n",
    "from tqdm import tqdm  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "c3c35c9e-fbc1-42d6-bca0-eb62ad247680",
   "metadata": {},
   "outputs": [],
   "source": [
    "class LeNet(nn.Module):  \n",
    "    def __init__(self, num_classes=10):  \n",
    "        super(LeNet, self).__init__()  \n",
    "        # Convolution (In LeNet-5, 32x32 images are given as input. Hence padding of 2 is done below)  \n",
    "        self.conv1 = nn.Conv2d(1, 6, 5, padding=2)  \n",
    "        self.conv2 = nn.Conv2d(6, 16, 5)  \n",
    "        # Fully connected layers  \n",
    "        self.fc1 = nn.Linear(16*5*5, 120)  \n",
    "        self.fc2 = nn.Linear(120, 84)  \n",
    "        self.fc3 = nn.Linear(84, num_classes)  \n",
    "  \n",
    "    def forward(self, x):  \n",
    "        # Convolution with ReLU activation  \n",
    "        x = F.relu(self.conv1(x))  \n",
    "        # Max pooling over 2x2  \n",
    "        x = F.max_pool2d(x, 2)  \n",
    "  \n",
    "        x = F.relu(self.conv2(x))  \n",
    "        x = F.max_pool2d(x, 2)  \n",
    "  \n",
    "        # Flatten the tensor  \n",
    "        x = x.view(x.size(0), -1)  \n",
    "  \n",
    "        x = F.relu(self.fc1(x))  \n",
    "        x = F.relu(self.fc2(x))  \n",
    "        x = self.fc3(x)  \n",
    "  \n",
    "        return x  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "ed729d1b-9e89-4720-9361-c8bf7fb55bc3",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "LeNet(\n",
      "  (conv1): Conv2d(1, 6, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))\n",
      "  (conv2): Conv2d(6, 16, kernel_size=(5, 5), stride=(1, 1))\n",
      "  (fc1): Linear(in_features=400, out_features=120, bias=True)\n",
      "  (fc2): Linear(in_features=120, out_features=84, bias=True)\n",
      "  (fc3): Linear(in_features=84, out_features=10, bias=True)\n",
      ")\n"
     ]
    }
   ],
   "source": [
    "# Instantiate the model  \n",
    "num_classes = 10  # Number of classes in the dataset (MNIST has digits 0-9)  \n",
    "model = LeNet(num_classes=num_classes)  \n",
    "  \n",
    "# Print the architecture as a sanity check of layer shapes  \n",
    "print(model)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "9826fdc5-e1e5-4d24-9ff0-74d399441957",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Downloading http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz\n",
      "Downloading http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz to ./data/MNIST/raw/train-images-idx3-ubyte.gz\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "f8f085855eea4ee08adc5fd2afce1352",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/9912422 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting ./data/MNIST/raw/train-images-idx3-ubyte.gz to ./data/MNIST/raw\n",
      "\n",
      "Downloading http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz\n",
      "Downloading http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz to ./data/MNIST/raw/train-labels-idx1-ubyte.gz\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "3b665122eb9646c3a44de53794c45677",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/28881 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting ./data/MNIST/raw/train-labels-idx1-ubyte.gz to ./data/MNIST/raw\n",
      "\n",
      "Downloading http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz\n",
      "Downloading http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz to ./data/MNIST/raw/t10k-images-idx3-ubyte.gz\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "d89467dd65cd482fbae8c188f1bd62c3",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/1648877 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting ./data/MNIST/raw/t10k-images-idx3-ubyte.gz to ./data/MNIST/raw\n",
      "\n",
      "Downloading http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz\n",
      "Downloading http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz to ./data/MNIST/raw/t10k-labels-idx1-ubyte.gz\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "9da8e7a8ad414665bcad5f71525cdb47",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/4542 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting ./data/MNIST/raw/t10k-labels-idx1-ubyte.gz to ./data/MNIST/raw\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# 数据预处理：转换为Tensor并进行归一化  \n",
    "transform = transforms.Compose([  \n",
    "    transforms.ToTensor(),  \n",
    "    transforms.Normalize((0.5,), (0.5,))  # 对于MNIST，只有一个通道，所以只需要一个值进行归一化  \n",
    "])  \n",
    "  \n",
    "# 下载并加载训练数据集  \n",
    "trainset = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=transform)  \n",
    "trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)  \n",
    "  \n",
    "# 下载并加载测试数据集  \n",
    "testset = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transform)  \n",
    "testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=False)  \n",
    "  \n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "59bcbd0d-b0b1-42dc-ac6e-a9d5a70eadef",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 实例化模型、损失函数和优化器  \n",
    "model = LeNet()  \n",
    "criterion = nn.CrossEntropyLoss()  # 多分类交叉熵损失  \n",
    "optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "2be2371d-5277-4b7e-b69a-8d9c609b0c81",
   "metadata": {},
   "outputs": [
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "Cell \u001b[0;32mIn[8], line 10\u001b[0m\n\u001b[1;32m      8\u001b[0m outputs \u001b[38;5;241m=\u001b[39m model(inputs)  \u001b[38;5;66;03m# 前向传播  \u001b[39;00m\n\u001b[1;32m      9\u001b[0m loss \u001b[38;5;241m=\u001b[39m criterion(outputs, labels)  \u001b[38;5;66;03m# 计算损失值  \u001b[39;00m\n\u001b[0;32m---> 10\u001b[0m \u001b[43mloss\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbackward\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m  \u001b[38;5;66;03m# 反向传播计算梯度  \u001b[39;00m\n\u001b[1;32m     11\u001b[0m optimizer\u001b[38;5;241m.\u001b[39mstep()  \u001b[38;5;66;03m# 更新权重参数  \u001b[39;00m\n\u001b[1;32m     12\u001b[0m running_loss \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m loss\u001b[38;5;241m.\u001b[39mitem()  \u001b[38;5;66;03m# 累加损失值  \u001b[39;00m\n",
      "File \u001b[0;32m/opt/anaconda3/envs/science39/lib/python3.9/site-packages/torch/_tensor.py:363\u001b[0m, in \u001b[0;36mTensor.backward\u001b[0;34m(self, gradient, retain_graph, create_graph, inputs)\u001b[0m\n\u001b[1;32m    354\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m has_torch_function_unary(\u001b[38;5;28mself\u001b[39m):\n\u001b[1;32m    355\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m handle_torch_function(\n\u001b[1;32m    356\u001b[0m         Tensor\u001b[38;5;241m.\u001b[39mbackward,\n\u001b[1;32m    357\u001b[0m         (\u001b[38;5;28mself\u001b[39m,),\n\u001b[0;32m   (...)\u001b[0m\n\u001b[1;32m    361\u001b[0m         create_graph\u001b[38;5;241m=\u001b[39mcreate_graph,\n\u001b[1;32m    362\u001b[0m         inputs\u001b[38;5;241m=\u001b[39minputs)\n\u001b[0;32m--> 363\u001b[0m \u001b[43mtorch\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mautograd\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbackward\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mgradient\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mretain_graph\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcreate_graph\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43minputs\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[0;32m/opt/anaconda3/envs/science39/lib/python3.9/site-packages/torch/autograd/__init__.py:173\u001b[0m, in \u001b[0;36mbackward\u001b[0;34m(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)\u001b[0m\n\u001b[1;32m    168\u001b[0m     retain_graph \u001b[38;5;241m=\u001b[39m create_graph\n\u001b[1;32m    170\u001b[0m \u001b[38;5;66;03m# The reason we repeat same the comment below is that\u001b[39;00m\n\u001b[1;32m    171\u001b[0m \u001b[38;5;66;03m# some Python versions print out the first line of a multi-line function\u001b[39;00m\n\u001b[1;32m    172\u001b[0m \u001b[38;5;66;03m# calls in the traceback and some print out the last line\u001b[39;00m\n\u001b[0;32m--> 173\u001b[0m \u001b[43mVariable\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_execution_engine\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun_backward\u001b[49m\u001b[43m(\u001b[49m\u001b[43m  \u001b[49m\u001b[38;5;66;43;03m# Calls into the C++ engine to run the backward pass\u001b[39;49;00m\n\u001b[1;32m    174\u001b[0m \u001b[43m    \u001b[49m\u001b[43mtensors\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mgrad_tensors_\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mretain_graph\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcreate_graph\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m    175\u001b[0m \u001b[43m    \u001b[49m\u001b[43mallow_unreachable\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43maccumulate_grad\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m)\u001b[49m\n",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "# 训练模型  \n",
    "num_epochs = 5  # 设置训练的轮数  \n",
    "for epoch in range(num_epochs):  \n",
    "    running_loss = 0.0  \n",
    "    for i, data in enumerate(trainloader, 0):  \n",
    "        inputs, labels = data  \n",
    "        optimizer.zero_grad()  # 清空梯度缓存  \n",
    "        outputs = model(inputs)  # 前向传播  \n",
    "        loss = criterion(outputs, labels)  # 计算损失值  \n",
    "        loss.backward()  # 反向传播计算梯度  \n",
    "        optimizer.step()  # 更新权重参数  \n",
    "        running_loss += loss.item()  # 累加损失值  \n",
    "    print(f'Epoch {epoch+1}, Loss: {running_loss / (i+1)}')  \n",
    "  \n",
    "print('Training finished.')  \n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "ac3964cc-98a4-46f4-9405-7281b2b4b61f",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1: 100%|████████████████████████████████████████| 938/938 [17:11<00:00,  1.10s/it, Loss=0.296]\n",
      "Epoch 2: 100%|███████████████████████████████████████| 938/938 [17:03<00:00,  1.09s/it, Loss=0.0575]\n",
      "Epoch 3: 100%|███████████████████████████████████████| 938/938 [16:58<00:00,  1.09s/it, Loss=0.0414]\n",
      "Epoch 4: 100%|███████████████████████████████████████| 938/938 [17:04<00:00,  1.09s/it, Loss=0.0312]\n",
      "Epoch 5: 100%|████████████████████████████████████████| 938/938 [17:05<00:00,  1.09s/it, Loss=0.026]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training finished.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "# 训练模型  \n",
    "num_epochs = 5  # 设置训练的轮数  \n",
    "for epoch in range(num_epochs):  \n",
    "    running_loss = 0.0  \n",
    "    pbar = tqdm(enumerate(trainloader), total=len(trainloader), desc=f\"Epoch {epoch+1}\", ncols=100)  # 初始化进度条  \n",
    "    for i, data in pbar:  # 使用tqdm包装过的迭代器  \n",
    "        inputs, labels = data  \n",
    "        optimizer.zero_grad()  # 清空梯度缓存  \n",
    "        outputs = model(inputs)  # 前向传播  \n",
    "        loss = criterion(outputs, labels)  # 计算损失值  \n",
    "        loss.backward()  # 反向传播计算梯度  \n",
    "        optimizer.step()  # 更新权重参数  \n",
    "        running_loss += loss.item()  # 累加损失值  \n",
    "          \n",
    "        # 更新进度条信息（可选，因为tqdm会自动更新进度）  \n",
    "        pbar.set_postfix({'Loss': running_loss / (i+1)})  \n",
    "      \n",
    "    # 关闭进度条（可选，因为tqdm会在循环结束时自动关闭）  \n",
    "    pbar.close()  \n",
    "print('Training finished.')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ac374db1-9e9e-414b-8ff5-08e986223e28",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 在测试集上评估模型  \n",
    "correct = 0  \n",
    "total = 0  \n",
    "with torch.no_grad():  # 不需要计算梯度，也不会进行反向传播  \n",
    "    for data in testloader:  \n",
    "        images, labels = data  \n",
    "        outputs = model(images)  # 前向传播，获取预测值  \n",
    "        _, predicted = torch.max(outputs.data, 1)  # 从预测值中选取概率最大的类别作为预测结果  \n",
    "        total += labels.size(0)  # 更新总数量  \n",
    "        correct += (predicted == labels).sum().item()  # 统计预测正确的数量  \n",
    "  \n",
    "print(f'Accuracy of the network on the test images: {100 * correct / total} %')  # 计算准确率并输出"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e7924040-8d74-4b12-abd0-81f560f66383",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 确保模型处于评估模式  \n",
    "model.eval()  \n",
    "  \n",
    "# 初始化正确预测的数量和总数  \n",
    "correct = 0  \n",
    "total = 0  \n",
    "  \n",
    "with torch.no_grad():  # 不需要计算梯度，也不会进行反向传播  \n",
    "    test_iterator = tqdm(testloader, desc=\"Testing\", ncols=100)  # 使用tqdm包装测试集迭代器  \n",
    "    for images, labels in test_iterator:  # 遍历测试集中的每一批数据  \n",
    "        outputs = model(images)  # 前向传播，获取预测值  \n",
    "        _, predicted = torch.max(outputs, 1)  # 从预测值中选取概率最大的类别作为预测结果  \n",
    "        total += labels.size(0)  # 更新总数量  \n",
    "        correct += (predicted == labels).sum().item()  # 统计预测正确的数量  \n",
    "          \n",
    "        # 更新进度条信息（可选，tqdm会自动更新进度）  \n",
    "        test_iterator.set_postfix({'Accuracy': 100.0 * correct / total if total > 0 else 0})  \n",
    "  \n",
    "# 输出最终的准确率  \n",
    "print(f'Accuracy of the network on the test images: {100 * correct / total} %')"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
