{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "bde9fd48-77a0-4345-9087-62bccd691a3c",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torchvision.datasets as ds\n",
    "import torchvision.transforms as ts\n",
    "from torch.utils.data import DataLoader\n",
    "from torch.autograd import Variable\n",
    "import random\n",
    "\n",
    "torch.manual_seed(777)  # 设置随机种子以确保结果的可复现性\n",
    "\n",
    "# 参数设置\n",
    "batch_size = 100  # 每个批次的样本数量\n",
    "learning_rate = 0.001  # 学习率\n",
    "epochs = 2  # 训练的轮数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "c378decc-c518-4815-be7a-41f5feecc3c8",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Downloading http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz\n",
      "Failed to download (trying next):\n",
      "HTTP Error 403: Forbidden\n",
      "\n",
      "Downloading https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz\n",
      "Downloading https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz to ../../DATA\\MNIST_data\\MNIST\\raw\\train-images-idx3-ubyte.gz\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 9912422/9912422 [00:09<00:00, 1048142.68it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting ../../DATA\\MNIST_data\\MNIST\\raw\\train-images-idx3-ubyte.gz to ../../DATA\\MNIST_data\\MNIST\\raw\n",
      "\n",
      "Downloading http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz\n",
      "Failed to download (trying next):\n",
      "HTTP Error 403: Forbidden\n",
      "\n",
      "Downloading https://ossci-datasets.s3.amazonaws.com/mnist/train-labels-idx1-ubyte.gz\n",
      "Downloading https://ossci-datasets.s3.amazonaws.com/mnist/train-labels-idx1-ubyte.gz to ../../DATA\\MNIST_data\\MNIST\\raw\\train-labels-idx1-ubyte.gz\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 28881/28881 [00:00<00:00, 117670.99it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting ../../DATA\\MNIST_data\\MNIST\\raw\\train-labels-idx1-ubyte.gz to ../../DATA\\MNIST_data\\MNIST\\raw\n",
      "\n",
      "Downloading http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz\n",
      "Failed to download (trying next):\n",
      "HTTP Error 403: Forbidden\n",
      "\n",
      "Downloading https://ossci-datasets.s3.amazonaws.com/mnist/t10k-images-idx3-ubyte.gz\n",
      "Downloading https://ossci-datasets.s3.amazonaws.com/mnist/t10k-images-idx3-ubyte.gz to ../../DATA\\MNIST_data\\MNIST\\raw\\t10k-images-idx3-ubyte.gz\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 1648877/1648877 [00:21<00:00, 78334.69it/s] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting ../../DATA\\MNIST_data\\MNIST\\raw\\t10k-images-idx3-ubyte.gz to ../../DATA\\MNIST_data\\MNIST\\raw\n",
      "\n",
      "Downloading http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz\n",
      "Failed to download (trying next):\n",
      "HTTP Error 403: Forbidden\n",
      "\n",
      "Downloading https://ossci-datasets.s3.amazonaws.com/mnist/t10k-labels-idx1-ubyte.gz\n",
      "Downloading https://ossci-datasets.s3.amazonaws.com/mnist/t10k-labels-idx1-ubyte.gz to ../../DATA\\MNIST_data\\MNIST\\raw\\t10k-labels-idx1-ubyte.gz\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 4542/4542 [00:00<00:00, 2268730.35it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Extracting ../../DATA\\MNIST_data\\MNIST\\raw\\t10k-labels-idx1-ubyte.gz to ../../DATA\\MNIST_data\\MNIST\\raw\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "# 加载MNIST数据集\n",
    "ds_train = ds.MNIST(root=r'../../DATA\\MNIST_data',  # 训练集存放路径\n",
    "                    train=True,  # 训练集\n",
    "                    transform=ts.ToTensor(),  # 转换为Tensor\n",
    "                    download=True)  # 如果数据集不存在则下载\n",
    "ds_test = ds.MNIST(root=r'../../DATA\\MNIST_data',  # 测试集存放路径\n",
    "                   train=False,  # 测试集\n",
    "                   transform=ts.ToTensor(),  # 转换为Tensor\n",
    "                   download=True)  # 如果数据集不存在则下载\n",
    "\n",
    "# 创建数据加载器\n",
    "dl = DataLoader(dataset=ds_train, batch_size=batch_size, shuffle=True)  # 随机打乱数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "506054ec-7d3f-4ab1-a4e2-1edc3881d13f",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\lenovo\\AppData\\Local\\Temp\\ipykernel_21500\\1287755330.py:20: UserWarning: nn.init.xavier_uniform is now deprecated in favor of nn.init.xavier_uniform_.\n",
      "  torch.nn.init.xavier_uniform(self.fc.weight)  # 使用Xavier初始化权重\n"
     ]
    }
   ],
   "source": [
    "# 定义CNN模型（包含2个卷积层）\n",
    "class CNN(torch.nn.Module):\n",
    "    def __init__(self):\n",
    "        super(CNN, self).__init__()\n",
    "        # 第一层卷积层：1个输入通道，32个输出通道，卷积核大小为3，步长为1，填充为1\n",
    "        # 激活函数使用ReLU，池化层使用2x2的最大池化\n",
    "        self.layer1 = torch.nn.Sequential(\n",
    "            torch.nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1),  # 卷积层\n",
    "            torch.nn.ReLU(),  # 激活函数\n",
    "            torch.nn.MaxPool2d(kernel_size=2, stride=2)  # 池化层\n",
    "        )\n",
    "        # 第二层卷积层：32个输入通道，64个输出通道，卷积核大小为3，步长为1，填充为1\n",
    "        self.layer2 = torch.nn.Sequential(\n",
    "            torch.nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),  # 卷积层\n",
    "            torch.nn.ReLU(),  # 激活函数\n",
    "            torch.nn.MaxPool2d(kernel_size=2, stride=2)  # 池化层\n",
    "        )\n",
    "        # 全连接层：7*7*64个输入特征，10个输出特征（对应10个类别）\n",
    "        self.fc = torch.nn.Linear(7 * 7 * 64, 10)\n",
    "        torch.nn.init.xavier_uniform(self.fc.weight)  # 使用Xavier初始化权重\n",
    "\n",
    "    def forward(self, x):\n",
    "        out = self.layer1(x)  # 通过第一层卷积层\n",
    "        out = self.layer2(out)  # 通过第二层卷积层\n",
    "        out = out.view(out.size(0), -1)  # 展平特征图以输入全连接层\n",
    "        out = self.fc(out)  # 通过全连接层\n",
    "        return out\n",
    "\n",
    "# 实例化CNN模型\n",
    "model = CNN()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "483d9021-709f-46c9-8fc9-df6dd44079a5",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Learning started. It takes sometime.\n",
      "1 0.22560811042785645\n",
      "2 0.0629836693406105\n",
      "Learning Finished!\n"
     ]
    }
   ],
   "source": [
    "# 定义损失函数和优化器\n",
    "criterion = torch.nn.CrossEntropyLoss()  # Softmax是内部计算的\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n",
    "\n",
    "# 训练模型\n",
    "print('Learning started. It takes sometime.')\n",
    "for epoch in range(epochs):\n",
    "    avg_cost = 0\n",
    "    total_batch = len(ds_train) // batch_size\n",
    "    for step, (batch_xs, batch_ys) in enumerate(dl):\n",
    "        x = Variable(batch_xs)  # [100, 1, 28, 28] 图像已经是(28x28)大小，不需要reshape\n",
    "        y = Variable(batch_ys)  # [100] 标签不是one-hot编码\n",
    "\n",
    "        optimizer.zero_grad()  # 梯度清零\n",
    "        h = model(x)  # 前向传播\n",
    "        cost = criterion(h, y)  # 计算损失\n",
    "        cost.backward()  # 反向传播\n",
    "        optimizer.step()  # 更新参数\n",
    "\n",
    "        avg_cost += cost / total_batch\n",
    "    print(epoch + 1, avg_cost.item())\n",
    "print('Learning Finished!')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "99ca95a1-0c9b-4777-aab3-947a59cab6fe",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\lenovo\\anaconda3\\Lib\\site-packages\\torchvision\\datasets\\mnist.py:81: UserWarning: test_data has been renamed data\n",
      "  warnings.warn(\"test_data has been renamed data\")\n",
      "C:\\Users\\lenovo\\anaconda3\\Lib\\site-packages\\torchvision\\datasets\\mnist.py:71: UserWarning: test_labels has been renamed targets\n",
      "  warnings.warn(\"test_labels has been renamed targets\")\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Accuracy on test set: 0.9722999930381775\n",
      "Accuracy for random sample: 1.0\n"
     ]
    }
   ],
   "source": [
    "# 获取测试数据和标签\n",
    "x_test = ds_test.test_data.view(-1, 1, 28, 28).float()\n",
    "y_test = ds_test.test_labels\n",
    "\n",
    "# 预测测试数据\n",
    "pre = model(x_test)\n",
    "_, predicted = torch.max(pre.data, 1)  # 获取最大值的索引\n",
    "acc = (predicted == y_test.data.long()).float().mean()  # 计算准确率\n",
    "print('Accuracy on test set:', acc.item())\n",
    "\n",
    "# 随机选择一个测试样本来展示\n",
    "r = random.randint(0, len(x_test) - 1)\n",
    "x_r = x_test[r:r+1]\n",
    "y_r = y_test[r:r+1]\n",
    "pre_r = model(x_r)\n",
    "_, pre_r = torch.max(pre_r.data, 1)\n",
    "acc_r = (pre_r == y_r.data.long()).float().mean()\n",
    "print('Accuracy for random sample:', acc_r.item())"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "72ee7ad8-3942-4cf0-965a-6acfa7499d69",
   "metadata": {},
   "source": [
    "# 定义CNN模型（包含3个卷积层和2个全连接层）\r\n",
    "class CNN(torch.nn.Module):\r\n",
    "    def __init__(self):\r\n",
    "        super(CNN, self).__init__()\r\n",
    "        # 第一层卷积层：1个输入通道，32个输出通道，卷积核大小为3，步长为1，填充为1\r\n",
    "        # 激活函数使用ReLU，池化层使用2x2的最大池化，dropout比例为0.3\r\n",
    "        self.layer1 = torch.nn.Sequential(\r\n",
    "            torch.nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1),  # 卷积层\r\n",
    "            torch.nn.ReLU(),  # 激活函数\r\n",
    "            torch.nn.MaxPool2d(kernel_size=2, stride=2),  # 池化层\r\n",
    "            torch.nn.Dropout(0.3)  # dropout层\r\n",
    "        )\r\n",
    "        # 第二层卷积层：32个输入通道，64个输出通道，卷积核大小为3，步长为1，填充为1\r\n",
    "        self.layer2 = torch.nn.Sequential(\r\n",
    "            torch.nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),  # 卷积层\r\n",
    "            torch.nn.ReLU(),  # 激活函数\r\n",
    "            torch.nn.MaxPool2d(kernel_size=2, stride=2),  # 池化层\r\n",
    "            torch.nn.Dropout(0.3)  # dropout层\r\n",
    "        )\r\n",
    "        # 第三层卷积层：64个输入通道，128个输出通道，卷积核大小为3，步长为1，填充为1\r\n",
    "        self.layer3 = torch.nn.Sequential(\r\n",
    "            torch.nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),  # 卷积层\r\n",
    "            torch.nn.ReLU(),  # 激活函数\r\n",
    "            torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=1),  # 池化层\r\n",
    "            torch.nn.Dropout(0.3)  # dropout层\r\n",
    "        )\r\n",
    "        # 第四层全连接层：4*4*128个输入特征，625个输出特征\r\n",
    "        self.fc1 = torch.nn.Linear(4 * 4 * 128, 625)\r\n",
    "        torch.nn.init.xavier_uniform(self.fc1.weight)  # 使用Xavier初始化权重\r\n",
    "        self.layer4 = torch.nn.Sequential(\r\n",
    "            self.fc1,\r\n",
    "            torch.nn.ReLU(),\r\n",
    "            torch.nn.Dropout(0.3)\r\n",
    "        )\r\n",
    "        # 第五层全连接层：625个输入特征，10个输出特征（对应10个类别）\r\n",
    "        self.fc2 = torch.nn.Linear(625, 10)\r\n",
    "        torch.nn.init.xavier_uniform(self.fc2.weight)\r\n",
    "\r\n",
    "    def forward(self, x):\r\n",
    "        out = self.layer1(x)  # 通过第一层卷积层\r\n",
    "        out = self.layer2(out)  # 通过第二层卷积层\r\n",
    "        out = self.layer3(out)  # 通过第三层卷积层\r\n",
    "        out = out.view(out.size(0), -1)  # 展平特征图以输入全连接层\r\n",
    "        out = self.layer4(out)  # 通过第四层全连接层\r\n",
    "        out = self.fc2(out)  # 通过第五层全连接层\r\n",
    "        return out\r\n",
    "\r\n",
    "# 实例化CNN模型\r\n",
    "model = CNN()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "f070a279-01e1-43e4-8eec-2e15574cef06",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Learning started. It takes sometime.\n",
      "1 0.04855800047516823\n",
      "Learning Finished!\n"
     ]
    }
   ],
   "source": [
    "# 定义损失函数和优化器\n",
    "criterion = torch.nn.CrossEntropyLoss()  # 交叉熵损失函数，内部包含softmax\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)  # Adam优化器\n",
    "\n",
    "# 训练模型\n",
    "print('Learning started. It takes sometime.')\n",
    "for epoch in range(1):  # 只训练1个epoch进行演示\n",
    "    avg_cost = 0\n",
    "    total_batch = len(ds_train) // batch_size\n",
    "    for step, (batch_xs, batch_ys) in enumerate(dl):\n",
    "        x = Variable(batch_xs)  # 将数据转换为Variable\n",
    "        y = Variable(batch_ys)  # 将标签转换为Variable\n",
    "\n",
    "        optimizer.zero_grad()  # 梯度清零\n",
    "        h = model(x)  # 前向传播\n",
    "        cost = criterion(h, y)  # 计算损失\n",
    "        cost.backward()  # 反向传播\n",
    "        optimizer.step()  # 更新参数\n",
    "\n",
    "        avg_cost += cost / total_batch\n",
    "    print(epoch + 1, avg_cost.item())\n",
    "print('Learning Finished!')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "46cb13ec-c714-4ad6-9617-3e56ffbd2f4a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor(0.9774)\n",
      "tensor(0.)\n"
     ]
    }
   ],
   "source": [
    "# 测试模型并检查准确率\n",
    "model.eval()  # 将模型设置为评估模式\n",
    "\n",
    "x_test = ds_test.test_data.view(len(ds_test), 1, 28, 28).float()  # 调整测试数据的形状\n",
    "y_test = ds_test.test_labels\n",
    "\n",
    "pre = model(x_test)  # 预测测试数据\n",
    "pre = torch.max(pre.data, 1)[1].float()  # 获取预测结果\n",
    "acc = (pre == y_test.data.float()).float().mean()  # 计算准确率\n",
    "print(acc)\n",
    "\n",
    "# 随机选择一个测试样本来展示\n",
    "r = random.randint(0, len(x_test) - 1)\n",
    "x_r = x_test[r:r + 1]\n",
    "y_r = y_test[r:r + 1]\n",
    "pre_r = model(x_r)  # 预测单个测试样本\n",
    "pre_r = torch.max(pre_r.data, 0)[1].float()  # 获取预测结果\n",
    "acc_r = (pre_r == y_r.data.float()).float().mean()  # 计算准确率\n",
    "print(acc_r)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "9b1c427a-eee9-45ba-98a6-4cd14e900ee7",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\lenovo\\AppData\\Local\\Temp\\ipykernel_21500\\1073260537.py:29: UserWarning: nn.init.xavier_uniform is now deprecated in favor of nn.init.xavier_uniform_.\n",
      "  torch.nn.init.xavier_uniform(self.fc1.weight)  # 使用Xavier初始化权重\n",
      "C:\\Users\\lenovo\\AppData\\Local\\Temp\\ipykernel_21500\\1073260537.py:37: UserWarning: nn.init.xavier_uniform is now deprecated in favor of nn.init.xavier_uniform_.\n",
      "  torch.nn.init.xavier_uniform(self.fc2.weight)\n"
     ]
    }
   ],
   "source": [
    "# 定义CNN模型（包含3个卷积层和2个全连接层）\n",
    "class CNN(torch.nn.Module):\n",
    "    def __init__(self):\n",
    "        super(CNN, self).__init__()\n",
    "        # 第一层卷积层：1个输入通道，32个输出通道，卷积核大小为3，步长为1，填充为1\n",
    "        # 激活函数使用ReLU，池化层使用2x2的最大池化，dropout比例为0.3\n",
    "        self.layer1 = torch.nn.Sequential(\n",
    "            torch.nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1),  # 卷积层\n",
    "            torch.nn.ReLU(),  # 激活函数\n",
    "            torch.nn.MaxPool2d(kernel_size=2, stride=2),  # 池化层\n",
    "            torch.nn.Dropout(0.3)  # dropout层\n",
    "        )\n",
    "        # 第二层卷积层：32个输入通道，64个输出通道，卷积核大小为3，步长为1，填充为1\n",
    "        self.layer2 = torch.nn.Sequential(\n",
    "            torch.nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),  # 卷积层\n",
    "            torch.nn.ReLU(),  # 激活函数\n",
    "            torch.nn.MaxPool2d(kernel_size=2, stride=2),  # 池化层\n",
    "            torch.nn.Dropout(0.3)  # dropout层\n",
    "        )\n",
    "        # 第三层卷积层：64个输入通道，128个输出通道，卷积核大小为3，步长为1，填充为1\n",
    "        self.layer3 = torch.nn.Sequential(\n",
    "            torch.nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),  # 卷积层\n",
    "            torch.nn.ReLU(),  # 激活函数\n",
    "            torch.nn.MaxPool2d(kernel_size=2, stride=2, padding=1),  # 池化层\n",
    "            torch.nn.Dropout(0.3)  # dropout层\n",
    "        )\n",
    "        # 第四层全连接层：4*4*128个输入特征，625个输出特征\n",
    "        self.fc1 = torch.nn.Linear(4 * 4 * 128, 625)\n",
    "        torch.nn.init.xavier_uniform(self.fc1.weight)  # 使用Xavier初始化权重\n",
    "        self.layer4 = torch.nn.Sequential(\n",
    "            self.fc1,\n",
    "            torch.nn.ReLU(),\n",
    "            torch.nn.Dropout(0.3)\n",
    "        )\n",
    "        # 第五层全连接层：625个输入特征，10个输出特征（对应10个类别）\n",
    "        self.fc2 = torch.nn.Linear(625, 10)\n",
    "        torch.nn.init.xavier_uniform(self.fc2.weight)\n",
    "\n",
    "        # 定义损失函数和优化器\n",
    "        self.criterion = torch.nn.CrossEntropyLoss()  # 交叉熵损失函数，内部包含softmax\n",
    "        self.optimizer = torch.optim.Adam(self.parameters(), lr=learning_rate)  # Adam优化器\n",
    "\n",
    "    def forward(self, x):\n",
    "        out = self.layer1(x)  # 通过第一层卷积层\n",
    "        out = self.layer2(out)  # 通过第二层卷积层\n",
    "        out = self.layer3(out)  # 通过第三层卷积层\n",
    "        out = out.view(out.size(0), -1)  # 展平特征图以输入全连接层\n",
    "        out = self.layer4(out)  # 通过第四层全连接层\n",
    "        out = self.fc2(out)  # 通过第五层全连接层\n",
    "        return out\n",
    "\n",
    "    def train_model(self, x, y):\n",
    "        # 训练模型一次迭代\n",
    "        self.optimizer.zero_grad()  # 梯度清零\n",
    "        h = self.forward(x)  # 前向传播\n",
    "        self.cost = self.criterion(h, y)  # 计算损失\n",
    "        self.cost.backward()  # 反向传播\n",
    "        self.optimizer.step()  # 更新参数\n",
    "        return self.cost\n",
    "\n",
    "    def predict(self, x):\n",
    "        # 预测数据\n",
    "        self.eval()  # 将模型设置为评估模式\n",
    "        return self.forward(x)\n",
    "\n",
    "    def accuracy(self, x, y):\n",
    "        # 计算准确率\n",
    "        prediction = self.predict(x)  # 预测数据\n",
    "        correct_prediction = (torch.max(prediction.data, 1)[1] == y.data.float())  # 比较预测和实际标签\n",
    "        self.acc = correct_prediction.float().mean()  # 计算准确率\n",
    "        return self.acc\n",
    "\n",
    "# 实例化CNN模型\n",
    "model = CNN()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "aeee43ec-0e6b-4014-b960-cae7bb7f5a4c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Learning started. It takes sometime.\n",
      "1 0.25984081625938416\n",
      "2 0.0749426782131195\n",
      "Learning Finished!\n"
     ]
    }
   ],
   "source": [
    "# 训练模型\n",
    "print('Learning started. It takes sometime.')\n",
    "for epoch in range(epochs):\n",
    "    avg_cost = 0\n",
    "    total_batch = len(ds_train) // batch_size\n",
    "    for step, (batch_xs, batch_ys) in enumerate(dl):\n",
    "        x = Variable(batch_xs)  # 将数据转换为Variable\n",
    "        y = Variable(batch_ys)  # 将标签转换为Variable\n",
    "\n",
    "        cost = model.train_model(x, y)  # 训练模型一次迭代\n",
    "\n",
    "        avg_cost += cost / total_batch\n",
    "    print(epoch + 1, avg_cost.item())\n",
    "print('Learning Finished!')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "d6ab7f1b-26fd-4d0b-af9e-1df649292d76",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor(0.9750)\n",
      "tensor(0.)\n"
     ]
    }
   ],
   "source": [
    "# 测试模型并检查准确率\n",
    "model.eval()  # 将模型设置为评估模式\n",
    "\n",
    "x_test = ds_test.test_data.view(len(ds_test), 1, 28, 28).float()  # 调整测试数据的形状\n",
    "y_test = ds_test.test_labels\n",
    "\n",
    "pre = model(x_test)  # 预测测试数据\n",
    "pre = torch.max(pre.data, 1)[1].float()  # 获取预测结果\n",
    "acc = (pre == y_test.data).float().mean()  # 计算准确率\n",
    "print(acc)\n",
    "\n",
    "# 随机选择一个测试样本来展示\n",
    "r = random.randint(0, len(x_test) - 1)\n",
    "x_r = x_test[r:r + 1]\n",
    "y_r = y_test[r:r + 1]\n",
    "pre_r = model(x_r)  # 预测单个测试样本\n",
    "pre_r = torch.max(pre_r.data, 1)[1].float()  # 获取预测结果\n",
    "acc_r = (pre_r == y_r.data).float().mean()  # 计算准确率\n",
    "print(acc_r)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "39e2d3c5-c98d-42e8-b8f2-762ddc5c1ac8",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "aac2ec80-61fe-4f00-bbf0-625355571d47",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
