{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Imports: torch core first, then torchvision and general-purpose libraries.\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.optim as optim\n",
    "from torchvision import datasets, transforms\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import matplotlib.pyplot as plt\n",
    "%matplotlib inline"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 9.91M/9.91M [00:08<00:00, 1.11MB/s]\n",
      "100%|██████████| 28.9k/28.9k [00:00<00:00, 41.3kB/s]\n",
      "100%|██████████| 1.65M/1.65M [00:02<00:00, 683kB/s]\n",
      "100%|██████████| 4.54k/4.54k [00:00<00:00, 2.06MB/s]\n"
     ]
    }
   ],
   "source": [
    "# Hyperparameters\n",
    "input_size = 28      # MNIST images are 28x28 pixels\n",
    "num_classes = 10     # digits 0-9\n",
    "num_epochs = 3\n",
    "batch_size = 64\n",
    "\n",
    "# Training set\n",
    "train_dataset = datasets.MNIST(root='./data', train=True, transform=transforms.ToTensor(), download=True)\n",
    "\n",
    "# Test set (download=True so this also works on a machine where the data is not yet present)\n",
    "test_dataset = datasets.MNIST(root='./data', train=False, transform=transforms.ToTensor(), download=True)\n",
    "\n",
    "# Build batched data loaders; shuffle only the training data\n",
    "train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)\n",
    "test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 卷积网络模块构建"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "class CNN(nn.Module):\n",
    "    \"\"\"Three-stage convolutional network for 28x28 grayscale MNIST images.\n",
    "\n",
    "    Input:  (batch, 1, 28, 28) tensor\n",
    "    Output: (batch, 10) unnormalized class logits (pair with CrossEntropyLoss)\n",
    "    \"\"\"\n",
    "    def __init__(self):\n",
    "        super(CNN, self).__init__()\n",
    "        self.conv1 = nn.Sequential(     # input size (1, 28, 28)\n",
    "            nn.Conv2d(\n",
    "            in_channels=1,              # grayscale image -> 1 input channel\n",
    "            out_channels=16,            # number of feature maps (conv kernels)\n",
    "            kernel_size=5,              # 5x5 convolution kernel\n",
    "            stride=1,                   # kernel step size\n",
    "            padding=2),                 # padding=(kernel_size-1)/2 keeps spatial size when stride=1\n",
    "            nn.ReLU(),                  # activation\n",
    "            nn.MaxPool2d(kernel_size=2) # 2x2 max pooling -> output (16, 14, 14)\n",
    "        )\n",
    "        self.conv2 = nn.Sequential(\n",
    "            nn.Conv2d(16, 32, 5, 1, 2), # output (32, 14, 14)\n",
    "            nn.ReLU(),                  # activation\n",
    "            nn.Conv2d(32, 32, 5, 1, 2),\n",
    "            nn.ReLU(),                  # activation\n",
    "            nn.MaxPool2d(2)             # output (32, 7, 7)\n",
    "        )\n",
    "        self.conv3 = nn.Sequential(\n",
    "            nn.Conv2d(32, 64, 5, 1, 2), # output (64, 7, 7); padding preserves the 7x7 spatial size\n",
    "            nn.ReLU(),                  # activation\n",
    "        )\n",
    "        self.out = nn.Linear(64 * 7 * 7, 10) # fully connected layer -> 10 class logits\n",
    "        \n",
    "    def forward(self, x):\n",
    "        x = self.conv1(x)\n",
    "        x = self.conv2(x)\n",
    "        x = self.conv3(x)\n",
    "        x = x.view(x.size(0), -1)          # flatten to (batch_size, 64*7*7)\n",
    "        output = self.out(x)\n",
    "        return output"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 准确率作为评估标准"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "def accuracy(predictions, labels):\n",
    "    \"\"\"Return (number of correct predictions, batch size) for one batch of logits.\"\"\"\n",
    "    predicted_classes = predictions.data.argmax(dim=1)\n",
    "    correct = (predicted_classes == labels.data.view_as(predicted_classes)).sum()\n",
    "    return correct, len(labels)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch: 0, Loss: 2.299129, Train Acc: 0.187500, Val Acc: 0.097400\n",
      "Epoch: 0, Loss: 0.212511, Train Acc: 0.783106, Val Acc: 0.944000\n",
      "Epoch: 0, Loss: 0.180098, Train Acc: 0.865050, Val Acc: 0.966400\n",
      "Epoch: 0, Loss: 0.127031, Train Acc: 0.896076, Val Acc: 0.972700\n",
      "Epoch: 0, Loss: 0.090177, Train Acc: 0.914121, Val Acc: 0.974000\n",
      "Epoch: 0, Loss: 0.314028, Train Acc: 0.926366, Val Acc: 0.981900\n",
      "Epoch: 0, Loss: 0.058789, Train Acc: 0.934874, Val Acc: 0.982800\n",
      "Epoch: 0, Loss: 0.069536, Train Acc: 0.941423, Val Acc: 0.985400\n",
      "Epoch: 0, Loss: 0.049931, Train Acc: 0.945810, Val Acc: 0.987100\n",
      "Epoch: 0, Loss: 0.040476, Train Acc: 0.949605, Val Acc: 0.984100\n",
      "Epoch: 1, Loss: 0.040903, Train Acc: 0.984375, Val Acc: 0.983000\n",
      "Epoch: 1, Loss: 0.132987, Train Acc: 0.985458, Val Acc: 0.988800\n",
      "Epoch: 1, Loss: 0.030000, Train Acc: 0.985463, Val Acc: 0.984600\n",
      "Epoch: 1, Loss: 0.005089, Train Acc: 0.985777, Val Acc: 0.985100\n",
      "Epoch: 1, Loss: 0.010545, Train Acc: 0.984765, Val Acc: 0.988600\n",
      "Epoch: 1, Loss: 0.012719, Train Acc: 0.985404, Val Acc: 0.989600\n",
      "Epoch: 1, Loss: 0.010555, Train Acc: 0.985467, Val Acc: 0.989400\n",
      "Epoch: 1, Loss: 0.032470, Train Acc: 0.985400, Val Acc: 0.991800\n",
      "Epoch: 1, Loss: 0.044855, Train Acc: 0.985682, Val Acc: 0.991600\n",
      "Epoch: 1, Loss: 0.013289, Train Acc: 0.985988, Val Acc: 0.990500\n",
      "Epoch: 2, Loss: 0.048866, Train Acc: 0.968750, Val Acc: 0.988100\n",
      "Epoch: 2, Loss: 0.065872, Train Acc: 0.990718, Val Acc: 0.990800\n",
      "Epoch: 2, Loss: 0.007979, Train Acc: 0.989817, Val Acc: 0.991600\n",
      "Epoch: 2, Loss: 0.061643, Train Acc: 0.989877, Val Acc: 0.990200\n",
      "Epoch: 2, Loss: 0.001040, Train Acc: 0.989752, Val Acc: 0.988700\n",
      "Epoch: 2, Loss: 0.082380, Train Acc: 0.990020, Val Acc: 0.991000\n",
      "Epoch: 2, Loss: 0.079464, Train Acc: 0.990199, Val Acc: 0.990900\n",
      "Epoch: 2, Loss: 0.050243, Train Acc: 0.990237, Val Acc: 0.992200\n",
      "Epoch: 2, Loss: 0.053389, Train Acc: 0.990559, Val Acc: 0.992900\n",
      "Epoch: 2, Loss: 0.112476, Train Acc: 0.990479, Val Acc: 0.992200\n"
     ]
    }
   ],
   "source": [
    "net = CNN()\n",
    "criterion = nn.CrossEntropyLoss()\n",
    "optimizer = optim.Adam(net.parameters(), lr=0.001)\n",
    "\n",
    "for epoch in range(num_epochs):\n",
    "    train_rights = []   # list of (correct, batch_size) tuples for this epoch\n",
    "    for batch_idx, (data, target) in enumerate(train_loader):\n",
    "        net.train()     # re-enable training mode (eval() is set during validation below)\n",
    "        output = net(data)\n",
    "        loss = criterion(output, target)\n",
    "        optimizer.zero_grad()\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "        train_rights.append(accuracy(output, target))\n",
    "\n",
    "        # Evaluate on the full test set every 100 batches\n",
    "        if batch_idx % 100 == 0:\n",
    "            net.eval()\n",
    "            val_rights = []\n",
    "            # no_grad: skip autograd graph building during evaluation (saves memory/time);\n",
    "            # distinct loop variable names avoid shadowing the training batch (data, target)\n",
    "            with torch.no_grad():\n",
    "                for val_data, val_target in test_loader:\n",
    "                    val_output = net(val_data)\n",
    "                    val_rights.append(accuracy(val_output, val_target))\n",
    "\n",
    "            train_r = (sum(tup[0] for tup in train_rights), sum(tup[1] for tup in train_rights))\n",
    "            val_r = (sum(tup[0] for tup in val_rights), sum(tup[1] for tup in val_rights))\n",
    "\n",
    "            print(\"Epoch: %d, Loss: %f, Train Acc: %f, Val Acc: %f\" % (epoch, loss.item(), train_r[0].item() / train_r[1], val_r[0].item() / val_r[1]))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "rob",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.16"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
