{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# LeNet算法复现  \n",
    "![LeNet-5](https://pic4.zhimg.com/80/v2-494332f64a738674daffbe676ad18d63_720w.webp)  \n",
    "\n",
    "卷积层输出： \n",
    "  \n",
    "$o= \\lfloor \\frac{n+2p-f}{s} \\rfloor+ 1$  \n",
    "  \n",
    "池化层输出：\n",
    "\n",
    "$o= \\lfloor \\frac{n+2p-f}{s} \\rfloor+ 1$  \n",
    "\n",
    "$n代表图片大小，p代表填充，f代表卷积核，s代表步长，o代表输出图片大小$  \n",
    "\n",
    "池化输出大小=[（输入大小-卷积核（过滤器）大小）／步长]+1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torchvision\n",
    "import torch.nn as nn\n",
    "from torch.utils.data import DataLoader\n",
    "device = ('cuda' if torch.cuda.is_available() else 'cpu')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
    "#获取数据\n",
    "train_data = torchvision.datasets.MNIST(\n",
    "    root='../data/', \n",
    "    train=True, \n",
    "    transform=torchvision.transforms.ToTensor(), \n",
    "    download=False)\n",
    "\n",
    "test_data = torchvision.datasets.MNIST(\n",
    "    root='../data/', \n",
    "    train=False, \n",
    "    transform=torchvision.transforms.ToTensor(), \n",
    "    download=False)\n",
    "\n",
    "#对数据进行分批次训练\n",
    "batch_size = 64\n",
    "train_dataloader = DataLoader(train_data, batch_size=batch_size)\n",
    "test_dataloader = DataLoader(test_data, batch_size=batch_size)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 定义模型\n",
    "class LeNet(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(LeNet, self).__init__()\n",
    "        self.layer1 = nn.Sequential(\n",
    "            nn.Conv2d(1, 6, kernel_size=5, stride=1, padding= 2), #1*28*28-- 6*28*28\n",
    "            # 设置 padding=2 使得结果为 28*28 (28+2*2-5)/1+ 1\n",
    "            nn.ReLU(),\n",
    "            nn.MaxPool2d(kernel_size=2), # 6*14*14\n",
    "        )\n",
    "        self.layer2 = nn.Sequential(\n",
    "            nn.Conv2d(6, 16, kernel_size=5, stride=1, padding= 0), # 16*10*10\n",
    "            nn.ReLU(),\n",
    "            nn.MaxPool2d(kernel_size=2), # 16*5*5\n",
    "        )\n",
    "        self.layer3 = nn.Sequential(\n",
    "            nn.Conv2d(16, 120, kernel_size=5, stride=1, padding=0), #120*1\n",
    "            nn.ReLU()\n",
    "        )\n",
    "        self.fc1 = nn.Sequential(\n",
    "            nn.Linear(120, 84),\n",
    "            nn.ReLU(),\n",
    "            nn.Linear(84, 10)\n",
    "        )\n",
    "    def forward(self, x):\n",
    "        x = self.layer1(x)\n",
    "        x = self.layer2(x)\n",
    "        x = self.layer3(x)\n",
    "        x = x.view(x.size(0), -1)\n",
    "        x = self.fc1(x)\n",
    "        return x\n",
    "\n",
    "def evaluate_accuracy(data, model):\n",
    "    \"\"\"\n",
    "    计算测试集训练效果\n",
    "    \"\"\"\n",
    "    acc_sum, n = 0.0, 0\n",
    "    model.eval()\n",
    "    with torch.no_grad():\n",
    "        for x, y in data:\n",
    "            x, y = x.to(device), y.to(device)\n",
    "            acc_sum += (model(x).argmax(1)== y).float().sum().item() #计算正确的个数\n",
    "            n += y.shape[0] #计算全部数据个数\n",
    "    return acc_sum/ n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "#定义损失函数以及优化函数\n",
    "model = LeNet().to(device)\n",
    "loss_fn = nn.CrossEntropyLoss()\n",
    "optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train(data, model, loss_fn, optimizer):\n",
    "    size = len(data.dataset)\n",
    "    model.train()\n",
    "    for batch, (x,y) in enumerate(data):\n",
    "        x, y = x.to(device), y.to(device)\n",
    "\n",
    "        pred = model(x)\n",
    "        loss = loss_fn(pred, y)\n",
    "\n",
    "        optimizer.zero_grad()\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "        \n",
    "        if batch %100 == 0:\n",
    "            loss, current = loss.item(), (batch+ 1)* len(x) #loss为：tensor(127.4510, device='cuda:0', grad_fn=<DivBackward1>)所以通过item()去得到他们的具体数值\n",
    "            print(f\"loss: {loss:>7f}  [{current:>5d}/{size:>5d}]\")\n",
    "    \n",
    "\n",
    "def test(data, model, loss_fn):\n",
    "    size = len(data.dataset)\n",
    "    num_batches = len(data)\n",
    "    model.eval()\n",
    "    test_loss, correct = 0, 0\n",
    "    with torch.no_grad():\n",
    "        for x, y in data:\n",
    "            x, y = x.to(device), y.to(device)\n",
    "            pred = model(x)\n",
    "            test_loss += loss_fn(pred, y).item()\n",
    "            correct += (pred.argmax(1) == y).type(torch.float).sum().item()\n",
    "    test_loss /= num_batches\n",
    "    correct /= size\n",
    "    print(f\"Test Error: \\n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \\n\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1\n",
      "-------------------------------\n",
      "loss: 2.272300  [   64/60000]\n",
      "loss: 2.269048  [ 6464/60000]\n",
      "loss: 2.278383  [12864/60000]\n",
      "loss: 2.273706  [19264/60000]\n",
      "loss: 2.274111  [25664/60000]\n",
      "loss: 2.270103  [32064/60000]\n",
      "loss: 2.254435  [38464/60000]\n",
      "loss: 2.277729  [44864/60000]\n",
      "loss: 2.270250  [51264/60000]\n",
      "loss: 2.255527  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 30.1%, Avg loss: 2.258554 \n",
      "\n",
      "Epoch 2\n",
      "-------------------------------\n",
      "loss: 2.257055  [   64/60000]\n",
      "loss: 2.248479  [ 6464/60000]\n",
      "loss: 2.262434  [12864/60000]\n",
      "loss: 2.252451  [19264/60000]\n",
      "loss: 2.254298  [25664/60000]\n",
      "loss: 2.245104  [32064/60000]\n",
      "loss: 2.219618  [38464/60000]\n",
      "loss: 2.255465  [44864/60000]\n",
      "loss: 2.239263  [51264/60000]\n",
      "loss: 2.216094  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 38.5%, Avg loss: 2.220405 \n",
      "\n",
      "Epoch 3\n",
      "-------------------------------\n",
      "loss: 2.220529  [   64/60000]\n",
      "loss: 2.198015  [ 6464/60000]\n",
      "loss: 2.226407  [12864/60000]\n",
      "loss: 2.196418  [19264/60000]\n",
      "loss: 2.201220  [25664/60000]\n",
      "loss: 2.175020  [32064/60000]\n",
      "loss: 2.122310  [38464/60000]\n",
      "loss: 2.191850  [44864/60000]\n",
      "loss: 2.142621  [51264/60000]\n",
      "loss: 2.095963  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 44.2%, Avg loss: 2.098208 \n",
      "\n",
      "Epoch 4\n",
      "-------------------------------\n",
      "loss: 2.103936  [   64/60000]\n",
      "loss: 2.031757  [ 6464/60000]\n",
      "loss: 2.105584  [12864/60000]\n",
      "loss: 2.008843  [19264/60000]\n",
      "loss: 2.016967  [25664/60000]\n",
      "loss: 1.924333  [32064/60000]\n",
      "loss: 1.748101  [38464/60000]\n",
      "loss: 1.972561  [44864/60000]\n",
      "loss: 1.804982  [51264/60000]\n",
      "loss: 1.649397  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 54.8%, Avg loss: 1.677555 \n",
      "\n",
      "Epoch 5\n",
      "-------------------------------\n",
      "loss: 1.742907  [   64/60000]\n",
      "loss: 1.542034  [ 6464/60000]\n",
      "loss: 1.671715  [12864/60000]\n",
      "loss: 1.490320  [19264/60000]\n",
      "loss: 1.450628  [25664/60000]\n",
      "loss: 1.320244  [32064/60000]\n",
      "loss: 1.038140  [38464/60000]\n",
      "loss: 1.442216  [44864/60000]\n",
      "loss: 1.164547  [51264/60000]\n",
      "loss: 1.009380  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 70.4%, Avg loss: 1.018695 \n",
      "\n",
      "Epoch 6\n",
      "-------------------------------\n",
      "loss: 1.148516  [   64/60000]\n",
      "loss: 0.897734  [ 6464/60000]\n",
      "loss: 0.964151  [12864/60000]\n",
      "loss: 0.832320  [19264/60000]\n",
      "loss: 0.885078  [25664/60000]\n",
      "loss: 0.752632  [32064/60000]\n",
      "loss: 0.629204  [38464/60000]\n",
      "loss: 0.919603  [44864/60000]\n",
      "loss: 0.766702  [51264/60000]\n",
      "loss: 0.731564  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 79.8%, Avg loss: 0.670018 \n",
      "\n",
      "Epoch 7\n",
      "-------------------------------\n",
      "loss: 0.817475  [   64/60000]\n",
      "loss: 0.596555  [ 6464/60000]\n",
      "loss: 0.593916  [12864/60000]\n",
      "loss: 0.590386  [19264/60000]\n",
      "loss: 0.677329  [25664/60000]\n",
      "loss: 0.536581  [32064/60000]\n",
      "loss: 0.469469  [38464/60000]\n",
      "loss: 0.676546  [44864/60000]\n",
      "loss: 0.615217  [51264/60000]\n",
      "loss: 0.625991  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 84.1%, Avg loss: 0.528252 \n",
      "\n",
      "Epoch 8\n",
      "-------------------------------\n",
      "loss: 0.682580  [   64/60000]\n",
      "loss: 0.482965  [ 6464/60000]\n",
      "loss: 0.444592  [12864/60000]\n",
      "loss: 0.502215  [19264/60000]\n",
      "loss: 0.565583  [25664/60000]\n",
      "loss: 0.454061  [32064/60000]\n",
      "loss: 0.375289  [38464/60000]\n",
      "loss: 0.562539  [44864/60000]\n",
      "loss: 0.538687  [51264/60000]\n",
      "loss: 0.565120  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 86.5%, Avg loss: 0.452470 \n",
      "\n",
      "Epoch 9\n",
      "-------------------------------\n",
      "loss: 0.595269  [   64/60000]\n",
      "loss: 0.420472  [ 6464/60000]\n",
      "loss: 0.369769  [12864/60000]\n",
      "loss: 0.452824  [19264/60000]\n",
      "loss: 0.485208  [25664/60000]\n",
      "loss: 0.413044  [32064/60000]\n",
      "loss: 0.316019  [38464/60000]\n",
      "loss: 0.505066  [44864/60000]\n",
      "loss: 0.483565  [51264/60000]\n",
      "loss: 0.524046  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 87.9%, Avg loss: 0.403727 \n",
      "\n",
      "Epoch 10\n",
      "-------------------------------\n",
      "loss: 0.527021  [   64/60000]\n",
      "loss: 0.376387  [ 6464/60000]\n",
      "loss: 0.322572  [12864/60000]\n",
      "loss: 0.420881  [19264/60000]\n",
      "loss: 0.419712  [25664/60000]\n",
      "loss: 0.387603  [32064/60000]\n",
      "loss: 0.278192  [38464/60000]\n",
      "loss: 0.469517  [44864/60000]\n",
      "loss: 0.439173  [51264/60000]\n",
      "loss: 0.490684  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 89.2%, Avg loss: 0.367917 \n",
      "\n",
      "Epoch 11\n",
      "-------------------------------\n",
      "loss: 0.470405  [   64/60000]\n",
      "loss: 0.342959  [ 6464/60000]\n",
      "loss: 0.287529  [12864/60000]\n",
      "loss: 0.399545  [19264/60000]\n",
      "loss: 0.365872  [25664/60000]\n",
      "loss: 0.368633  [32064/60000]\n",
      "loss: 0.253037  [38464/60000]\n",
      "loss: 0.446631  [44864/60000]\n",
      "loss: 0.399979  [51264/60000]\n",
      "loss: 0.463988  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 90.0%, Avg loss: 0.340112 \n",
      "\n",
      "Epoch 12\n",
      "-------------------------------\n",
      "loss: 0.425155  [   64/60000]\n",
      "loss: 0.320384  [ 6464/60000]\n",
      "loss: 0.260997  [12864/60000]\n",
      "loss: 0.385833  [19264/60000]\n",
      "loss: 0.325431  [25664/60000]\n",
      "loss: 0.353819  [32064/60000]\n",
      "loss: 0.232314  [38464/60000]\n",
      "loss: 0.432795  [44864/60000]\n",
      "loss: 0.367541  [51264/60000]\n",
      "loss: 0.440752  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 90.5%, Avg loss: 0.317997 \n",
      "\n",
      "Epoch 13\n",
      "-------------------------------\n",
      "loss: 0.384871  [   64/60000]\n",
      "loss: 0.303614  [ 6464/60000]\n",
      "loss: 0.238483  [12864/60000]\n",
      "loss: 0.375285  [19264/60000]\n",
      "loss: 0.294723  [25664/60000]\n",
      "loss: 0.339454  [32064/60000]\n",
      "loss: 0.216060  [38464/60000]\n",
      "loss: 0.420999  [44864/60000]\n",
      "loss: 0.339942  [51264/60000]\n",
      "loss: 0.419056  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 90.9%, Avg loss: 0.299733 \n",
      "\n",
      "Epoch 14\n",
      "-------------------------------\n",
      "loss: 0.352212  [   64/60000]\n",
      "loss: 0.293606  [ 6464/60000]\n",
      "loss: 0.219635  [12864/60000]\n",
      "loss: 0.366890  [19264/60000]\n",
      "loss: 0.269858  [25664/60000]\n",
      "loss: 0.325129  [32064/60000]\n",
      "loss: 0.201280  [38464/60000]\n",
      "loss: 0.413026  [44864/60000]\n",
      "loss: 0.315552  [51264/60000]\n",
      "loss: 0.399812  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 91.4%, Avg loss: 0.284142 \n",
      "\n",
      "Epoch 15\n",
      "-------------------------------\n",
      "loss: 0.324788  [   64/60000]\n",
      "loss: 0.286184  [ 6464/60000]\n",
      "loss: 0.206210  [12864/60000]\n",
      "loss: 0.359954  [19264/60000]\n",
      "loss: 0.252210  [25664/60000]\n",
      "loss: 0.310987  [32064/60000]\n",
      "loss: 0.189725  [38464/60000]\n",
      "loss: 0.404570  [44864/60000]\n",
      "loss: 0.295089  [51264/60000]\n",
      "loss: 0.381713  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 91.8%, Avg loss: 0.270155 \n",
      "\n",
      "Epoch 16\n",
      "-------------------------------\n",
      "loss: 0.299573  [   64/60000]\n",
      "loss: 0.280513  [ 6464/60000]\n",
      "loss: 0.195589  [12864/60000]\n",
      "loss: 0.352637  [19264/60000]\n",
      "loss: 0.236667  [25664/60000]\n",
      "loss: 0.297527  [32064/60000]\n",
      "loss: 0.178814  [38464/60000]\n",
      "loss: 0.395350  [44864/60000]\n",
      "loss: 0.276259  [51264/60000]\n",
      "loss: 0.364834  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 92.2%, Avg loss: 0.257590 \n",
      "\n",
      "Epoch 17\n",
      "-------------------------------\n",
      "loss: 0.276401  [   64/60000]\n",
      "loss: 0.274002  [ 6464/60000]\n",
      "loss: 0.187289  [12864/60000]\n",
      "loss: 0.345724  [19264/60000]\n",
      "loss: 0.222929  [25664/60000]\n",
      "loss: 0.285335  [32064/60000]\n",
      "loss: 0.167951  [38464/60000]\n",
      "loss: 0.385595  [44864/60000]\n",
      "loss: 0.259681  [51264/60000]\n",
      "loss: 0.348546  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 92.5%, Avg loss: 0.246200 \n",
      "\n",
      "Epoch 18\n",
      "-------------------------------\n",
      "loss: 0.254310  [   64/60000]\n",
      "loss: 0.267883  [ 6464/60000]\n",
      "loss: 0.180744  [12864/60000]\n",
      "loss: 0.337245  [19264/60000]\n",
      "loss: 0.210713  [25664/60000]\n",
      "loss: 0.274544  [32064/60000]\n",
      "loss: 0.157922  [38464/60000]\n",
      "loss: 0.376551  [44864/60000]\n",
      "loss: 0.244212  [51264/60000]\n",
      "loss: 0.333477  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 92.9%, Avg loss: 0.235646 \n",
      "\n",
      "Epoch 19\n",
      "-------------------------------\n",
      "loss: 0.234088  [   64/60000]\n",
      "loss: 0.262315  [ 6464/60000]\n",
      "loss: 0.175059  [12864/60000]\n",
      "loss: 0.328604  [19264/60000]\n",
      "loss: 0.199100  [25664/60000]\n",
      "loss: 0.265312  [32064/60000]\n",
      "loss: 0.149548  [38464/60000]\n",
      "loss: 0.366871  [44864/60000]\n",
      "loss: 0.231003  [51264/60000]\n",
      "loss: 0.320089  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 93.3%, Avg loss: 0.225980 \n",
      "\n",
      "Epoch 20\n",
      "-------------------------------\n",
      "loss: 0.215904  [   64/60000]\n",
      "loss: 0.256772  [ 6464/60000]\n",
      "loss: 0.169321  [12864/60000]\n",
      "loss: 0.320245  [19264/60000]\n",
      "loss: 0.187060  [25664/60000]\n",
      "loss: 0.256086  [32064/60000]\n",
      "loss: 0.142840  [38464/60000]\n",
      "loss: 0.357499  [44864/60000]\n",
      "loss: 0.218770  [51264/60000]\n",
      "loss: 0.307769  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 93.6%, Avg loss: 0.216964 \n",
      "\n",
      "Epoch 21\n",
      "-------------------------------\n",
      "loss: 0.200837  [   64/60000]\n",
      "loss: 0.252707  [ 6464/60000]\n",
      "loss: 0.163498  [12864/60000]\n",
      "loss: 0.312334  [19264/60000]\n",
      "loss: 0.176415  [25664/60000]\n",
      "loss: 0.248623  [32064/60000]\n",
      "loss: 0.136781  [38464/60000]\n",
      "loss: 0.347503  [44864/60000]\n",
      "loss: 0.208322  [51264/60000]\n",
      "loss: 0.297342  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 93.7%, Avg loss: 0.208597 \n",
      "\n",
      "Epoch 22\n",
      "-------------------------------\n",
      "loss: 0.187000  [   64/60000]\n",
      "loss: 0.248547  [ 6464/60000]\n",
      "loss: 0.158774  [12864/60000]\n",
      "loss: 0.304266  [19264/60000]\n",
      "loss: 0.165540  [25664/60000]\n",
      "loss: 0.241203  [32064/60000]\n",
      "loss: 0.132309  [38464/60000]\n",
      "loss: 0.337949  [44864/60000]\n",
      "loss: 0.199489  [51264/60000]\n",
      "loss: 0.288586  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 94.0%, Avg loss: 0.200504 \n",
      "\n",
      "Epoch 23\n",
      "-------------------------------\n",
      "loss: 0.175113  [   64/60000]\n",
      "loss: 0.244488  [ 6464/60000]\n",
      "loss: 0.153888  [12864/60000]\n",
      "loss: 0.296739  [19264/60000]\n",
      "loss: 0.156701  [25664/60000]\n",
      "loss: 0.233408  [32064/60000]\n",
      "loss: 0.127569  [38464/60000]\n",
      "loss: 0.327803  [44864/60000]\n",
      "loss: 0.192073  [51264/60000]\n",
      "loss: 0.280420  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 94.2%, Avg loss: 0.193056 \n",
      "\n",
      "Epoch 24\n",
      "-------------------------------\n",
      "loss: 0.164669  [   64/60000]\n",
      "loss: 0.238974  [ 6464/60000]\n",
      "loss: 0.149055  [12864/60000]\n",
      "loss: 0.290120  [19264/60000]\n",
      "loss: 0.146750  [25664/60000]\n",
      "loss: 0.226248  [32064/60000]\n",
      "loss: 0.123039  [38464/60000]\n",
      "loss: 0.318942  [44864/60000]\n",
      "loss: 0.186323  [51264/60000]\n",
      "loss: 0.274440  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 94.4%, Avg loss: 0.186250 \n",
      "\n",
      "Epoch 25\n",
      "-------------------------------\n",
      "loss: 0.155306  [   64/60000]\n",
      "loss: 0.234572  [ 6464/60000]\n",
      "loss: 0.143423  [12864/60000]\n",
      "loss: 0.284805  [19264/60000]\n",
      "loss: 0.136558  [25664/60000]\n",
      "loss: 0.218994  [32064/60000]\n",
      "loss: 0.119276  [38464/60000]\n",
      "loss: 0.311303  [44864/60000]\n",
      "loss: 0.181491  [51264/60000]\n",
      "loss: 0.268996  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 94.6%, Avg loss: 0.179679 \n",
      "\n",
      "Epoch 26\n",
      "-------------------------------\n",
      "loss: 0.147111  [   64/60000]\n",
      "loss: 0.229742  [ 6464/60000]\n",
      "loss: 0.138638  [12864/60000]\n",
      "loss: 0.279645  [19264/60000]\n",
      "loss: 0.127543  [25664/60000]\n",
      "loss: 0.212860  [32064/60000]\n",
      "loss: 0.115741  [38464/60000]\n",
      "loss: 0.303846  [44864/60000]\n",
      "loss: 0.177719  [51264/60000]\n",
      "loss: 0.264207  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 94.7%, Avg loss: 0.173709 \n",
      "\n",
      "Epoch 27\n",
      "-------------------------------\n",
      "loss: 0.139985  [   64/60000]\n",
      "loss: 0.224278  [ 6464/60000]\n",
      "loss: 0.134438  [12864/60000]\n",
      "loss: 0.275243  [19264/60000]\n",
      "loss: 0.119117  [25664/60000]\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "\u001b[1;32m~\\AppData\\Local\\Temp\\ipykernel_7180\\2134615688.py\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m      3\u001b[0m     \u001b[1;32mif\u001b[0m \u001b[0mepochs\u001b[0m\u001b[1;33m%\u001b[0m\u001b[1;36m10\u001b[0m \u001b[1;33m==\u001b[0m \u001b[1;36m0\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      4\u001b[0m         \u001b[0mprint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34mf\"Epoch {t+1}\\n-------------------------------\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 5\u001b[1;33m         \u001b[0mtrain\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtrain_dataloader\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmodel\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mloss_fn\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0moptimizer\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m      6\u001b[0m         \u001b[0mtest\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtest_dataloader\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmodel\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mloss_fn\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      7\u001b[0m \u001b[0mprint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"Done!\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m~\\AppData\\Local\\Temp\\ipykernel_7180\\2395823617.py\u001b[0m in \u001b[0;36mtrain\u001b[1;34m(data, model, loss_fn, optimizer)\u001b[0m\n\u001b[0;32m      9\u001b[0m         \u001b[0mx\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0my\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mx\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mto\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdevice\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0my\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mto\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdevice\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     10\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 11\u001b[1;33m         \u001b[0mpred\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mmodel\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     12\u001b[0m         \u001b[0mloss\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mloss_fn\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mpred\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0my\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     13\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32me:\\Anaconda\\lib\\site-packages\\torch\\nn\\modules\\module.py\u001b[0m in \u001b[0;36m_call_impl\u001b[1;34m(self, *input, **kwargs)\u001b[0m\n\u001b[0;32m   1192\u001b[0m         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks\n\u001b[0;32m   1193\u001b[0m                 or _global_forward_hooks or _global_forward_pre_hooks):\n\u001b[1;32m-> 1194\u001b[1;33m             \u001b[1;32mreturn\u001b[0m \u001b[0mforward_call\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   1195\u001b[0m         \u001b[1;31m# Do not call functions when jit is used\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1196\u001b[0m         \u001b[0mfull_backward_hooks\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mnon_full_backward_hooks\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m~\\AppData\\Local\\Temp\\ipykernel_7180\\2835041471.py\u001b[0m in \u001b[0;36mforward\u001b[1;34m(self, x)\u001b[0m\n\u001b[0;32m     26\u001b[0m         \u001b[0mx\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mlayer1\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     27\u001b[0m         \u001b[0mx\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mlayer2\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 28\u001b[1;33m         \u001b[0mx\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mlayer3\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     29\u001b[0m         \u001b[0mx\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mx\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mview\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msize\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m-\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     30\u001b[0m         \u001b[0mx\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfc1\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32me:\\Anaconda\\lib\\site-packages\\torch\\nn\\modules\\module.py\u001b[0m in \u001b[0;36m_call_impl\u001b[1;34m(self, *input, **kwargs)\u001b[0m\n\u001b[0;32m   1192\u001b[0m         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks\n\u001b[0;32m   1193\u001b[0m                 or _global_forward_hooks or _global_forward_pre_hooks):\n\u001b[1;32m-> 1194\u001b[1;33m             \u001b[1;32mreturn\u001b[0m \u001b[0mforward_call\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   1195\u001b[0m         \u001b[1;31m# Do not call functions when jit is used\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1196\u001b[0m         \u001b[0mfull_backward_hooks\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mnon_full_backward_hooks\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32me:\\Anaconda\\lib\\site-packages\\torch\\nn\\modules\\container.py\u001b[0m in \u001b[0;36mforward\u001b[1;34m(self, input)\u001b[0m\n\u001b[0;32m    202\u001b[0m     \u001b[1;32mdef\u001b[0m \u001b[0mforward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0minput\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    203\u001b[0m         \u001b[1;32mfor\u001b[0m \u001b[0mmodule\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 204\u001b[1;33m             \u001b[0minput\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mmodule\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    205\u001b[0m         \u001b[1;32mreturn\u001b[0m \u001b[0minput\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    206\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32me:\\Anaconda\\lib\\site-packages\\torch\\nn\\modules\\module.py\u001b[0m in \u001b[0;36m_call_impl\u001b[1;34m(self, *input, **kwargs)\u001b[0m\n\u001b[0;32m   1192\u001b[0m         if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks\n\u001b[0;32m   1193\u001b[0m                 or _global_forward_hooks or _global_forward_pre_hooks):\n\u001b[1;32m-> 1194\u001b[1;33m             \u001b[1;32mreturn\u001b[0m \u001b[0mforward_call\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m   1195\u001b[0m         \u001b[1;31m# Do not call functions when jit is used\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   1196\u001b[0m         \u001b[0mfull_backward_hooks\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mnon_full_backward_hooks\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32me:\\Anaconda\\lib\\site-packages\\torch\\nn\\modules\\conv.py\u001b[0m in \u001b[0;36mforward\u001b[1;34m(self, input)\u001b[0m\n\u001b[0;32m    461\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    462\u001b[0m     \u001b[1;32mdef\u001b[0m \u001b[0mforward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0minput\u001b[0m\u001b[1;33m:\u001b[0m \u001b[0mTensor\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m->\u001b[0m \u001b[0mTensor\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 463\u001b[1;33m         \u001b[1;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_conv_forward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0minput\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mweight\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mbias\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    464\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    465\u001b[0m \u001b[1;32mclass\u001b[0m \u001b[0mConv3d\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0m_ConvNd\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32me:\\Anaconda\\lib\\site-packages\\torch\\nn\\modules\\conv.py\u001b[0m in \u001b[0;36m_conv_forward\u001b[1;34m(self, input, weight, bias)\u001b[0m\n\u001b[0;32m    457\u001b[0m                             \u001b[0mweight\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mbias\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mstride\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    458\u001b[0m                             _pair(0), self.dilation, self.groups)\n\u001b[1;32m--> 459\u001b[1;33m         return F.conv2d(input, weight, bias, self.stride,\n\u001b[0m\u001b[0;32m    460\u001b[0m                         self.padding, self.dilation, self.groups)\n\u001b[0;32m    461\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "# Train for `epochs` epochs, evaluating on the test set after each one.\n",
    "epochs = 21\n",
    "for t in range(epochs):\n",
    "    # Bug fix: the previous guard `if epochs/10 == 0:` was always False\n",
    "    # (21/10 == 2.1 != 0), so train/test never ran. Run them every epoch.\n",
    "    print(f\"Epoch {t+1}\\n-------------------------------\")\n",
    "    train(train_dataloader, model, loss_fn, optimizer)\n",
    "    test(test_dataloader, model, loss_fn)\n",
    "print(\"Done!\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch:1\n",
      "\n",
      "loss: 0.098646  [   64/60000]\n",
      "loss: 0.157770  [ 6464/60000]\n",
      "loss: 0.106539  [12864/60000]\n",
      "loss: 0.215023  [19264/60000]\n",
      "loss: 0.100605  [25664/60000]\n",
      "loss: 0.150523  [32064/60000]\n",
      "loss: 0.102860  [38464/60000]\n",
      "loss: 0.228336  [44864/60000]\n",
      "loss: 0.174099  [51264/60000]\n",
      "loss: 0.196144  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 95.9% \n",
      "\n",
      "Epoch:2\n",
      "\n",
      "loss: 0.107013  [   64/60000]\n",
      "loss: 0.154821  [ 6464/60000]\n",
      "loss: 0.105823  [12864/60000]\n",
      "loss: 0.208457  [19264/60000]\n",
      "loss: 0.096202  [25664/60000]\n",
      "loss: 0.145667  [32064/60000]\n",
      "loss: 0.101881  [38464/60000]\n",
      "loss: 0.221606  [44864/60000]\n",
      "loss: 0.172308  [51264/60000]\n",
      "loss: 0.194549  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 96.0% \n",
      "\n",
      "Epoch:3\n",
      "\n",
      "loss: 0.102882  [   64/60000]\n",
      "loss: 0.150609  [ 6464/60000]\n",
      "loss: 0.103246  [12864/60000]\n",
      "loss: 0.202459  [19264/60000]\n",
      "loss: 0.090362  [25664/60000]\n",
      "loss: 0.141385  [32064/60000]\n",
      "loss: 0.101247  [38464/60000]\n",
      "loss: 0.215352  [44864/60000]\n",
      "loss: 0.170169  [51264/60000]\n",
      "loss: 0.192590  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 96.1% \n",
      "\n",
      "Epoch:4\n",
      "\n",
      "loss: 0.099215  [   64/60000]\n",
      "loss: 0.146702  [ 6464/60000]\n",
      "loss: 0.100990  [12864/60000]\n",
      "loss: 0.196955  [19264/60000]\n",
      "loss: 0.085184  [25664/60000]\n",
      "loss: 0.137365  [32064/60000]\n",
      "loss: 0.100910  [38464/60000]\n",
      "loss: 0.210799  [44864/60000]\n",
      "loss: 0.167222  [51264/60000]\n",
      "loss: 0.190774  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 96.2% \n",
      "\n",
      "Epoch:5\n",
      "\n",
      "loss: 0.095846  [   64/60000]\n",
      "loss: 0.143141  [ 6464/60000]\n",
      "loss: 0.098452  [12864/60000]\n",
      "loss: 0.191900  [19264/60000]\n",
      "loss: 0.080353  [25664/60000]\n",
      "loss: 0.133666  [32064/60000]\n",
      "loss: 0.100417  [38464/60000]\n",
      "loss: 0.206443  [44864/60000]\n",
      "loss: 0.165251  [51264/60000]\n",
      "loss: 0.189182  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 96.4% \n",
      "\n",
      "Epoch:6\n",
      "\n",
      "loss: 0.092713  [   64/60000]\n",
      "loss: 0.139563  [ 6464/60000]\n",
      "loss: 0.096374  [12864/60000]\n",
      "loss: 0.187604  [19264/60000]\n",
      "loss: 0.076391  [25664/60000]\n",
      "loss: 0.130577  [32064/60000]\n",
      "loss: 0.100084  [38464/60000]\n",
      "loss: 0.201824  [44864/60000]\n",
      "loss: 0.163594  [51264/60000]\n",
      "loss: 0.187552  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 96.5% \n",
      "\n",
      "Epoch:7\n",
      "\n",
      "loss: 0.090275  [   64/60000]\n",
      "loss: 0.136202  [ 6464/60000]\n",
      "loss: 0.094576  [12864/60000]\n",
      "loss: 0.183031  [19264/60000]\n",
      "loss: 0.072579  [25664/60000]\n",
      "loss: 0.127552  [32064/60000]\n",
      "loss: 0.100191  [38464/60000]\n",
      "loss: 0.197527  [44864/60000]\n",
      "loss: 0.161685  [51264/60000]\n",
      "loss: 0.185743  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 96.5% \n",
      "\n",
      "Epoch:8\n",
      "\n",
      "loss: 0.087929  [   64/60000]\n",
      "loss: 0.132803  [ 6464/60000]\n",
      "loss: 0.093051  [12864/60000]\n",
      "loss: 0.178486  [19264/60000]\n",
      "loss: 0.069132  [25664/60000]\n",
      "loss: 0.125270  [32064/60000]\n",
      "loss: 0.100164  [38464/60000]\n",
      "loss: 0.193437  [44864/60000]\n",
      "loss: 0.159913  [51264/60000]\n",
      "loss: 0.183776  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 96.6% \n",
      "\n",
      "Epoch:9\n",
      "\n",
      "loss: 0.085961  [   64/60000]\n",
      "loss: 0.129178  [ 6464/60000]\n",
      "loss: 0.091663  [12864/60000]\n",
      "loss: 0.174406  [19264/60000]\n",
      "loss: 0.066205  [25664/60000]\n",
      "loss: 0.122413  [32064/60000]\n",
      "loss: 0.100105  [38464/60000]\n",
      "loss: 0.190159  [44864/60000]\n",
      "loss: 0.158249  [51264/60000]\n",
      "loss: 0.182137  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 96.6% \n",
      "\n",
      "Epoch:10\n",
      "\n",
      "loss: 0.083964  [   64/60000]\n",
      "loss: 0.125733  [ 6464/60000]\n",
      "loss: 0.090878  [12864/60000]\n",
      "loss: 0.169320  [19264/60000]\n",
      "loss: 0.063556  [25664/60000]\n",
      "loss: 0.120058  [32064/60000]\n",
      "loss: 0.099898  [38464/60000]\n",
      "loss: 0.186033  [44864/60000]\n",
      "loss: 0.156695  [51264/60000]\n",
      "loss: 0.180757  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 96.7% \n",
      "\n",
      "Epoch:11\n",
      "\n",
      "loss: 0.082257  [   64/60000]\n",
      "loss: 0.122557  [ 6464/60000]\n",
      "loss: 0.090394  [12864/60000]\n",
      "loss: 0.165458  [19264/60000]\n",
      "loss: 0.061349  [25664/60000]\n",
      "loss: 0.117119  [32064/60000]\n",
      "loss: 0.100087  [38464/60000]\n",
      "loss: 0.182522  [44864/60000]\n",
      "loss: 0.156071  [51264/60000]\n",
      "loss: 0.179523  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 96.8% \n",
      "\n",
      "Epoch:12\n",
      "\n",
      "loss: 0.080549  [   64/60000]\n",
      "loss: 0.119308  [ 6464/60000]\n",
      "loss: 0.089058  [12864/60000]\n",
      "loss: 0.161788  [19264/60000]\n",
      "loss: 0.059212  [25664/60000]\n",
      "loss: 0.114778  [32064/60000]\n",
      "loss: 0.099970  [38464/60000]\n",
      "loss: 0.179842  [44864/60000]\n",
      "loss: 0.154772  [51264/60000]\n",
      "loss: 0.178012  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 96.9% \n",
      "\n",
      "Epoch:13\n",
      "\n",
      "loss: 0.078873  [   64/60000]\n",
      "loss: 0.116339  [ 6464/60000]\n",
      "loss: 0.088101  [12864/60000]\n",
      "loss: 0.157712  [19264/60000]\n",
      "loss: 0.057071  [25664/60000]\n",
      "loss: 0.112539  [32064/60000]\n",
      "loss: 0.099875  [38464/60000]\n",
      "loss: 0.177357  [44864/60000]\n",
      "loss: 0.153125  [51264/60000]\n",
      "loss: 0.176277  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 96.9% \n",
      "\n",
      "Epoch:14\n",
      "\n",
      "loss: 0.077309  [   64/60000]\n",
      "loss: 0.113174  [ 6464/60000]\n",
      "loss: 0.087097  [12864/60000]\n",
      "loss: 0.154341  [19264/60000]\n",
      "loss: 0.054654  [25664/60000]\n",
      "loss: 0.110167  [32064/60000]\n",
      "loss: 0.099950  [38464/60000]\n",
      "loss: 0.174374  [44864/60000]\n",
      "loss: 0.151898  [51264/60000]\n",
      "loss: 0.174753  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 96.9% \n",
      "\n",
      "Epoch:15\n",
      "\n",
      "loss: 0.075497  [   64/60000]\n",
      "loss: 0.110737  [ 6464/60000]\n",
      "loss: 0.085666  [12864/60000]\n",
      "loss: 0.151679  [19264/60000]\n",
      "loss: 0.052687  [25664/60000]\n",
      "loss: 0.107632  [32064/60000]\n",
      "loss: 0.100248  [38464/60000]\n",
      "loss: 0.171724  [44864/60000]\n",
      "loss: 0.150230  [51264/60000]\n",
      "loss: 0.173451  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 97.0% \n",
      "\n",
      "Epoch:16\n",
      "\n",
      "loss: 0.073662  [   64/60000]\n",
      "loss: 0.107864  [ 6464/60000]\n",
      "loss: 0.084569  [12864/60000]\n",
      "loss: 0.148988  [19264/60000]\n",
      "loss: 0.050488  [25664/60000]\n",
      "loss: 0.105394  [32064/60000]\n",
      "loss: 0.100237  [38464/60000]\n",
      "loss: 0.168985  [44864/60000]\n",
      "loss: 0.148702  [51264/60000]\n",
      "loss: 0.172232  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 97.0% \n",
      "\n",
      "Epoch:17\n",
      "\n",
      "loss: 0.072101  [   64/60000]\n",
      "loss: 0.106040  [ 6464/60000]\n",
      "loss: 0.083778  [12864/60000]\n",
      "loss: 0.146514  [19264/60000]\n",
      "loss: 0.048645  [25664/60000]\n",
      "loss: 0.103209  [32064/60000]\n",
      "loss: 0.100347  [38464/60000]\n",
      "loss: 0.166806  [44864/60000]\n",
      "loss: 0.147567  [51264/60000]\n",
      "loss: 0.171285  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 97.0% \n",
      "\n",
      "Epoch:18\n",
      "\n",
      "loss: 0.070447  [   64/60000]\n",
      "loss: 0.104067  [ 6464/60000]\n",
      "loss: 0.082554  [12864/60000]\n",
      "loss: 0.143509  [19264/60000]\n",
      "loss: 0.046810  [25664/60000]\n",
      "loss: 0.101432  [32064/60000]\n",
      "loss: 0.100408  [38464/60000]\n",
      "loss: 0.164900  [44864/60000]\n",
      "loss: 0.145843  [51264/60000]\n",
      "loss: 0.170779  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 97.1% \n",
      "\n",
      "Epoch:19\n",
      "\n",
      "loss: 0.068664  [   64/60000]\n",
      "loss: 0.101738  [ 6464/60000]\n",
      "loss: 0.081756  [12864/60000]\n",
      "loss: 0.141499  [19264/60000]\n",
      "loss: 0.045196  [25664/60000]\n",
      "loss: 0.099498  [32064/60000]\n",
      "loss: 0.100201  [38464/60000]\n",
      "loss: 0.162691  [44864/60000]\n",
      "loss: 0.144729  [51264/60000]\n",
      "loss: 0.169254  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 97.2% \n",
      "\n",
      "Epoch:20\n",
      "\n",
      "loss: 0.067405  [   64/60000]\n",
      "loss: 0.100164  [ 6464/60000]\n",
      "loss: 0.081304  [12864/60000]\n",
      "loss: 0.139226  [19264/60000]\n",
      "loss: 0.043736  [25664/60000]\n",
      "loss: 0.097499  [32064/60000]\n",
      "loss: 0.100292  [38464/60000]\n",
      "loss: 0.160949  [44864/60000]\n",
      "loss: 0.143649  [51264/60000]\n",
      "loss: 0.168554  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 97.2% \n",
      "\n",
      "Epoch:21\n",
      "\n",
      "loss: 0.065803  [   64/60000]\n",
      "loss: 0.097873  [ 6464/60000]\n",
      "loss: 0.080800  [12864/60000]\n",
      "loss: 0.137347  [19264/60000]\n",
      "loss: 0.042340  [25664/60000]\n",
      "loss: 0.095061  [32064/60000]\n",
      "loss: 0.100351  [38464/60000]\n",
      "loss: 0.158768  [44864/60000]\n",
      "loss: 0.141980  [51264/60000]\n",
      "loss: 0.166981  [57664/60000]\n",
      "Test Error: \n",
      " Accuracy: 97.2% \n",
      "\n"
     ]
    }
   ],
   "source": [
    "def test(data, model):\n",
    "    \"\"\"Return classification accuracy of `model` over the DataLoader `data`.\"\"\"\n",
    "    model.eval()  # switch to evaluation mode (affects dropout/batchnorm)\n",
    "    correct = 0\n",
    "    with torch.no_grad():  # no gradients needed for evaluation\n",
    "        for x, y in data:\n",
    "            x, y = x.to(device), y.to(device)\n",
    "            pred = model(x)\n",
    "            correct += (pred.argmax(1) == y).type(torch.float).sum().item()\n",
    "    return correct / len(data.dataset)\n",
    "\n",
    "epochs = 21\n",
    "# The data pipeline is not tuned; consider regularization, dropout, or\n",
    "# residual (ResNet-style) connections for better results.\n",
    "size = len(train_dataloader.dataset)  # loop-invariant: hoisted out of the batch loop\n",
    "for epoch in range(epochs):\n",
    "    print('Epoch:{}\\n'.format(epoch+1))\n",
    "    model.train()  # re-enable training mode once per epoch (test() set eval mode)\n",
    "    for batch, (x, y) in enumerate(train_dataloader):\n",
    "        x, y = x.to(device), y.to(device)\n",
    "\n",
    "        # forward pass + loss\n",
    "        pred = model(x)\n",
    "        loss = loss_fn(pred, y)\n",
    "\n",
    "        # backward pass + parameter update\n",
    "        optimizer.zero_grad()\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "\n",
    "        if batch % 100 == 0:\n",
    "            # use a separate name so the tensor `loss` is not shadowed by a float\n",
    "            loss_val, current = loss.item(), (batch + 1) * len(x)\n",
    "            print(f\"loss: {loss_val:>7f}  [{current:>5d}/{size:>5d}]\")\n",
    "    correct = test(test_dataloader, model)\n",
    "    print(f\"Test Error: \\n Accuracy: {(100*correct):>0.1f}% \\n\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<All keys matched successfully>"
      ]
     },
     "execution_count": 33,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Persist the trained weights, then reload them into a fresh model instance.\n",
    "torch.save(model.state_dict(), \"./LeNet.pth\")\n",
    "model = LeNet().to(device)\n",
    "# map_location keeps the checkpoint loadable even when it was saved on a\n",
    "# different device than the current one (e.g. CUDA -> CPU).\n",
    "model.load_state_dict(torch.load(\"./LeNet.pth\", map_location=device))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Predicted: \"1\", Actual: \"1\"\n"
     ]
    }
   ],
   "source": [
    "# Sanity-check the restored model on a single held-out example.\n",
    "model.eval()\n",
    "sample_img, actual = test_data[180]\n",
    "x = sample_img.view(1, 1, 28, 28).to(device)  # add batch and channel dims\n",
    "with torch.no_grad():\n",
    "    logits = model(x)\n",
    "    predicted = logits[0].argmax(0)\n",
    "    print(f'Predicted: \"{predicted}\", Actual: \"{actual}\"')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "base",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
