{
 "cells": [
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "# softmax回归的简洁实现",
   "id": "a792cb8aab5e3d94"
  },
  {
   "cell_type": "code",
   "id": "initial_id",
   "metadata": {
    "collapsed": true,
    "ExecuteTime": {
     "end_time": "2025-08-11T09:30:48.942176Z",
     "start_time": "2025-08-11T09:30:48.938943Z"
    }
   },
   "source": [
    "from typing import Union, Callable\n",
    "\n",
    "import torch\n",
    "from matplotlib.pyplot import figure\n",
    "from torch import nn\n",
    "from d2l import torch as d2l\n",
    "import os\n",
    "device = torch.device('cuda:0')"
   ],
   "outputs": [],
   "execution_count": 38
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-11T09:30:48.994451Z",
     "start_time": "2025-08-11T09:30:48.988476Z"
    }
   },
   "cell_type": "code",
   "source": [
    "import os\n",
    "import time\n",
    "from typing import Any\n",
    "\n",
    "import torch\n",
    "import torchvision\n",
    "from torch.utils import data\n",
    "from torch.utils.data import DataLoader\n",
    "from torchvision import transforms\n",
    "from d2l import torch as d2l\n",
    "from matplotlib import pyplot as plt\n",
    "\n",
    "\n",
    "def load_data_fashion_mnist(batch_size: int, cpu_workers: int, resize=None, downloaded=True) -> tuple[\n",
    "    DataLoader[Any], DataLoader[Any]]:\n",
    "    '''Build the Fashion-MNIST train/test DataLoaders.\n",
    "\n",
    "    batch_size: samples per minibatch; cpu_workers: DataLoader worker processes;\n",
    "    resize: optional edge length to resize images to; downloaded: set False to download.\n",
    "    '''\n",
    "    trans = [transforms.ToTensor()]  ## Compose expects a list, since there may be several preprocessing steps\n",
    "    if resize:\n",
    "        trans.insert(0, transforms.Resize(resize))\n",
    "    trans = transforms.Compose(trans)\n",
    "    mnist_train = torchvision.datasets.FashionMNIST(\n",
    "        root='./data',\n",
    "        train=True,  ## training split\n",
    "        transform=trans,\n",
    "        download=(not downloaded)\n",
    "    )\n",
    "    mnist_test = torchvision.datasets.FashionMNIST(\n",
    "        root='./data',\n",
    "        train=False,  ## test split -- without train=False both loaders served the TRAINING set\n",
    "        transform=trans,\n",
    "        download=(not downloaded)\n",
    "    )\n",
    "    return (\n",
    "        data.DataLoader(mnist_train, batch_size, shuffle=True, num_workers=cpu_workers, pin_memory=True),\n",
    "        data.DataLoader(mnist_test, batch_size, shuffle=False, num_workers=cpu_workers, pin_memory=True)\n",
    "    )\n"
   ],
   "id": "262460602a06e410",
   "outputs": [],
   "execution_count": 39
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-11T09:30:49.113301Z",
     "start_time": "2025-08-11T09:30:49.077790Z"
    }
   },
   "cell_type": "code",
   "source": [
    "batch_size = 256\n",
    "train_iter, test_iter = load_data_fashion_mnist(\n",
    "    batch_size,\n",
    "    cpu_workers=max(0, os.cpu_count() // 2 - 2)  ## leave a few cores free for the OS\n",
    ")"
   ],
   "id": "6e8cc1406a1df958",
   "outputs": [],
   "execution_count": 40
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-11T09:30:49.166310Z",
     "start_time": "2025-08-11T09:30:49.162807Z"
    }
   },
   "cell_type": "code",
   "source": [
    "net = nn.Sequential(\n",
    "    nn.Flatten(),## flattens each multi-dimensional batch tensor to 2-D:\n",
    "    ## dim[0] (the sample/batch dimension) is kept, all remaining dims are\n",
    "    ## collapsed into one vector, e.g. a 5*10*10 tensor becomes a 5*100 matrix\n",
    "    nn.Linear(784,10)\n",
    ").to(device)"
   ],
   "id": "840bbbf6ad2a1260",
   "outputs": [],
   "execution_count": 41
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-11T09:30:49.243827Z",
     "start_time": "2025-08-11T09:30:49.240826Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def init_weights(m: Union[Callable, torch.nn.Linear]):\n",
    "    '''Initialize the weights of Linear layers with N(0, 0.01**2).\n",
    "\n",
    "    Intended to be applied with net.apply(); non-Linear modules are untouched.\n",
    "    '''\n",
    "    if isinstance(m, nn.Linear):\n",
    "        ## the default std of normal_ is 1.0, which is far too large for a\n",
    "        ## 784-input layer and slows convergence badly; 0.01 is the usual choice\n",
    "        nn.init.normal_(m.weight, std=0.01)"
   ],
   "id": "689029fb529d12b0",
   "outputs": [],
   "execution_count": 42
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-11T09:30:49.311861Z",
     "start_time": "2025-08-11T09:30:49.307611Z"
    }
   },
   "cell_type": "code",
   "source": "net.apply(init_weights) ## apply init_weights to every submodule of the network",
   "id": "4dae4f3b43b803c0",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Sequential(\n",
       "  (0): Flatten(start_dim=1, end_dim=-1)\n",
       "  (1): Linear(in_features=784, out_features=10, bias=True)\n",
       ")"
      ]
     },
     "execution_count": 43,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 43
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-11T09:30:49.370978Z",
     "start_time": "2025-08-11T09:30:49.367865Z"
    }
   },
   "cell_type": "code",
   "source": "loss = nn.CrossEntropyLoss()  ## expects raw logits; fuses log-softmax with NLL loss",
   "id": "8163b8247fda893b",
   "outputs": [],
   "execution_count": 44
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-11T09:30:49.431906Z",
     "start_time": "2025-08-11T09:30:49.428899Z"
    }
   },
   "cell_type": "code",
   "source": "trainer = torch.optim.SGD(net.parameters(),lr = 0.1)  ## plain minibatch SGD, learning rate 0.1",
   "id": "ff378f282d349abf",
   "outputs": [],
   "execution_count": 45
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-11T09:30:49.492404Z",
     "start_time": "2025-08-11T09:30:49.488970Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class Accumulator:\n",
    "    '''Maintain running sums over n floating-point counters.'''\n",
    "\n",
    "    def __init__(self, n):\n",
    "        ## every counter starts at zero\n",
    "        self.data = [0.0] * n\n",
    "\n",
    "    def add(self, *args):\n",
    "        ## element-wise: counter i grows by float(args[i])\n",
    "        self.data = [total + float(delta) for total, delta in zip(self.data, args)]\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        return self.data[idx]"
   ],
   "id": "aa912f44e734c0a4",
   "outputs": [],
   "execution_count": 46
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-11T09:30:49.555043Z",
     "start_time": "2025-08-11T09:30:49.551225Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def accuracy(y_predict: torch.Tensor, y: torch.Tensor) -> float:\n",
    "    '''Return the number of correct predictions in the batch, as a float.'''\n",
    "    if len(y_predict.shape) > 1 and y_predict.shape[1] > 1:\n",
    "        y_predict = y_predict.argmax(axis=1)  # index of the largest logit per row\n",
    "    cmp = y_predict.type(y.dtype) == y\n",
    "    return float(cmp.type(y.dtype).sum())  # count of correct predictions (annotation fixed to float)\n",
    "\n"
   ],
   "id": "f31f48b4d4202840",
   "outputs": [],
   "execution_count": 47
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-11T09:30:49.614035Z",
     "start_time": "2025-08-11T09:30:49.610031Z"
    }
   },
   "cell_type": "code",
   "source": [
    "\n",
    "def evaluate_accuracy(net: Union[Callable, torch.nn.Module],\n",
    "                      data_iter: torch.utils.data.DataLoader,\n",
    "                      device: torch.device = device):\n",
    "    \"\"\"Evaluate the model's accuracy on the given dataset.\n",
    "\n",
    "    NOTE(review): the default `device` is captured from the module-level\n",
    "    global at definition time, not at call time.\n",
    "    \"\"\"\n",
    "    if isinstance(net, torch.nn.Module):\n",
    "        net.eval()  # evaluation mode\n",
    "    metric = Accumulator(2)  # [0] correct predictions, [1] total samples\n",
    "\n",
    "    with torch.no_grad():  # no gradients needed -- saves memory\n",
    "        for X, y in data_iter:\n",
    "            # move the batch to the same device as the model\n",
    "            X, y = X.to(device), y.to(device)\n",
    "            metric.add(\n",
    "                accuracy(net(X), y),\n",
    "                y.numel()\n",
    "            )\n",
    "    return metric[0] / metric[1]"
   ],
   "id": "c83c1df4dc81015f",
   "outputs": [],
   "execution_count": 48
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-11T12:05:22.825335Z",
     "start_time": "2025-08-11T09:30:49.669458Z"
    }
   },
   "cell_type": "code",
   "source": [
    "num_epochs = 1000\n",
    "train_loss_record = list()\n",
    "train_acc_record = list()\n",
    "test_acc_record = list()\n",
    "for epoch in range(num_epochs):\n",
    "    # training mode\n",
    "    net.train()\n",
    "    metric = Accumulator(3)  # per-epoch [summed loss, correct count, sample count]\n",
    "\n",
    "    for X, y in train_iter:\n",
    "        # move the batch to the same device as the model\n",
    "        X, y = X.to(device), y.to(device)\n",
    "\n",
    "        y_predict = net(X)\n",
    "        l = loss(y_predict, y)  # CrossEntropyLoss already reduces to the batch mean\n",
    "\n",
    "        trainer.zero_grad()\n",
    "        l.backward()\n",
    "        trainer.step()\n",
    "\n",
    "        # scale the batch-mean loss back up to a sum so the epoch\n",
    "        # average below is a true per-sample loss\n",
    "        metric.add(float(l) * y.numel(), accuracy(y_predict, y), y.numel())\n",
    "\n",
    "    # per-epoch training metrics\n",
    "    train_loss = metric[0] / metric[2]\n",
    "    train_acc = metric[1] / metric[2]\n",
    "\n",
    "    # evaluate on the test set once per epoch\n",
    "    test_acc = evaluate_accuracy(net, test_iter, device)\n",
    "\n",
    "    # record the curves for the plotting cell below (previously never filled,\n",
    "    # which crashed that cell with mismatched x/y lengths)\n",
    "    train_loss_record.append(train_loss)\n",
    "    train_acc_record.append(train_acc)\n",
    "    test_acc_record.append(test_acc)\n",
    "\n",
    "    if epoch % 5 == 0:\n",
    "        print(f\"Epoch {epoch}: Train Loss {train_loss:.4f}, Test Acc {test_acc:.4f}\")\n"
   ],
   "id": "7e6d2e401f9465ab",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 0: Train Loss 0.0165, Test Acc 0.5691\n",
      "Epoch 5: Train Loss 0.0048, Test Acc 0.7343\n",
      "Epoch 10: Train Loss 0.0039, Test Acc 0.7694\n",
      "Epoch 15: Train Loss 0.0034, Test Acc 0.7855\n",
      "Epoch 20: Train Loss 0.0032, Test Acc 0.7920\n",
      "Epoch 25: Train Loss 0.0030, Test Acc 0.8040\n",
      "Epoch 30: Train Loss 0.0028, Test Acc 0.8003\n",
      "Epoch 35: Train Loss 0.0027, Test Acc 0.8128\n",
      "Epoch 40: Train Loss 0.0026, Test Acc 0.8045\n",
      "Epoch 45: Train Loss 0.0025, Test Acc 0.8173\n",
      "Epoch 50: Train Loss 0.0024, Test Acc 0.8249\n",
      "Epoch 55: Train Loss 0.0023, Test Acc 0.8273\n",
      "Epoch 60: Train Loss 0.0023, Test Acc 0.8293\n",
      "Epoch 65: Train Loss 0.0022, Test Acc 0.8233\n",
      "Epoch 70: Train Loss 0.0022, Test Acc 0.8240\n",
      "Epoch 75: Train Loss 0.0022, Test Acc 0.8328\n",
      "Epoch 80: Train Loss 0.0021, Test Acc 0.8363\n",
      "Epoch 85: Train Loss 0.0021, Test Acc 0.8381\n",
      "Epoch 90: Train Loss 0.0021, Test Acc 0.8375\n",
      "Epoch 95: Train Loss 0.0020, Test Acc 0.8409\n",
      "Epoch 100: Train Loss 0.0020, Test Acc 0.8375\n",
      "Epoch 105: Train Loss 0.0020, Test Acc 0.8433\n",
      "Epoch 110: Train Loss 0.0019, Test Acc 0.8434\n",
      "Epoch 115: Train Loss 0.0019, Test Acc 0.8398\n",
      "Epoch 120: Train Loss 0.0019, Test Acc 0.8409\n",
      "Epoch 125: Train Loss 0.0019, Test Acc 0.8465\n",
      "Epoch 130: Train Loss 0.0019, Test Acc 0.8458\n",
      "Epoch 135: Train Loss 0.0018, Test Acc 0.8402\n",
      "Epoch 140: Train Loss 0.0018, Test Acc 0.8492\n",
      "Epoch 145: Train Loss 0.0018, Test Acc 0.8510\n",
      "Epoch 150: Train Loss 0.0018, Test Acc 0.8492\n",
      "Epoch 155: Train Loss 0.0018, Test Acc 0.8486\n",
      "Epoch 160: Train Loss 0.0018, Test Acc 0.8491\n",
      "Epoch 165: Train Loss 0.0018, Test Acc 0.8525\n",
      "Epoch 170: Train Loss 0.0018, Test Acc 0.8503\n",
      "Epoch 175: Train Loss 0.0017, Test Acc 0.8544\n",
      "Epoch 180: Train Loss 0.0017, Test Acc 0.8530\n",
      "Epoch 185: Train Loss 0.0017, Test Acc 0.8417\n",
      "Epoch 190: Train Loss 0.0017, Test Acc 0.8517\n",
      "Epoch 195: Train Loss 0.0017, Test Acc 0.8508\n",
      "Epoch 200: Train Loss 0.0017, Test Acc 0.8525\n",
      "Epoch 205: Train Loss 0.0017, Test Acc 0.8539\n",
      "Epoch 210: Train Loss 0.0017, Test Acc 0.8586\n",
      "Epoch 215: Train Loss 0.0017, Test Acc 0.8581\n",
      "Epoch 220: Train Loss 0.0017, Test Acc 0.8544\n",
      "Epoch 225: Train Loss 0.0017, Test Acc 0.8547\n",
      "Epoch 230: Train Loss 0.0016, Test Acc 0.8601\n",
      "Epoch 235: Train Loss 0.0016, Test Acc 0.8508\n",
      "Epoch 240: Train Loss 0.0016, Test Acc 0.8593\n",
      "Epoch 245: Train Loss 0.0016, Test Acc 0.8552\n",
      "Epoch 250: Train Loss 0.0016, Test Acc 0.8617\n",
      "Epoch 255: Train Loss 0.0016, Test Acc 0.8564\n",
      "Epoch 260: Train Loss 0.0016, Test Acc 0.8500\n",
      "Epoch 265: Train Loss 0.0016, Test Acc 0.8621\n",
      "Epoch 270: Train Loss 0.0016, Test Acc 0.8607\n",
      "Epoch 275: Train Loss 0.0016, Test Acc 0.8573\n",
      "Epoch 280: Train Loss 0.0016, Test Acc 0.8635\n",
      "Epoch 285: Train Loss 0.0016, Test Acc 0.8646\n",
      "Epoch 290: Train Loss 0.0016, Test Acc 0.8612\n",
      "Epoch 295: Train Loss 0.0016, Test Acc 0.8605\n",
      "Epoch 300: Train Loss 0.0016, Test Acc 0.8623\n",
      "Epoch 305: Train Loss 0.0016, Test Acc 0.8437\n",
      "Epoch 310: Train Loss 0.0016, Test Acc 0.8629\n",
      "Epoch 315: Train Loss 0.0016, Test Acc 0.8605\n",
      "Epoch 320: Train Loss 0.0016, Test Acc 0.8602\n",
      "Epoch 325: Train Loss 0.0016, Test Acc 0.8608\n",
      "Epoch 330: Train Loss 0.0016, Test Acc 0.8599\n",
      "Epoch 335: Train Loss 0.0015, Test Acc 0.8651\n",
      "Epoch 340: Train Loss 0.0015, Test Acc 0.8663\n",
      "Epoch 345: Train Loss 0.0015, Test Acc 0.8602\n",
      "Epoch 350: Train Loss 0.0015, Test Acc 0.8661\n",
      "Epoch 355: Train Loss 0.0015, Test Acc 0.8641\n",
      "Epoch 360: Train Loss 0.0015, Test Acc 0.8641\n",
      "Epoch 365: Train Loss 0.0015, Test Acc 0.8627\n",
      "Epoch 370: Train Loss 0.0015, Test Acc 0.8626\n",
      "Epoch 375: Train Loss 0.0015, Test Acc 0.8665\n",
      "Epoch 380: Train Loss 0.0015, Test Acc 0.8681\n",
      "Epoch 385: Train Loss 0.0015, Test Acc 0.8687\n",
      "Epoch 390: Train Loss 0.0015, Test Acc 0.8672\n",
      "Epoch 395: Train Loss 0.0015, Test Acc 0.8659\n",
      "Epoch 400: Train Loss 0.0015, Test Acc 0.8689\n",
      "Epoch 405: Train Loss 0.0015, Test Acc 0.8696\n",
      "Epoch 410: Train Loss 0.0015, Test Acc 0.8639\n",
      "Epoch 415: Train Loss 0.0015, Test Acc 0.8639\n",
      "Epoch 420: Train Loss 0.0015, Test Acc 0.8630\n",
      "Epoch 425: Train Loss 0.0015, Test Acc 0.8636\n",
      "Epoch 430: Train Loss 0.0015, Test Acc 0.8701\n",
      "Epoch 435: Train Loss 0.0015, Test Acc 0.8656\n",
      "Epoch 440: Train Loss 0.0015, Test Acc 0.8698\n",
      "Epoch 445: Train Loss 0.0015, Test Acc 0.8599\n",
      "Epoch 450: Train Loss 0.0015, Test Acc 0.8657\n",
      "Epoch 455: Train Loss 0.0015, Test Acc 0.8690\n",
      "Epoch 460: Train Loss 0.0015, Test Acc 0.8601\n",
      "Epoch 465: Train Loss 0.0015, Test Acc 0.8700\n",
      "Epoch 470: Train Loss 0.0015, Test Acc 0.8707\n",
      "Epoch 475: Train Loss 0.0015, Test Acc 0.8709\n",
      "Epoch 480: Train Loss 0.0015, Test Acc 0.8680\n",
      "Epoch 485: Train Loss 0.0015, Test Acc 0.8682\n",
      "Epoch 490: Train Loss 0.0015, Test Acc 0.8592\n",
      "Epoch 495: Train Loss 0.0015, Test Acc 0.8677\n",
      "Epoch 500: Train Loss 0.0015, Test Acc 0.8589\n",
      "Epoch 505: Train Loss 0.0015, Test Acc 0.8710\n",
      "Epoch 510: Train Loss 0.0015, Test Acc 0.8710\n",
      "Epoch 515: Train Loss 0.0015, Test Acc 0.8694\n",
      "Epoch 520: Train Loss 0.0015, Test Acc 0.8695\n",
      "Epoch 525: Train Loss 0.0015, Test Acc 0.8729\n",
      "Epoch 530: Train Loss 0.0015, Test Acc 0.8710\n",
      "Epoch 535: Train Loss 0.0015, Test Acc 0.8714\n",
      "Epoch 540: Train Loss 0.0015, Test Acc 0.8482\n",
      "Epoch 545: Train Loss 0.0015, Test Acc 0.8622\n",
      "Epoch 550: Train Loss 0.0015, Test Acc 0.8727\n",
      "Epoch 555: Train Loss 0.0015, Test Acc 0.8726\n",
      "Epoch 560: Train Loss 0.0015, Test Acc 0.8708\n",
      "Epoch 565: Train Loss 0.0015, Test Acc 0.8709\n",
      "Epoch 570: Train Loss 0.0014, Test Acc 0.8730\n",
      "Epoch 575: Train Loss 0.0014, Test Acc 0.8698\n",
      "Epoch 580: Train Loss 0.0014, Test Acc 0.8627\n",
      "Epoch 585: Train Loss 0.0014, Test Acc 0.8638\n",
      "Epoch 590: Train Loss 0.0014, Test Acc 0.8683\n",
      "Epoch 595: Train Loss 0.0014, Test Acc 0.8719\n",
      "Epoch 600: Train Loss 0.0014, Test Acc 0.8698\n",
      "Epoch 605: Train Loss 0.0014, Test Acc 0.8729\n",
      "Epoch 610: Train Loss 0.0014, Test Acc 0.8726\n",
      "Epoch 615: Train Loss 0.0014, Test Acc 0.8729\n",
      "Epoch 620: Train Loss 0.0014, Test Acc 0.8741\n",
      "Epoch 625: Train Loss 0.0014, Test Acc 0.8707\n",
      "Epoch 630: Train Loss 0.0014, Test Acc 0.8739\n",
      "Epoch 635: Train Loss 0.0014, Test Acc 0.8662\n",
      "Epoch 640: Train Loss 0.0014, Test Acc 0.8741\n",
      "Epoch 645: Train Loss 0.0014, Test Acc 0.8731\n",
      "Epoch 650: Train Loss 0.0014, Test Acc 0.8677\n",
      "Epoch 655: Train Loss 0.0014, Test Acc 0.8707\n",
      "Epoch 660: Train Loss 0.0014, Test Acc 0.8647\n",
      "Epoch 665: Train Loss 0.0014, Test Acc 0.8732\n",
      "Epoch 670: Train Loss 0.0014, Test Acc 0.8735\n",
      "Epoch 675: Train Loss 0.0014, Test Acc 0.8733\n",
      "Epoch 680: Train Loss 0.0014, Test Acc 0.8696\n",
      "Epoch 685: Train Loss 0.0014, Test Acc 0.8751\n",
      "Epoch 690: Train Loss 0.0014, Test Acc 0.8742\n",
      "Epoch 695: Train Loss 0.0014, Test Acc 0.8698\n",
      "Epoch 700: Train Loss 0.0014, Test Acc 0.8754\n",
      "Epoch 705: Train Loss 0.0014, Test Acc 0.8735\n",
      "Epoch 710: Train Loss 0.0014, Test Acc 0.8687\n",
      "Epoch 715: Train Loss 0.0014, Test Acc 0.8679\n",
      "Epoch 720: Train Loss 0.0014, Test Acc 0.8737\n",
      "Epoch 725: Train Loss 0.0014, Test Acc 0.8735\n",
      "Epoch 730: Train Loss 0.0014, Test Acc 0.8761\n",
      "Epoch 735: Train Loss 0.0014, Test Acc 0.8736\n",
      "Epoch 740: Train Loss 0.0014, Test Acc 0.8699\n",
      "Epoch 745: Train Loss 0.0014, Test Acc 0.8752\n",
      "Epoch 750: Train Loss 0.0014, Test Acc 0.8677\n",
      "Epoch 755: Train Loss 0.0014, Test Acc 0.8704\n",
      "Epoch 760: Train Loss 0.0014, Test Acc 0.8741\n",
      "Epoch 765: Train Loss 0.0014, Test Acc 0.8748\n",
      "Epoch 770: Train Loss 0.0014, Test Acc 0.8734\n",
      "Epoch 775: Train Loss 0.0014, Test Acc 0.8714\n",
      "Epoch 780: Train Loss 0.0014, Test Acc 0.8727\n",
      "Epoch 785: Train Loss 0.0014, Test Acc 0.8698\n",
      "Epoch 790: Train Loss 0.0014, Test Acc 0.8754\n",
      "Epoch 795: Train Loss 0.0014, Test Acc 0.8755\n",
      "Epoch 800: Train Loss 0.0014, Test Acc 0.8747\n",
      "Epoch 805: Train Loss 0.0014, Test Acc 0.8745\n",
      "Epoch 810: Train Loss 0.0014, Test Acc 0.8682\n",
      "Epoch 815: Train Loss 0.0014, Test Acc 0.8713\n",
      "Epoch 820: Train Loss 0.0014, Test Acc 0.8766\n",
      "Epoch 825: Train Loss 0.0014, Test Acc 0.8740\n",
      "Epoch 830: Train Loss 0.0014, Test Acc 0.8662\n",
      "Epoch 835: Train Loss 0.0014, Test Acc 0.8749\n",
      "Epoch 840: Train Loss 0.0014, Test Acc 0.8748\n",
      "Epoch 845: Train Loss 0.0014, Test Acc 0.8730\n",
      "Epoch 850: Train Loss 0.0014, Test Acc 0.8733\n",
      "Epoch 855: Train Loss 0.0014, Test Acc 0.8767\n",
      "Epoch 860: Train Loss 0.0014, Test Acc 0.8719\n",
      "Epoch 865: Train Loss 0.0014, Test Acc 0.8765\n",
      "Epoch 870: Train Loss 0.0014, Test Acc 0.8716\n",
      "Epoch 875: Train Loss 0.0014, Test Acc 0.8726\n",
      "Epoch 880: Train Loss 0.0014, Test Acc 0.8752\n",
      "Epoch 885: Train Loss 0.0014, Test Acc 0.8692\n",
      "Epoch 890: Train Loss 0.0014, Test Acc 0.8699\n",
      "Epoch 895: Train Loss 0.0014, Test Acc 0.8680\n",
      "Epoch 900: Train Loss 0.0014, Test Acc 0.8690\n",
      "Epoch 905: Train Loss 0.0014, Test Acc 0.8778\n",
      "Epoch 910: Train Loss 0.0014, Test Acc 0.8770\n",
      "Epoch 915: Train Loss 0.0014, Test Acc 0.8756\n",
      "Epoch 920: Train Loss 0.0014, Test Acc 0.8761\n",
      "Epoch 925: Train Loss 0.0014, Test Acc 0.8731\n",
      "Epoch 930: Train Loss 0.0014, Test Acc 0.8699\n",
      "Epoch 935: Train Loss 0.0014, Test Acc 0.8712\n",
      "Epoch 940: Train Loss 0.0014, Test Acc 0.8777\n",
      "Epoch 945: Train Loss 0.0014, Test Acc 0.8720\n",
      "Epoch 950: Train Loss 0.0014, Test Acc 0.8725\n",
      "Epoch 955: Train Loss 0.0014, Test Acc 0.8743\n",
      "Epoch 960: Train Loss 0.0014, Test Acc 0.8752\n",
      "Epoch 965: Train Loss 0.0014, Test Acc 0.8761\n",
      "Epoch 970: Train Loss 0.0014, Test Acc 0.8731\n",
      "Epoch 975: Train Loss 0.0014, Test Acc 0.8773\n",
      "Epoch 980: Train Loss 0.0014, Test Acc 0.8721\n",
      "Epoch 985: Train Loss 0.0014, Test Acc 0.8759\n",
      "Epoch 990: Train Loss 0.0014, Test Acc 0.8770\n",
      "Epoch 995: Train Loss 0.0014, Test Acc 0.8774\n"
     ]
    }
   ],
   "execution_count": 49
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-11T12:05:58.445398Z",
     "start_time": "2025-08-11T12:05:22.881855Z"
    }
   },
   "cell_type": "code",
   "source": [
    "## Plot epoch vs. train-acc, train-loss and test-acc\n",
    "from matplotlib import pyplot as plt\n",
    "epochs = range(num_epochs)\n",
    "fig, (ax_train_acc, ax_train_loss, ax_test_acc) = plt.subplots(1, 3)\n",
    "ax_train_acc.plot(epochs, train_acc_record)\n",
    "ax_train_acc.set_xlabel('epoch')\n",
    "ax_train_acc.set_ylabel('acc')\n",
    "ax_train_loss.plot(epochs, train_loss_record)\n",
    "ax_train_loss.set_xlabel('epoch')\n",
    "ax_train_loss.set_ylabel('loss')\n",
    "ax_test_acc.plot(epochs, test_acc_record)\n",
    "ax_test_acc.set_xlabel('epoch')\n",
    "ax_test_acc.set_ylabel('test acc')\n",
    "plt.show()"
   ],
   "id": "39052a501652ae41",
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Exception ignored in: <function _MultiProcessingDataLoaderIter.__del__ at 0x0000023BFEC0CC20>\n",
      "Traceback (most recent call last):\n",
      "  File \"C:\\Users\\aleclanned\\.local\\share\\mamba\\envs\\d2l\\Lib\\site-packages\\torch\\utils\\data\\dataloader.py\", line 1663, in __del__\n",
      "    self._shutdown_workers()\n",
      "  File \"C:\\Users\\aleclanned\\.local\\share\\mamba\\envs\\d2l\\Lib\\site-packages\\torch\\utils\\data\\dataloader.py\", line 1627, in _shutdown_workers\n",
      "    w.join(timeout=_utils.MP_STATUS_CHECK_INTERVAL)\n",
      "  File \"C:\\Users\\aleclanned\\.local\\share\\mamba\\envs\\d2l\\Lib\\multiprocessing\\process.py\", line 149, in join\n",
      "    res = self._popen.wait(timeout)\n",
      "          ^^^^^^^^^^^^^^^^^^^^^^^^^\n",
      "  File \"C:\\Users\\aleclanned\\.local\\share\\mamba\\envs\\d2l\\Lib\\multiprocessing\\popen_spawn_win32.py\", line 112, in wait\n",
      "    res = _winapi.WaitForSingleObject(int(self._handle), msecs)\n",
      "          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
      "KeyboardInterrupt: \n",
      "Exception ignored in: <function _MultiProcessingDataLoaderIter.__del__ at 0x0000023BFEC0CC20>\n",
      "Traceback (most recent call last):\n",
      "  File \"C:\\Users\\aleclanned\\.local\\share\\mamba\\envs\\d2l\\Lib\\site-packages\\torch\\utils\\data\\dataloader.py\", line 1663, in __del__\n",
      "    self._shutdown_workers()\n",
      "  File \"C:\\Users\\aleclanned\\.local\\share\\mamba\\envs\\d2l\\Lib\\site-packages\\torch\\utils\\data\\dataloader.py\", line 1627, in _shutdown_workers\n",
      "    w.join(timeout=_utils.MP_STATUS_CHECK_INTERVAL)\n",
      "  File \"C:\\Users\\aleclanned\\.local\\share\\mamba\\envs\\d2l\\Lib\\multiprocessing\\process.py\", line 149, in join\n",
      "    res = self._popen.wait(timeout)\n",
      "          ^^^^^^^^^^^^^^^^^^^^^^^^^\n",
      "  File \"C:\\Users\\aleclanned\\.local\\share\\mamba\\envs\\d2l\\Lib\\multiprocessing\\popen_spawn_win32.py\", line 112, in wait\n",
      "    res = _winapi.WaitForSingleObject(int(self._handle), msecs)\n",
      "          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
      "KeyboardInterrupt: \n"
     ]
    },
    {
     "ename": "ValueError",
     "evalue": "x and y must have same first dimension, but have shapes (1000,) and (0,)",
     "output_type": "error",
     "traceback": [
      "\u001B[31m---------------------------------------------------------------------------\u001B[39m",
      "\u001B[31mValueError\u001B[39m                                Traceback (most recent call last)",
      "\u001B[36mCell\u001B[39m\u001B[36m \u001B[39m\u001B[32mIn[50]\u001B[39m\u001B[32m, line 5\u001B[39m\n\u001B[32m      3\u001B[39m fig = plt.figure()\n\u001B[32m      4\u001B[39m ax1 = fig.add_subplot(\u001B[32m131\u001B[39m)\n\u001B[32m----> \u001B[39m\u001B[32m5\u001B[39m \u001B[43max1\u001B[49m\u001B[43m.\u001B[49m\u001B[43mplot\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;28;43mrange\u001B[39;49m\u001B[43m(\u001B[49m\u001B[43mnum_epochs\u001B[49m\u001B[43m)\u001B[49m\u001B[43m,\u001B[49m\u001B[43mtrain_acc_record\u001B[49m\u001B[43m)\u001B[49m\n\u001B[32m      6\u001B[39m ax1.set_xlabel(\u001B[33m'\u001B[39m\u001B[33mepoch\u001B[39m\u001B[33m'\u001B[39m)\n\u001B[32m      7\u001B[39m ax1.set_ylabel(\u001B[33m'\u001B[39m\u001B[33macc\u001B[39m\u001B[33m'\u001B[39m)\n",
      "\u001B[36mFile \u001B[39m\u001B[32m~\\.local\\share\\mamba\\envs\\d2l\\Lib\\site-packages\\matplotlib\\axes\\_axes.py:1777\u001B[39m, in \u001B[36mAxes.plot\u001B[39m\u001B[34m(self, scalex, scaley, data, *args, **kwargs)\u001B[39m\n\u001B[32m   1534\u001B[39m \u001B[38;5;250m\u001B[39m\u001B[33;03m\"\"\"\u001B[39;00m\n\u001B[32m   1535\u001B[39m \u001B[33;03mPlot y versus x as lines and/or markers.\u001B[39;00m\n\u001B[32m   1536\u001B[39m \n\u001B[32m   (...)\u001B[39m\u001B[32m   1774\u001B[39m \u001B[33;03m(``'green'``) or hex strings (``'#008000'``).\u001B[39;00m\n\u001B[32m   1775\u001B[39m \u001B[33;03m\"\"\"\u001B[39;00m\n\u001B[32m   1776\u001B[39m kwargs = cbook.normalize_kwargs(kwargs, mlines.Line2D)\n\u001B[32m-> \u001B[39m\u001B[32m1777\u001B[39m lines = [*\u001B[38;5;28mself\u001B[39m._get_lines(\u001B[38;5;28mself\u001B[39m, *args, data=data, **kwargs)]\n\u001B[32m   1778\u001B[39m \u001B[38;5;28;01mfor\u001B[39;00m line \u001B[38;5;129;01min\u001B[39;00m lines:\n\u001B[32m   1779\u001B[39m     \u001B[38;5;28mself\u001B[39m.add_line(line)\n",
      "\u001B[36mFile \u001B[39m\u001B[32m~\\.local\\share\\mamba\\envs\\d2l\\Lib\\site-packages\\matplotlib\\axes\\_base.py:297\u001B[39m, in \u001B[36m_process_plot_var_args.__call__\u001B[39m\u001B[34m(self, axes, data, return_kwargs, *args, **kwargs)\u001B[39m\n\u001B[32m    295\u001B[39m     this += args[\u001B[32m0\u001B[39m],\n\u001B[32m    296\u001B[39m     args = args[\u001B[32m1\u001B[39m:]\n\u001B[32m--> \u001B[39m\u001B[32m297\u001B[39m \u001B[38;5;28;01myield from\u001B[39;00m \u001B[38;5;28;43mself\u001B[39;49m\u001B[43m.\u001B[49m\u001B[43m_plot_args\u001B[49m\u001B[43m(\u001B[49m\n\u001B[32m    298\u001B[39m \u001B[43m    \u001B[49m\u001B[43maxes\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mthis\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mkwargs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mambiguous_fmt_datakey\u001B[49m\u001B[43m=\u001B[49m\u001B[43mambiguous_fmt_datakey\u001B[49m\u001B[43m,\u001B[49m\n\u001B[32m    299\u001B[39m \u001B[43m    \u001B[49m\u001B[43mreturn_kwargs\u001B[49m\u001B[43m=\u001B[49m\u001B[43mreturn_kwargs\u001B[49m\n\u001B[32m    300\u001B[39m \u001B[43m\u001B[49m\u001B[43m)\u001B[49m\n",
      "\u001B[36mFile \u001B[39m\u001B[32m~\\.local\\share\\mamba\\envs\\d2l\\Lib\\site-packages\\matplotlib\\axes\\_base.py:494\u001B[39m, in \u001B[36m_process_plot_var_args._plot_args\u001B[39m\u001B[34m(self, axes, tup, kwargs, return_kwargs, ambiguous_fmt_datakey)\u001B[39m\n\u001B[32m    491\u001B[39m     axes.yaxis.update_units(y)\n\u001B[32m    493\u001B[39m \u001B[38;5;28;01mif\u001B[39;00m x.shape[\u001B[32m0\u001B[39m] != y.shape[\u001B[32m0\u001B[39m]:\n\u001B[32m--> \u001B[39m\u001B[32m494\u001B[39m     \u001B[38;5;28;01mraise\u001B[39;00m \u001B[38;5;167;01mValueError\u001B[39;00m(\u001B[33mf\u001B[39m\u001B[33m\"\u001B[39m\u001B[33mx and y must have same first dimension, but \u001B[39m\u001B[33m\"\u001B[39m\n\u001B[32m    495\u001B[39m                      \u001B[33mf\u001B[39m\u001B[33m\"\u001B[39m\u001B[33mhave shapes \u001B[39m\u001B[38;5;132;01m{\u001B[39;00mx.shape\u001B[38;5;132;01m}\u001B[39;00m\u001B[33m and \u001B[39m\u001B[38;5;132;01m{\u001B[39;00my.shape\u001B[38;5;132;01m}\u001B[39;00m\u001B[33m\"\u001B[39m)\n\u001B[32m    496\u001B[39m \u001B[38;5;28;01mif\u001B[39;00m x.ndim > \u001B[32m2\u001B[39m \u001B[38;5;129;01mor\u001B[39;00m y.ndim > \u001B[32m2\u001B[39m:\n\u001B[32m    497\u001B[39m     \u001B[38;5;28;01mraise\u001B[39;00m \u001B[38;5;167;01mValueError\u001B[39;00m(\u001B[33mf\u001B[39m\u001B[33m\"\u001B[39m\u001B[33mx and y can be no greater than 2D, but have \u001B[39m\u001B[33m\"\u001B[39m\n\u001B[32m    498\u001B[39m                      \u001B[33mf\u001B[39m\u001B[33m\"\u001B[39m\u001B[33mshapes \u001B[39m\u001B[38;5;132;01m{\u001B[39;00mx.shape\u001B[38;5;132;01m}\u001B[39;00m\u001B[33m and \u001B[39m\u001B[38;5;132;01m{\u001B[39;00my.shape\u001B[38;5;132;01m}\u001B[39;00m\u001B[33m\"\u001B[39m)\n",
      "\u001B[31mValueError\u001B[39m: x and y must have same first dimension, but have shapes (1000,) and (0,)"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<Figure size 640x480 with 1 Axes>"
      ],
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAANAAAAGiCAYAAACWOUgKAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjUsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvWftoOwAAAAlwSFlzAAAPYQAAD2EBqD+naQAAFNBJREFUeJzt3H9s1Hcdx/FXW7gry+ixWblSPNbgsh9uo92KrWUuBHPaZKSTP4zdNLQhY7ikGqHRQd1oReZumokk2g0lAibOwFzcNIO0mReaRa0haWnC2MDwY7Qu3kFjuGPdaN3dxz8Wb7m1Hf323esPfT6S7x/32edz308v3yd3vd4tzznnBGBS8md6A8BcRkCAAQEBBgQEGBAQYEBAgAEBAQYEBBgQEGBAQICB54Bee+011dXVqbS0VHl5eXr55Zevuaarq0v33HOP/H6/br75Zh04cGASWwVmH88BDQ0Nqby8XO3t7ROaf/78ea1du1Zr1qxRX1+fNm/erI0bN6qzs9PzZoHZJs/yYdK8vDy99NJLWrdu3bhztm7dqsOHD+v111/PjD344IO6fPmyOjo6JntqYFaYl+sTdHd3KxwOZ43V1tZq8+bN464ZHh7W8PBw5nY6nda//vUvfeITn1BeXl6utor/cc45XblyRaWlpcrPn5pf/3MeUCwWUzAYzBoLBoNKJpN67733tGDBglFrIpGIduzYkeut4f/UwMCAPvWpT03JfeU8oMloaWlRc3Nz5nYikdCyZcs0MDCgoqKiGdwZ5rJkMqlQKKSFCxdO2X3mPKCSkhLF4/GssXg8rqKiojGffSTJ7/fL7/ePGi8qKiIgmE3lrwE5/ztQTU2NotFo1tirr76qmpqaXJ8ayDnPAb3zzjvq6+tTX1+fpA/epu7r61N/f7+kD15+NTQ0ZOY/+uijOnfunB577DGdOnVKzz77rF544QVt2bJlan4CYCY5j44ePeokjToaGxudc841Nja61atXj1pTUVHhfD6fW758udu/f7+ncyYSCSfJJRIJr9sFMnJxHZn+DjRdksmkAoGAEokEvwNh0nJxHfFZOMCAgAADAgIMCAgwICDAgIAAAwICDAgIMCAgwICAAAMCAgwICDAgIMCAgAADAgIMCAgwICDAgIAAAwICDAgIMCAgwICAAAMCAgwICDAgIMCAgAADAgIMCAgwICDAgIAAAwICDAgIMCAgwICAAAMCAgwICDAgIMCAgAADAgIMCAgwICDAgIAAAwICDAgIMCAgwICAAAMCAgwICDAgIMCAgAADAgIMCAgwICDAgIAAAwICDAgIMCAgwICAAAMCAgwICDAgIMCAgAADAgIMJhVQe3u7ysrKVFhYqOrqah07duxj5+/evVu33nqrFixYoFAopC1btujq1auT2jAwqziPDh486Hw+n9u3b587efKke+SRR9yiRYtcPB4fc/7zzz/v/H6/e/7559358+ddZ2enW7JkiduyZcuEz5lIJJwkl0gkvG4XyMjFdeQ5oKqqKtfU1JS5nUqlXGlpqYtEImPOb2pqcl/4wheyxpqbm92999474XMSEKZCLq4jTy/hRkZG1NPTo3A4nBnLz89XOBxWd3f3mGtWrVqlnp6ezMu8c+fO6ciRI7r//vvHPc/w8LCSyWTWAcxG87xMHhwcVCqVUjAYzBoPBoM6derUmGu+9rWvaXBwUJ///OflnNP777+vRx99VN/73vfGPU8kEtGOHTu8bA2YETl/F66rq0tPPfWUnn32WfX29ur3v/+9Dh8+rJ07d467pqWlRYlEInMMDAzkepvApHh6BiouLlZBQYHi8XjWeDweV0lJyZhrtm/frvXr12vjxo2SpLvuuktDQ0PatGmTHn/8ceXnj27Y7/fL7/d72RowIzw9A/l8PlVWVioajWbG0um0otGoampqxlzz7rvvjoqkoKBAkuSc87pfYFbx9AwkSc3NzWpsbNTKlStVVVWl3bt3a2ho
SBs2bJAkNTQ0aOnSpYpEIpKkuro67dq1S3fffbeqq6t15swZbd++XXV1dZmQgLnKc0D19fW6dOmSWltbFYvFVFFRoY6OjswbC/39/VnPOE888YTy8vL0xBNP6O2339YnP/lJ1dXV6Yc//OHU/RTADMlzc+B1VDKZVCAQUCKRUFFR0UxvB3NULq4jPgsHGBAQYEBAgAEBAQYEBBgQEGBAQIABAQEGBAQYEBBgQECAAQEBBgQEGBAQYEBAgAEBAQYEBBgQEGBAQIABAQEGBAQYEBBgQECAAQEBBgQEGBAQYEBAgAEBAQYEBBgQEGBAQIABAQEGBAQYEBBgQECAAQEBBgQEGBAQYEBAgAEBAQYEBBgQEGBAQIABAQEGBAQYEBBgQECAAQEBBgQEGBAQYEBAgAEBAQYEBBgQEGBAQIABAQEGBAQYEBBgQECAAQEBBgQEGBAQYEBAgMGkAmpvb1dZWZkKCwtVXV2tY8eOfez8y5cvq6mpSUuWLJHf79ctt9yiI0eOTGrDwGwyz+uCQ4cOqbm5WXv27FF1dbV2796t2tpanT59WosXLx41f2RkRF/84he1ePFivfjii1q6dKkuXLigRYsWTcX+gZnlPKqqqnJNTU2Z26lUypWWlrpIJDLm/Oeee84tX77cjYyMeD1VRiKRcJJcIpGY9H0AubiOPL2EGxkZUU9Pj8LhcGYsPz9f4XBY3d3dY6754x//qJqaGjU1NSkYDOrOO+/UU089pVQqNe55hoeHlUwmsw5gNvIU0ODgoFKplILBYNZ4MBhULBYbc825c+f04osvKpVK6ciRI9q+fbt+8pOf6Mknnxz3PJFIRIFAIHOEQiEv2wSmTc7fhUun01q8eLF++ctfqrKyUvX19Xr88ce1Z8+ecde0tLQokUhkjoGBgVxvE5gUT28iFBcXq6CgQPF4PGs8Ho+rpKRkzDVLlizR/PnzVVBQkBm7/fbbFYvFNDIyIp/PN2qN3++X3+/3sjVgRnh6BvL5fKqsrFQ0Gs2MpdNpRaNR1dTUjLnm3nvv1ZkzZ5ROpzNjf//737VkyZIx4wHmFK/vOhw8eND5/X534MAB98Ybb7hNmza5RYsWuVgs5pxzbv369W7btm2Z+f39/W7hwoXum9/8pjt9+rR75ZVX3OLFi92TTz454XPyLhymQi6uI89/B6qvr9elS5fU2tqqWCymiooKdXR0ZN5Y6O/vV37+h09soVBInZ2d2rJli1asWKGlS5fq29/+trZu3TpV/wYAMybPOedmehPXkkwmFQgElEgkVFRUNNPbwRyVi+uIz8IBBgQEGBAQYEBAgAEBAQYEBBgQEGBAQIABAQEGBAQYEBBgQECAAQEBBgQEGBAQYEBAgAEBAQYEBBgQEGBAQIABAQEGBAQYEBBgQECAAQEBBgQEGBAQYEBAgAEBAQYEBBgQEGBAQIABAQEGBAQYEBBgQECAAQEBBgQEGBAQYEBAgAEBAQYEBBgQEGBAQIABAQEGBAQYEBBgQECAAQEBBgQEGBAQYEBAgAEBAQYEBBgQEGBAQIABAQEGBAQYEBBgQECAAQEBBgQEGBAQYDCpgNrb21VWVqbCwkJVV1fr2LFjE1p38OBB5eXlad26dZM5LTDreA7o0KFDam5uVltbm3p7e1VeXq7a2lpdvHjxY9e99dZb+s53vqP77rtv0psFZhvPAe3atUuPPPKINmzYoM985jPas2ePrrvuOu3bt2/cNalUSl//+te1Y8cOLV++/JrnGB4eVjKZzDqA2chTQCMjI+rp6VE4HP7wDvLzFQ6H1d3dPe66H/zgB1q8eLEefvjhCZ0nEokoEAhkjlAo5GWbwLTxFNDg4KBSqZSCwWDWeDAYVCwWG3PNn//8Z/3qV7/S3r17J3yelpYWJRKJzDEwMOBlm8C0mZfLO79y5YrWr1+vvXv3qri4eMLr/H6//H5/DncGTA1PARUXF6ugoEDxeDxrPB6Pq6SkZNT8s2fP6q233lJdXV1mLJ1O
f3DiefN0+vRpffrTn57MvoFZwdNLOJ/Pp8rKSkWj0cxYOp1WNBpVTU3NqPm33XabTpw4ob6+vszxwAMPaM2aNerr6+N3G8x5nl/CNTc3q7GxUStXrlRVVZV2796toaEhbdiwQZLU0NCgpUuXKhKJqLCwUHfeeWfW+kWLFknSqHFgLvIcUH19vS5duqTW1lbFYjFVVFSoo6Mj88ZCf3+/8vP5gAP+P+Q559xMb+JaksmkAoGAEomEioqKZno7mKNycR3xVAEYEBBgQECAAQEBBgQEGBAQYEBAgAEBAQYEBBgQEGBAQIABAQEGBAQYEBBgQECAAQEBBgQEGBAQYEBAgAEBAQYEBBgQEGBAQIABAQEGBAQYEBBgQECAAQEBBgQEGBAQYEBAgAEBAQYEBBgQEGBAQIABAQEGBAQYEBBgQECAAQEBBgQEGBAQYEBAgAEBAQYEBBgQEGBAQIABAQEGBAQYEBBgQECAAQEBBgQEGBAQYEBAgAEBAQYEBBgQEGBAQIABAQEGBAQYEBBgQECAwaQCam9vV1lZmQoLC1VdXa1jx46NO3fv3r267777dMMNN+iGG25QOBz+2PnAXOI5oEOHDqm5uVltbW3q7e1VeXm5amtrdfHixTHnd3V16aGHHtLRo0fV3d2tUCikL33pS3r77bfNmwdmnPOoqqrKNTU1ZW6nUilXWlrqIpHIhNa///77buHChe7Xv/71uHOuXr3qEolE5hgYGHCSXCKR8LpdICORSEz5deTpGWhkZEQ9PT0Kh8OZsfz8fIXDYXV3d0/oPt599139+9//1o033jjunEgkokAgkDlCoZCXbQLTxlNAg4ODSqVSCgaDWePBYFCxWGxC97F161aVlpZmRfhRLS0tSiQSmWNgYMDLNoFpM286T/b000/r4MGD6urqUmFh4bjz/H6//H7/NO4MmBxPARUXF6ugoEDxeDxrPB6Pq6Sk5GPXPvPMM3r66af1pz/9SStWrPC+U2AW8vQSzufzqbKyUtFoNDOWTqcVjUZVU1Mz7rof//jH2rlzpzo6OrRy5crJ7xaYZTy/hGtublZjY6NWrlypqqoq7d69W0NDQ9qwYYMkqaGhQUuXLlUkEpEk/ehHP1Jra6t++9vfqqysLPO70vXXX6/rr79+Cn8UYPp5Dqi+vl6XLl1Sa2urYrGYKioq1NHRkXljob+/X/n5Hz6xPffccxoZGdFXvvKVrPtpa2vT97//fdvugRmW55xzM72Ja0kmkwoEAkokEioqKprp7WCOysV1xGfhAAMCAgwICDAgIMCAgAADAgIMCAgwICDAgIAAAwICDAgIMCAgwICAAAMCAgwICDAgIMCAgAADAgIMCAgwICDAgIAAAwICDAgIMCAgwICAAAMCAgwICDAgIMCAgAADAgIMCAgwICDAgIAAAwICDAgIMCAgwICAAAMCAgwICDAgIMCAgAADAgIMCAgwICDAgIAAAwICDAgIMCAgwICAAAMCAgwICDAgIMCAgAADAgIMCAgwICDAgIAAAwICDAgIMCAgwICAAAMCAgwICDCYVEDt7e0qKytTYWGhqqurdezYsY+d/7vf/U633XabCgsLddddd+nIkSOT2iww23gO6NChQ2publZbW5t6e3tVXl6u2tpaXbx4ccz5f/3rX/XQQw/p4Ycf1vHjx7Vu3TqtW7dOr7/+unnzwEzLc845Lwuqq6v12c9+Vj//+c8lSel0WqFQSN/61re0bdu2UfPr6+s1NDSkV155JTP2uc99ThUVFdqzZ8+Y5xgeHtbw8HDmdiKR0LJlyzQwMKCioiIv2wUyksmkQqGQLl++rEAgMDV36jwYHh52BQUF7qWXXsoab2hocA888MCYa0KhkPvpT3+aNdba2upWrFgx7nna2tqcJA6OnBxnz571ctl/rHnyYHBwUKlUSsFgMGs8GAzq1KlTY66JxWJjzo/FYuOep6WlRc3NzZnbly9f1k033aT+/v6p+5fjf8x//3XlWXp8/30l
c+ONN07ZfXoKaLr4/X75/f5R44FAgIvjGoqKiniMriE/f+refPZ0T8XFxSooKFA8Hs8aj8fjKikpGXNNSUmJp/nAXOIpIJ/Pp8rKSkWj0cxYOp1WNBpVTU3NmGtqamqy5kvSq6++Ou58YE7x+kvTwYMHnd/vdwcOHHBvvPGG27Rpk1u0aJGLxWLOOefWr1/vtm3blpn/l7/8xc2bN88988wz7s0333RtbW1u/vz57sSJExM+59WrV11bW5u7evWq1+3+3+AxurZcPEaeA3LOuZ/97Gdu2bJlzufzuaqqKve3v/0t899Wr17tGhsbs+a/8MIL7pZbbnE+n8/dcccd7vDhw6ZNA7OF578DAfgQn4UDDAgIMCAgwICAAINZExBfkbg2L4/RgQMHlJeXl3UUFhZO426n12uvvaa6ujqVlpYqLy9PL7/88jXXdHV16Z577pHf79fNN9+sAwcOeD7vrAiIr0hcm9fHSPrgYz3//Oc/M8eFCxemccfTa2hoSOXl5Wpvb5/Q/PPnz2vt2rVas2aN+vr6tHnzZm3cuFGdnZ3eTjzT76M751xVVZVramrK3E6lUq60tNRFIpEx53/1q191a9euzRqrrq523/jGN3K6z5nk9THav3+/CwQC07S72UXSqG8MfNRjjz3m7rjjjqyx+vp6V1tb6+lcM/4MNDIyop6eHoXD4cxYfn6+wuGwuru7x1zT3d2dNV+Samtrx50/103mMZKkd955RzfddJNCoZC+/OUv6+TJk9Ox3Tlhqq6hGQ/o474iMd5XHibzFYm5bDKP0a233qp9+/bpD3/4g37zm98onU5r1apV+sc//jEdW571xruGksmk3nvvvQnfz6z8OgPsampqsj6wu2rVKt1+++36xS9+oZ07d87gzv63zPgzEF+RuLbJPEYfNX/+fN199906c+ZMLrY454x3DRUVFWnBggUTvp8ZD4ivSFzbZB6jj0qlUjpx4oSWLFmSq23OKVN2DXl9hyMXZuIrEnON18dox44drrOz0509e9b19PS4Bx980BUWFrqTJ0/O1I+QU1euXHHHjx93x48fd5Lcrl273PHjx92FCxecc85t27bNrV+/PjP/3Llz7rrrrnPf/e533Ztvvuna29tdQUGB6+jo8HTeWRGQc3xFYiK8PEabN2/OzA0Gg+7+++93vb29M7Dr6XH06NEx/wci/31MGhsb3erVq0etqaiocD6fzy1fvtzt37/f83n5OgNgMOO/AwFzGQEBBgQEGBAQYEBAgAEBAQYEBBgQEGBAQIABAQEGBAQY/Af362qPAlgjigAAAABJRU5ErkJggg=="
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "execution_count": 50
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-08-11T12:05:58.453401900Z",
     "start_time": "2025-08-11T08:47:44.874153Z"
    }
   },
   "cell_type": "code",
   "source": [
    "## Get predicted labels for one batch of the test set\n",
    "X, y = next(iter(test_iter))  # idiomatic single-batch grab (replaces for/break); y kept for later comparison\n",
    "predict = net(X)\n",
    "predict_index = predict.argmax(axis=1)  # index of the highest-scoring class per example\n",
    "print(predict_index)\n",
    "print(d2l.get_fashion_mnist_labels(predict_index))"
   ],
   "id": "10dd6eb2ae5932f8",
   "outputs": [],
   "execution_count": 58
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
