{
 "cells": [
  {
   "cell_type": "code",
   "id": "initial_id",
   "metadata": {
    "collapsed": true,
    "ExecuteTime": {
     "end_time": "2025-09-17T02:21:23.354282Z",
     "start_time": "2025-09-17T02:21:20.382842Z"
    }
   },
   "source": [
     "import torch\n",
     "import torchvision\n",
     "from torch import nn\n",
     "from torch.nn import Sequential, Conv2d, MaxPool2d, Flatten\n",
     "from torch.utils.data import DataLoader\n",
     "from torch.utils.tensorboard import SummaryWriter\n",
     "from torchvision import transforms\n",
     "\n",
     "import time\n",
     "\n",
     "# Preprocessing pipeline: resize CIFAR-10 images from 32x32 up to 224x224\n",
     "# (the input size the AlexNet below expects) and normalize each RGB channel\n",
     "# to [-1, 1] so the model converges more easily.\n",
     "transform = transforms.Compose([\n",
     "    transforms.Resize(224),\n",
     "    transforms.ToTensor(),\n",
     "    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n",
     "])\n",
     "\n",
     "# CIFAR-10: 50k train / 10k test images, 10 classes; downloaded on first run.\n",
     "train_data = torchvision.datasets.CIFAR10(root='../dataset', train=True, download=True, transform=transform)\n",
     "train_loader = DataLoader(train_data, batch_size=128, shuffle=True)\n",
     "\n",
     "test_data = torchvision.datasets.CIFAR10(root='../dataset', train=False, download=True, transform=transform)\n",
     "test_loader = DataLoader(test_data, batch_size=128, shuffle=True)\n",
     "\n",
     "print(f\"训练集的长度为：{len(train_data)}\")\n",
     "print(f\"测试集的长度为：{len(test_data)}\")\n",
     "print(f\"特征尺度为:{train_data[0][0].shape}\")\n",
     "# Train on the Apple-silicon GPU (MPS backend) when available, else CPU.\n",
     "device = torch.device(\"mps\" if torch.backends.mps.is_available() else \"cpu\")"
   ],
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "训练集的长度为：50000\n",
      "测试集的长度为：10000\n",
      "特征尺度为:torch.Size([3, 224, 224])\n"
     ]
    }
   ],
   "execution_count": 1
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-17T03:49:35.464146Z",
     "start_time": "2025-09-17T03:49:34.430464Z"
    }
   },
   "cell_type": "code",
   "source": [
    "drop = 0.4\n",
    "class AlexNet(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(AlexNet, self).__init__()\n",
    "        self.model1 = Sequential(\n",
    "            nn.Conv2d(3, 96, kernel_size=11, stride=4, padding=1),nn.ReLU(),\n",
    "            nn.MaxPool2d(kernel_size=3, stride=2),\n",
    "            nn.Conv2d(96, 256, kernel_size=3, padding=2),nn.ReLU(),\n",
    "            nn.MaxPool2d(kernel_size=3, stride=2),\n",
    "            nn.Conv2d(256, 384, kernel_size=3, padding=1),nn.ReLU(),\n",
    "            nn.Conv2d(384, 384, kernel_size=3, padding=1),nn.ReLU(),\n",
    "            nn.Conv2d(384, 256, kernel_size=3, padding=1),nn.ReLU(),\n",
    "            nn.MaxPool2d(kernel_size=3, stride=2),\n",
    "            nn.Flatten(),\n",
    "            nn.Linear(6400,4096, bias=True),\n",
    "            nn.ReLU(),\n",
    "            nn.Dropout(drop),\n",
    "            nn.Linear(4096, 4096, bias=True),\n",
    "            nn.ReLU(),\n",
    "            nn.Dropout(drop),\n",
    "            nn.Linear(4096, 10),\n",
    "        )\n",
    "\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = self.model1(x)\n",
    "        return x\n",
    "\n",
    "model = AlexNet().to(device)\n",
    "print(model)\n",
    "from torchinfo import summary\n",
    "summary(model, (1, 3, 224, 224))"
   ],
   "id": "d63d9a3458705d69",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "AlexNet(\n",
      "  (model1): Sequential(\n",
      "    (0): Conv2d(3, 96, kernel_size=(11, 11), stride=(4, 4), padding=(1, 1))\n",
      "    (1): ReLU()\n",
      "    (2): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
      "    (3): Conv2d(96, 256, kernel_size=(3, 3), stride=(1, 1), padding=(2, 2))\n",
      "    (4): ReLU()\n",
      "    (5): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
      "    (6): Conv2d(256, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (7): ReLU()\n",
      "    (8): Conv2d(384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (9): ReLU()\n",
      "    (10): Conv2d(384, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (11): ReLU()\n",
      "    (12): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
      "    (13): Flatten(start_dim=1, end_dim=-1)\n",
      "    (14): Linear(in_features=6400, out_features=4096, bias=True)\n",
      "    (15): ReLU()\n",
      "    (16): Dropout(p=0.4, inplace=False)\n",
      "    (17): Linear(in_features=4096, out_features=4096, bias=True)\n",
      "    (18): ReLU()\n",
      "    (19): Dropout(p=0.4, inplace=False)\n",
      "    (20): Linear(in_features=4096, out_features=10, bias=True)\n",
      "  )\n",
      ")\n"
     ]
    },
    {
     "ename": "RuntimeError",
     "evalue": "Failed to run torchinfo. See above stack traces for more details. Executed layers up to: [Conv2d: 2, ReLU: 2, MaxPool2d: 2, Conv2d: 2, ReLU: 2, MaxPool2d: 2, Conv2d: 2, ReLU: 2, Conv2d: 2, ReLU: 2, Conv2d: 2, ReLU: 2, MaxPool2d: 2, Flatten: 2]",
     "output_type": "error",
     "traceback": [
      "\u001B[0;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[0;31mRuntimeError\u001B[0m                              Traceback (most recent call last)",
      "File \u001B[0;32m~/anaconda3/envs/study/lib/python3.10/site-packages/torchinfo/torchinfo.py:295\u001B[0m, in \u001B[0;36mforward_pass\u001B[0;34m(model, x, batch_dim, cache_forward_pass, device, mode, **kwargs)\u001B[0m\n\u001B[1;32m    294\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28misinstance\u001B[39m(x, (\u001B[38;5;28mlist\u001B[39m, \u001B[38;5;28mtuple\u001B[39m)):\n\u001B[0;32m--> 295\u001B[0m     _ \u001B[38;5;241m=\u001B[39m \u001B[43mmodel\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mx\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m    296\u001B[0m \u001B[38;5;28;01melif\u001B[39;00m \u001B[38;5;28misinstance\u001B[39m(x, \u001B[38;5;28mdict\u001B[39m):\n",
      "File \u001B[0;32m~/anaconda3/envs/study/lib/python3.10/site-packages/torch/nn/modules/module.py:1775\u001B[0m, in \u001B[0;36mModule._wrapped_call_impl\u001B[0;34m(self, *args, **kwargs)\u001B[0m\n\u001B[1;32m   1774\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m:\n\u001B[0;32m-> 1775\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_call_impl\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n",
      "File \u001B[0;32m~/anaconda3/envs/study/lib/python3.10/site-packages/torch/nn/modules/module.py:1881\u001B[0m, in \u001B[0;36mModule._call_impl\u001B[0;34m(self, *args, **kwargs)\u001B[0m\n\u001B[1;32m   1880\u001B[0m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[0;32m-> 1881\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43minner\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m   1882\u001B[0m \u001B[38;5;28;01mexcept\u001B[39;00m \u001B[38;5;167;01mException\u001B[39;00m:\n\u001B[1;32m   1883\u001B[0m     \u001B[38;5;66;03m# run always called hooks if they have not already been run\u001B[39;00m\n\u001B[1;32m   1884\u001B[0m     \u001B[38;5;66;03m# For now only forward hooks have the always_call option but perhaps\u001B[39;00m\n\u001B[1;32m   1885\u001B[0m     \u001B[38;5;66;03m# this functionality should be added to full backward hooks as well.\u001B[39;00m\n",
      "File \u001B[0;32m~/anaconda3/envs/study/lib/python3.10/site-packages/torch/nn/modules/module.py:1829\u001B[0m, in \u001B[0;36mModule._call_impl.<locals>.inner\u001B[0;34m()\u001B[0m\n\u001B[1;32m   1827\u001B[0m     args \u001B[38;5;241m=\u001B[39m bw_hook\u001B[38;5;241m.\u001B[39msetup_input_hook(args)\n\u001B[0;32m-> 1829\u001B[0m result \u001B[38;5;241m=\u001B[39m \u001B[43mforward_call\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m   1830\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m _global_forward_hooks \u001B[38;5;129;01mor\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_forward_hooks:\n",
      "Cell \u001B[0;32mIn[15], line 26\u001B[0m, in \u001B[0;36mAlexNet.forward\u001B[0;34m(self, x)\u001B[0m\n\u001B[1;32m     25\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m\u001B[38;5;250m \u001B[39m\u001B[38;5;21mforward\u001B[39m(\u001B[38;5;28mself\u001B[39m, x):\n\u001B[0;32m---> 26\u001B[0m     x \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mmodel1\u001B[49m\u001B[43m(\u001B[49m\u001B[43mx\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m     27\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m x\n",
      "File \u001B[0;32m~/anaconda3/envs/study/lib/python3.10/site-packages/torch/nn/modules/module.py:1775\u001B[0m, in \u001B[0;36mModule._wrapped_call_impl\u001B[0;34m(self, *args, **kwargs)\u001B[0m\n\u001B[1;32m   1774\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m:\n\u001B[0;32m-> 1775\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_call_impl\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n",
      "File \u001B[0;32m~/anaconda3/envs/study/lib/python3.10/site-packages/torch/nn/modules/module.py:1881\u001B[0m, in \u001B[0;36mModule._call_impl\u001B[0;34m(self, *args, **kwargs)\u001B[0m\n\u001B[1;32m   1880\u001B[0m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[0;32m-> 1881\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43minner\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m   1882\u001B[0m \u001B[38;5;28;01mexcept\u001B[39;00m \u001B[38;5;167;01mException\u001B[39;00m:\n\u001B[1;32m   1883\u001B[0m     \u001B[38;5;66;03m# run always called hooks if they have not already been run\u001B[39;00m\n\u001B[1;32m   1884\u001B[0m     \u001B[38;5;66;03m# For now only forward hooks have the always_call option but perhaps\u001B[39;00m\n\u001B[1;32m   1885\u001B[0m     \u001B[38;5;66;03m# this functionality should be added to full backward hooks as well.\u001B[39;00m\n",
      "File \u001B[0;32m~/anaconda3/envs/study/lib/python3.10/site-packages/torch/nn/modules/module.py:1829\u001B[0m, in \u001B[0;36mModule._call_impl.<locals>.inner\u001B[0;34m()\u001B[0m\n\u001B[1;32m   1827\u001B[0m     args \u001B[38;5;241m=\u001B[39m bw_hook\u001B[38;5;241m.\u001B[39msetup_input_hook(args)\n\u001B[0;32m-> 1829\u001B[0m result \u001B[38;5;241m=\u001B[39m \u001B[43mforward_call\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m   1830\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m _global_forward_hooks \u001B[38;5;129;01mor\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_forward_hooks:\n",
      "File \u001B[0;32m~/anaconda3/envs/study/lib/python3.10/site-packages/torch/nn/modules/container.py:250\u001B[0m, in \u001B[0;36mSequential.forward\u001B[0;34m(self, input)\u001B[0m\n\u001B[1;32m    249\u001B[0m \u001B[38;5;28;01mfor\u001B[39;00m module \u001B[38;5;129;01min\u001B[39;00m \u001B[38;5;28mself\u001B[39m:\n\u001B[0;32m--> 250\u001B[0m     \u001B[38;5;28minput\u001B[39m \u001B[38;5;241m=\u001B[39m \u001B[43mmodule\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;28;43minput\u001B[39;49m\u001B[43m)\u001B[49m\n\u001B[1;32m    251\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28minput\u001B[39m\n",
      "File \u001B[0;32m~/anaconda3/envs/study/lib/python3.10/site-packages/torch/nn/modules/module.py:1775\u001B[0m, in \u001B[0;36mModule._wrapped_call_impl\u001B[0;34m(self, *args, **kwargs)\u001B[0m\n\u001B[1;32m   1774\u001B[0m \u001B[38;5;28;01melse\u001B[39;00m:\n\u001B[0;32m-> 1775\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_call_impl\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n",
      "File \u001B[0;32m~/anaconda3/envs/study/lib/python3.10/site-packages/torch/nn/modules/module.py:1881\u001B[0m, in \u001B[0;36mModule._call_impl\u001B[0;34m(self, *args, **kwargs)\u001B[0m\n\u001B[1;32m   1880\u001B[0m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[0;32m-> 1881\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43minner\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m   1882\u001B[0m \u001B[38;5;28;01mexcept\u001B[39;00m \u001B[38;5;167;01mException\u001B[39;00m:\n\u001B[1;32m   1883\u001B[0m     \u001B[38;5;66;03m# run always called hooks if they have not already been run\u001B[39;00m\n\u001B[1;32m   1884\u001B[0m     \u001B[38;5;66;03m# For now only forward hooks have the always_call option but perhaps\u001B[39;00m\n\u001B[1;32m   1885\u001B[0m     \u001B[38;5;66;03m# this functionality should be added to full backward hooks as well.\u001B[39;00m\n",
      "File \u001B[0;32m~/anaconda3/envs/study/lib/python3.10/site-packages/torch/nn/modules/module.py:1829\u001B[0m, in \u001B[0;36mModule._call_impl.<locals>.inner\u001B[0;34m()\u001B[0m\n\u001B[1;32m   1827\u001B[0m     args \u001B[38;5;241m=\u001B[39m bw_hook\u001B[38;5;241m.\u001B[39msetup_input_hook(args)\n\u001B[0;32m-> 1829\u001B[0m result \u001B[38;5;241m=\u001B[39m \u001B[43mforward_call\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m   1830\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m _global_forward_hooks \u001B[38;5;129;01mor\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_forward_hooks:\n",
      "File \u001B[0;32m~/anaconda3/envs/study/lib/python3.10/site-packages/torch/nn/modules/linear.py:134\u001B[0m, in \u001B[0;36mLinear.forward\u001B[0;34m(self, input)\u001B[0m\n\u001B[1;32m    131\u001B[0m \u001B[38;5;250m\u001B[39m\u001B[38;5;124;03m\"\"\"\u001B[39;00m\n\u001B[1;32m    132\u001B[0m \u001B[38;5;124;03mRuns the forward pass.\u001B[39;00m\n\u001B[1;32m    133\u001B[0m \u001B[38;5;124;03m\"\"\"\u001B[39;00m\n\u001B[0;32m--> 134\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43mF\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mlinear\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;28;43minput\u001B[39;49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mweight\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mbias\u001B[49m\u001B[43m)\u001B[49m\n",
      "\u001B[0;31mRuntimeError\u001B[0m: mat1 and mat2 shapes cannot be multiplied (1x9216 and 6400x4096)",
      "\nThe above exception was the direct cause of the following exception:\n",
      "\u001B[0;31mRuntimeError\u001B[0m                              Traceback (most recent call last)",
      "Cell \u001B[0;32mIn[15], line 32\u001B[0m\n\u001B[1;32m     30\u001B[0m \u001B[38;5;28mprint\u001B[39m(model)\n\u001B[1;32m     31\u001B[0m \u001B[38;5;28;01mfrom\u001B[39;00m\u001B[38;5;250m \u001B[39m\u001B[38;5;21;01mtorchinfo\u001B[39;00m\u001B[38;5;250m \u001B[39m\u001B[38;5;28;01mimport\u001B[39;00m summary\n\u001B[0;32m---> 32\u001B[0m \u001B[43msummary\u001B[49m\u001B[43m(\u001B[49m\u001B[43mmodel\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;241;43m1\u001B[39;49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m3\u001B[39;49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m224\u001B[39;49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m224\u001B[39;49m\u001B[43m)\u001B[49m\u001B[43m)\u001B[49m\n",
      "File \u001B[0;32m~/anaconda3/envs/study/lib/python3.10/site-packages/torchinfo/torchinfo.py:223\u001B[0m, in \u001B[0;36msummary\u001B[0;34m(model, input_size, input_data, batch_dim, cache_forward_pass, col_names, col_width, depth, device, dtypes, mode, row_settings, verbose, **kwargs)\u001B[0m\n\u001B[1;32m    216\u001B[0m validate_user_params(\n\u001B[1;32m    217\u001B[0m     input_data, input_size, columns, col_width, device, dtypes, verbose\n\u001B[1;32m    218\u001B[0m )\n\u001B[1;32m    220\u001B[0m x, correct_input_size \u001B[38;5;241m=\u001B[39m process_input(\n\u001B[1;32m    221\u001B[0m     input_data, input_size, batch_dim, device, dtypes\n\u001B[1;32m    222\u001B[0m )\n\u001B[0;32m--> 223\u001B[0m summary_list \u001B[38;5;241m=\u001B[39m \u001B[43mforward_pass\u001B[49m\u001B[43m(\u001B[49m\n\u001B[1;32m    224\u001B[0m \u001B[43m    \u001B[49m\u001B[43mmodel\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mx\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mbatch_dim\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mcache_forward_pass\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mdevice\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mmodel_mode\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\n\u001B[1;32m    225\u001B[0m \u001B[43m\u001B[49m\u001B[43m)\u001B[49m\n\u001B[1;32m    226\u001B[0m formatting \u001B[38;5;241m=\u001B[39m FormattingOptions(depth, verbose, columns, col_width, rows)\n\u001B[1;32m    227\u001B[0m results \u001B[38;5;241m=\u001B[39m ModelStatistics(\n\u001B[1;32m    228\u001B[0m     summary_list, correct_input_size, get_total_memory_used(x), formatting\n\u001B[1;32m    229\u001B[0m )\n",
      "File \u001B[0;32m~/anaconda3/envs/study/lib/python3.10/site-packages/torchinfo/torchinfo.py:304\u001B[0m, in \u001B[0;36mforward_pass\u001B[0;34m(model, x, batch_dim, cache_forward_pass, device, mode, **kwargs)\u001B[0m\n\u001B[1;32m    302\u001B[0m \u001B[38;5;28;01mexcept\u001B[39;00m \u001B[38;5;167;01mException\u001B[39;00m \u001B[38;5;28;01mas\u001B[39;00m e:\n\u001B[1;32m    303\u001B[0m     executed_layers \u001B[38;5;241m=\u001B[39m [layer \u001B[38;5;28;01mfor\u001B[39;00m layer \u001B[38;5;129;01min\u001B[39;00m summary_list \u001B[38;5;28;01mif\u001B[39;00m layer\u001B[38;5;241m.\u001B[39mexecuted]\n\u001B[0;32m--> 304\u001B[0m     \u001B[38;5;28;01mraise\u001B[39;00m \u001B[38;5;167;01mRuntimeError\u001B[39;00m(\n\u001B[1;32m    305\u001B[0m         \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mFailed to run torchinfo. See above stack traces for more details. \u001B[39m\u001B[38;5;124m\"\u001B[39m\n\u001B[1;32m    306\u001B[0m         \u001B[38;5;124mf\u001B[39m\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mExecuted layers up to: \u001B[39m\u001B[38;5;132;01m{\u001B[39;00mexecuted_layers\u001B[38;5;132;01m}\u001B[39;00m\u001B[38;5;124m\"\u001B[39m\n\u001B[1;32m    307\u001B[0m     ) \u001B[38;5;28;01mfrom\u001B[39;00m\u001B[38;5;250m \u001B[39m\u001B[38;5;21;01me\u001B[39;00m\n\u001B[1;32m    308\u001B[0m \u001B[38;5;28;01mfinally\u001B[39;00m:\n\u001B[1;32m    309\u001B[0m     \u001B[38;5;28;01mif\u001B[39;00m hooks:\n",
      "\u001B[0;31mRuntimeError\u001B[0m: Failed to run torchinfo. See above stack traces for more details. Executed layers up to: [Conv2d: 2, ReLU: 2, MaxPool2d: 2, Conv2d: 2, ReLU: 2, MaxPool2d: 2, Conv2d: 2, ReLU: 2, Conv2d: 2, ReLU: 2, Conv2d: 2, ReLU: 2, MaxPool2d: 2, Flatten: 2]"
     ]
    }
   ],
   "execution_count": 15
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-17T02:24:07.571886Z",
     "start_time": "2025-09-17T02:24:07.296401Z"
    }
   },
   "cell_type": "code",
   "source": [
    "X = torch.randn(1, 3, 224, 224).to(device)\n",
    "for layer in model.model1:\n",
    "    X = layer(X)\n",
    "    print(layer.__class__.__name__, f'output shape:{X.shape}')"
   ],
   "id": "63d5970cbc0d0e77",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Conv2d output shape:torch.Size([1, 96, 54, 54])\n",
      "ReLU output shape:torch.Size([1, 96, 54, 54])\n",
      "MaxPool2d output shape:torch.Size([1, 96, 26, 26])\n",
      "Conv2d output shape:torch.Size([1, 256, 26, 26])\n",
      "ReLU output shape:torch.Size([1, 256, 26, 26])\n",
      "MaxPool2d output shape:torch.Size([1, 256, 12, 12])\n",
      "Conv2d output shape:torch.Size([1, 384, 12, 12])\n",
      "ReLU output shape:torch.Size([1, 384, 12, 12])\n",
      "Conv2d output shape:torch.Size([1, 384, 12, 12])\n",
      "ReLU output shape:torch.Size([1, 384, 12, 12])\n",
      "Conv2d output shape:torch.Size([1, 256, 12, 12])\n",
      "ReLU output shape:torch.Size([1, 256, 12, 12])\n",
      "MaxPool2d output shape:torch.Size([1, 256, 5, 5])\n",
      "Flatten output shape:torch.Size([1, 6400])\n",
      "Linear output shape:torch.Size([1, 4096])\n",
      "ReLU output shape:torch.Size([1, 4096])\n",
      "Dropout output shape:torch.Size([1, 4096])\n",
      "Linear output shape:torch.Size([1, 4096])\n",
      "ReLU output shape:torch.Size([1, 4096])\n",
      "Dropout output shape:torch.Size([1, 4096])\n",
      "Linear output shape:torch.Size([1, 10])\n"
     ]
    }
   ],
   "execution_count": 9
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-17T02:24:35.109348Z",
     "start_time": "2025-09-17T02:24:35.100077Z"
    }
   },
   "cell_type": "code",
   "source": [
     "# Cross-entropy is the standard loss for 10-way CIFAR-10 classification.\n",
     "loss_fn = nn.CrossEntropyLoss()\n",
     "if torch.backends.mps.is_available():\n",
     "    # NOTE(review): CrossEntropyLoss holds no parameters, so this .to(device)\n",
     "    # is effectively a no-op; kept for a consistent device-placement pattern.\n",
     "    loss_fn = loss_fn.to(device)\n",
     "learning_rate = 1e-4\n",
     "optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n",
     "epochs = 10\n",
     "\n",
     "\n",
     "# TensorBoard writer to record how the loss evolves during training.\n",
     "writer = SummaryWriter(\"./logs-train\")"
   ],
   "id": "84a638b4c12f4198",
   "outputs": [],
   "execution_count": 10
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-17T03:39:25.309399Z",
     "start_time": "2025-09-17T02:24:37.231252Z"
    }
   },
   "cell_type": "code",
   "source": [
    "from d2l_learn.utils import *\n",
    "train_CIAFAR10(model, train_data, train_loader, test_data, test_loader, loss_fn, optimizer, epochs, device, writer)"
   ],
   "id": "119abb1b50eb17f7",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "-------------第0轮训练开始了------------\n",
      "训练 100 次的时间为：29.910260915756226\n",
      "-------------训练次数：100，loss 为: 1.9270880222320557------------\n",
      "训练 100 次的时间为：58.64410185813904\n",
      "-------------训练次数：200，loss 为: 1.764473557472229------------\n",
      "训练 100 次的时间为：86.75780200958252\n",
      "-------------训练次数：300，loss 为: 1.5683321952819824------------\n",
      "训练 100 次的时间为：117.35748195648193\n",
      "-------------训练次数：400，loss 为: 1.4543482065200806------------\n",
      "训练 100 次的时间为：152.15649008750916\n",
      "-------------训练次数：500，loss 为: 1.332453966140747------------\n",
      "训练 100 次的时间为：191.06347012519836\n",
      "-------------训练次数：600，loss 为: 1.2320983409881592------------\n",
      "训练 100 次的时间为：235.6842918395996\n",
      "-------------训练次数：700，loss 为: 1.240763545036316------------\n",
      "epoch：0, 训练集上的accuracy: 42.15%\n",
      "------------测试集上的 loss: 0.018842142075300217, accuracy: 0.5649999976158142\n",
      "-------------第1轮训练开始了------------\n",
      "训练 100 次的时间为：8.36197018623352\n",
      "-------------训练次数：800，loss 为: 1.3179035186767578------------\n",
      "训练 100 次的时间为：52.875479221343994\n",
      "-------------训练次数：900，loss 为: 1.2273907661437988------------\n",
      "训练 100 次的时间为：142.4671151638031\n",
      "-------------训练次数：1000，loss 为: 1.0595855712890625------------\n",
      "训练 100 次的时间为：293.26049304008484\n",
      "-------------训练次数：1100，loss 为: 0.9905940294265747------------\n",
      "训练 100 次的时间为：344.10757327079773\n",
      "-------------训练次数：1200，loss 为: 0.9634070992469788------------\n",
      "训练 100 次的时间为：390.89469718933105\n",
      "-------------训练次数：1300，loss 为: 0.9542380571365356------------\n",
      "训练 100 次的时间为：441.43770027160645\n",
      "-------------训练次数：1400，loss 为: 0.8387022614479065------------\n",
      "训练 100 次的时间为：491.32826018333435\n",
      "-------------训练次数：1500，loss 为: 0.8420892357826233------------\n",
      "epoch：1, 训练集上的accuracy: 62.18%\n",
      "------------测试集上的 loss: 0.014480232261121273, accuracy: 0.6754999756813049\n",
      "-------------第2轮训练开始了------------\n",
      "训练 100 次的时间为：18.100502014160156\n",
      "-------------训练次数：1600，loss 为: 0.703894853591919------------\n",
      "训练 100 次的时间为：65.04375982284546\n",
      "-------------训练次数：1700，loss 为: 0.8405019044876099------------\n",
      "训练 100 次的时间为：105.80676794052124\n",
      "-------------训练次数：1800，loss 为: 1.1767923831939697------------\n",
      "训练 100 次的时间为：151.30487298965454\n",
      "-------------训练次数：1900，loss 为: 0.8175066709518433------------\n",
      "训练 100 次的时间为：221.07471585273743\n",
      "-------------训练次数：2000，loss 为: 0.9077244400978088------------\n",
      "训练 100 次的时间为：279.73060297966003\n",
      "-------------训练次数：2100，loss 为: 0.8343098163604736------------\n",
      "训练 100 次的时间为：322.8946199417114\n",
      "-------------训练次数：2200，loss 为: 0.8637394905090332------------\n",
      "训练 100 次的时间为：388.69871497154236\n",
      "-------------训练次数：2300，loss 为: 0.6543537378311157------------\n",
      "epoch：2, 训练集上的accuracy: 71.34%\n",
      "------------测试集上的 loss: 0.012472373433411121, accuracy: 0.7253999710083008\n",
      "-------------第3轮训练开始了------------\n",
      "训练 100 次的时间为：35.07204508781433\n",
      "-------------训练次数：2400，loss 为: 0.5084746479988098------------\n",
      "训练 100 次的时间为：93.79054379463196\n",
      "-------------训练次数：2500，loss 为: 0.635942816734314------------\n",
      "训练 100 次的时间为：158.69364404678345\n",
      "-------------训练次数：2600，loss 为: 0.8991628885269165------------\n",
      "训练 100 次的时间为：219.6940848827362\n",
      "-------------训练次数：2700，loss 为: 0.7428869009017944------------\n",
      "训练 100 次的时间为：289.4804198741913\n",
      "-------------训练次数：2800，loss 为: 0.577072024345398------------\n",
      "训练 100 次的时间为：350.61764192581177\n",
      "-------------训练次数：2900，loss 为: 0.5399010181427002------------\n",
      "训练 100 次的时间为：399.6053099632263\n",
      "-------------训练次数：3000，loss 为: 0.653327226638794------------\n",
      "训练 100 次的时间为：443.3753628730774\n",
      "-------------训练次数：3100，loss 为: 0.7750781178474426------------\n",
      "epoch：3, 训练集上的accuracy: 77.01%\n",
      "------------测试集上的 loss: 0.010501526296138763, accuracy: 0.7684999704360962\n",
      "-------------第4轮训练开始了------------\n",
      "训练 100 次的时间为：36.447585105895996\n",
      "-------------训练次数：3200，loss 为: 0.46652740240097046------------\n",
      "训练 100 次的时间为：94.90051198005676\n",
      "-------------训练次数：3300，loss 为: 0.5017637014389038------------\n",
      "训练 100 次的时间为：146.2560088634491\n",
      "-------------训练次数：3400，loss 为: 0.3721081614494324------------\n",
      "训练 100 次的时间为：215.51079511642456\n",
      "-------------训练次数：3500，loss 为: 0.6931602954864502------------\n",
      "训练 100 次的时间为：289.89723801612854\n",
      "-------------训练次数：3600，loss 为: 0.6000766754150391------------\n",
      "训练 100 次的时间为：343.4432508945465\n",
      "-------------训练次数：3700，loss 为: 0.46689799427986145------------\n",
      "训练 100 次的时间为：394.6532971858978\n",
      "-------------训练次数：3800，loss 为: 0.4560313820838928------------\n",
      "训练 100 次的时间为：453.2519030570984\n",
      "-------------训练次数：3900，loss 为: 0.5799048542976379------------\n",
      "epoch：4, 训练集上的accuracy: 81.39%\n",
      "------------测试集上的 loss: 0.010095602832734585, accuracy: 0.7795000076293945\n",
      "-------------第5轮训练开始了------------\n",
      "训练 100 次的时间为：40.519726037979126\n",
      "-------------训练次数：4000，loss 为: 0.4280250668525696------------\n",
      "训练 100 次的时间为：89.30830788612366\n",
      "-------------训练次数：4100，loss 为: 0.4207298755645752------------\n",
      "训练 100 次的时间为：131.89187502861023\n",
      "-------------训练次数：4200，loss 为: 0.5934892296791077------------\n",
      "训练 100 次的时间为：180.5677089691162\n",
      "-------------训练次数：4300，loss 为: 0.39611995220184326------------\n",
      "训练 100 次的时间为：227.43841695785522\n",
      "-------------训练次数：4400，loss 为: 0.4569835364818573------------\n",
      "训练 100 次的时间为：269.01211285591125\n",
      "-------------训练次数：4500，loss 为: 0.34180280566215515------------\n",
      "训练 100 次的时间为：318.5636501312256\n",
      "-------------训练次数：4600，loss 为: 0.4155367314815521------------\n",
      "epoch：5, 训练集上的accuracy: 85.19%\n",
      "------------测试集上的 loss: 0.009563799947500229, accuracy: 0.7947999835014343\n",
      "-------------第6轮训练开始了------------\n",
      "训练 100 次的时间为：3.115821123123169\n",
      "-------------训练次数：4700，loss 为: 0.30446746945381165------------\n",
      "训练 100 次的时间为：47.326570987701416\n",
      "-------------训练次数：4800，loss 为: 0.3595137894153595------------\n",
      "训练 100 次的时间为：100.35133194923401\n",
      "-------------训练次数：4900，loss 为: 0.23539012670516968------------\n",
      "训练 100 次的时间为：149.0975739955902\n",
      "-------------训练次数：5000，loss 为: 0.5483778715133667------------\n",
      "训练 100 次的时间为：198.34368014335632\n",
      "-------------训练次数：5100，loss 为: 0.26430463790893555------------\n",
      "训练 100 次的时间为：251.00032305717468\n",
      "-------------训练次数：5200，loss 为: 0.25668033957481384------------\n",
      "训练 100 次的时间为：333.1263949871063\n",
      "-------------训练次数：5300，loss 为: 0.379652738571167------------\n",
      "训练 100 次的时间为：409.5745849609375\n",
      "-------------训练次数：5400，loss 为: 0.2824628949165344------------\n",
      "epoch：6, 训练集上的accuracy: 88.27%\n",
      "------------测试集上的 loss: 0.009977594949305058, accuracy: 0.8007000088691711\n",
      "-------------第7轮训练开始了------------\n",
      "训练 100 次的时间为：15.729869842529297\n",
      "-------------训练次数：5500，loss 为: 0.18201956152915955------------\n",
      "训练 100 次的时间为：78.53258395195007\n",
      "-------------训练次数：5600，loss 为: 0.233649343252182------------\n",
      "训练 100 次的时间为：172.62533402442932\n",
      "-------------训练次数：5700，loss 为: 0.271463543176651------------\n",
      "训练 100 次的时间为：231.3890838623047\n",
      "-------------训练次数：5800，loss 为: 0.3524433970451355------------\n",
      "训练 100 次的时间为：297.748281955719\n",
      "-------------训练次数：5900，loss 为: 0.15417969226837158------------\n",
      "训练 100 次的时间为：352.10278511047363\n",
      "-------------训练次数：6000，loss 为: 0.2541019916534424------------\n",
      "训练 100 次的时间为：394.2185580730438\n",
      "-------------训练次数：6100，loss 为: 0.21335354447364807------------\n",
      "训练 100 次的时间为：440.5448389053345\n",
      "-------------训练次数：6200，loss 为: 0.17008116841316223------------\n",
      "epoch：7, 训练集上的accuracy: 91.22%\n",
      "------------测试集上的 loss: 0.009543174877762794, accuracy: 0.8137999773025513\n",
      "-------------第8轮训练开始了------------\n",
      "训练 100 次的时间为：21.579252243041992\n",
      "-------------训练次数：6300，loss 为: 0.2575920820236206------------\n",
      "训练 100 次的时间为：67.12344908714294\n",
      "-------------训练次数：6400，loss 为: 0.06906352937221527------------\n",
      "训练 100 次的时间为：126.51123929023743\n",
      "-------------训练次数：6500，loss 为: 0.21635255217552185------------\n",
      "训练 100 次的时间为：184.78562211990356\n",
      "-------------训练次数：6600，loss 为: 0.13700690865516663------------\n",
      "训练 100 次的时间为：233.8077220916748\n",
      "-------------训练次数：6700，loss 为: 0.079024538397789------------\n",
      "训练 100 次的时间为：285.26482105255127\n",
      "-------------训练次数：6800，loss 为: 0.12659704685211182------------\n",
      "训练 100 次的时间为：332.29872012138367\n",
      "-------------训练次数：6900，loss 为: 0.18744716048240662------------\n",
      "训练 100 次的时间为：385.2235231399536\n",
      "-------------训练次数：7000，loss 为: 0.14462801814079285------------\n",
      "epoch：8, 训练集上的accuracy: 93.62%\n",
      "------------测试集上的 loss: 0.010292364284396172, accuracy: 0.8141999840736389\n",
      "-------------第9轮训练开始了------------\n",
      "训练 100 次的时间为：29.204340934753418\n",
      "-------------训练次数：7100，loss 为: 0.10389676690101624------------\n",
      "训练 100 次的时间为：73.29441475868225\n",
      "-------------训练次数：7200，loss 为: 0.11025363206863403------------\n",
      "训练 100 次的时间为：128.04870796203613\n",
      "-------------训练次数：7300，loss 为: 0.06164054572582245------------\n",
      "训练 100 次的时间为：175.31912088394165\n",
      "-------------训练次数：7400，loss 为: 0.11061199009418488------------\n",
      "训练 100 次的时间为：237.62560105323792\n",
      "-------------训练次数：7500，loss 为: 0.09058353304862976------------\n",
      "训练 100 次的时间为：288.7699010372162\n",
      "-------------训练次数：7600，loss 为: 0.15159711241722107------------\n",
      "训练 100 次的时间为：338.72249603271484\n",
      "-------------训练次数：7700，loss 为: 0.29083138704299927------------\n",
      "训练 100 次的时间为：393.51499485969543\n",
      "-------------训练次数：7800，loss 为: 0.2162688672542572------------\n",
      "epoch：9, 训练集上的accuracy: 94.96%\n",
      "------------测试集上的 loss: 0.01223487313836813, accuracy: 0.802299976348877\n"
     ]
    }
   ],
   "execution_count": 11
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
