{
 "cells": [
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "<font color=\"red\">注</font>: 使用 tensorboard 可视化需要安装 tensorflow (TensorBoard依赖于tensorflow库，可以任意安装tensorflow的gpu/cpu版本)\n",
    "\n",
    "```shell\n",
    "pip install tensorflow-cpu\n",
    "```"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-28T14:55:10.603406Z",
     "start_time": "2025-02-28T14:55:10.597405Z"
    }
   },
   "source": [
    "import matplotlib as mpl\n",
    "import matplotlib.pyplot as plt\n",
    "%matplotlib inline\n",
    "import numpy as np\n",
    "import sklearn\n",
    "import pandas as pd\n",
    "import os\n",
    "import sys\n",
    "import time\n",
    "import random\n",
    "from tqdm.auto import tqdm\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "\n",
    "print(sys.version_info)\n",
    "for module in mpl, np, pd, sklearn, torch:\n",
    "    print(module.__name__, module.__version__)\n",
    "\n",
    "device = torch.device(\"cuda:0\") if torch.cuda.is_available() else torch.device(\"cpu\")\n",
    "print(device)\n",
    "\n",
    "# Fix: `seed` was defined but never applied to any RNG, so shuffling,\n",
    "# augmentation and weight init were unseeded. Seed every source used below.\n",
    "seed = 42\n",
    "random.seed(seed)\n",
    "np.random.seed(seed)\n",
    "torch.manual_seed(seed)\n",
    "torch.cuda.manual_seed_all(seed)\n"
   ],
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "sys.version_info(major=3, minor=12, micro=3, releaselevel='final', serial=0)\n",
      "matplotlib 3.10.0\n",
      "numpy 2.0.2\n",
      "pandas 2.2.3\n",
      "sklearn 1.6.1\n",
      "torch 2.6.0+cu126\n",
      "cuda:0\n"
     ]
    }
   ],
   "execution_count": 5
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 数据准备\n",
    "\n",
    "https://www.kaggle.com/competitions/cifar-10/data\n",
    "\n",
    "```shell\n",
    "$ tree -L 1 cifar-10                                    \n",
    "cifar-10\n",
    "├── sampleSubmission.csv\n",
    "├── test\n",
    "├── train\n",
    "└── trainLabels.csv\n",
    "```"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-28T14:55:11.973330Z",
     "start_time": "2025-02-28T14:55:10.653418Z"
    }
   },
   "source": [
    "from pathlib import Path\n",
    "\n",
    "# NOTE(review): hardcoded absolute Windows path — consider making this configurable.\n",
    "DATA_DIR = Path(\"D:/Python/Python_code/Python_code2025/day24/cifar-10\")\n",
    "\n",
    "# NOTE(review): \"lables\" is a typo for \"labels\"; name kept unchanged in case later cells use it.\n",
    "train_lables_file = DATA_DIR / \"trainLabels.csv\"\n",
    "test_csv_file = DATA_DIR / \"sampleSubmission.csv\"\n",
    "train_folder = DATA_DIR / \"train\"\n",
    "test_folder = DATA_DIR / \"test\"\n",
    "\n",
    "# The ten CIFAR-10 class names; list order defines the integer label ids used below.\n",
    "class_names = [\n",
    "    'airplane',\n",
    "    'automobile',\n",
    "    'bird',\n",
    "    'cat',\n",
    "    'deer',\n",
    "    'dog',\n",
    "    'frog',\n",
    "    'horse',\n",
    "    'ship',\n",
    "    'truck',\n",
    "]\n",
    "\n",
    "def parse_csv_file(filepath, folder):\n",
    "    \"\"\"Parses csv files into (filename(path), label) format\"\"\"\n",
    "    # read all rows\n",
    "    results = []\n",
    "    with open(filepath, 'r') as f:\n",
    "        # skip the first row: it is the CSV header, not data\n",
    "        lines = f.readlines()[1:] \n",
    "    for line in lines:  # process each row in turn\n",
    "        image_id, label_str = line.strip('\\n').split(',')\n",
    "        image_full_path = folder / f\"{image_id}.png\"\n",
    "        results.append((image_full_path, label_str))  # (image path, class label)\n",
    "    return results\n",
    "\n",
    "# parse the train/test csv files into (path, label) lists\n",
    "train_labels_info = parse_csv_file(train_lables_file, train_folder)\n",
    "test_csv_info = parse_csv_file(test_csv_file, test_folder)\n",
    "# preview the parsed results\n",
    "import pprint\n",
    "pprint.pprint(train_labels_info[0:5])\n",
    "pprint.pprint(test_csv_info[0:5])\n",
    "print(len(train_labels_info), len(test_csv_info))"
   ],
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[(WindowsPath('D:/Python/Python_code/Python_code2025/day24/cifar-10/train/1.png'),\n",
      "  'frog'),\n",
      " (WindowsPath('D:/Python/Python_code/Python_code2025/day24/cifar-10/train/2.png'),\n",
      "  'truck'),\n",
      " (WindowsPath('D:/Python/Python_code/Python_code2025/day24/cifar-10/train/3.png'),\n",
      "  'truck'),\n",
      " (WindowsPath('D:/Python/Python_code/Python_code2025/day24/cifar-10/train/4.png'),\n",
      "  'deer'),\n",
      " (WindowsPath('D:/Python/Python_code/Python_code2025/day24/cifar-10/train/5.png'),\n",
      "  'automobile')]\n",
      "[(WindowsPath('D:/Python/Python_code/Python_code2025/day24/cifar-10/test/1.png'),\n",
      "  'cat'),\n",
      " (WindowsPath('D:/Python/Python_code/Python_code2025/day24/cifar-10/test/2.png'),\n",
      "  'cat'),\n",
      " (WindowsPath('D:/Python/Python_code/Python_code2025/day24/cifar-10/test/3.png'),\n",
      "  'cat'),\n",
      " (WindowsPath('D:/Python/Python_code/Python_code2025/day24/cifar-10/test/4.png'),\n",
      "  'cat'),\n",
      " (WindowsPath('D:/Python/Python_code/Python_code2025/day24/cifar-10/test/5.png'),\n",
      "  'cat')]\n",
      "50000 300000\n"
     ]
    }
   ],
   "execution_count": 6
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-28T14:55:12.038507Z",
     "start_time": "2025-02-28T14:55:11.974327Z"
    }
   },
   "source": [
    "# Hold out the tail of the training set for validation (45000 / 5000 split).\n",
    "NUM_TRAIN = 45000  # was an inline magic number\n",
    "train_df = pd.DataFrame(train_labels_info[:NUM_TRAIN])\n",
    "valid_df = pd.DataFrame(train_labels_info[NUM_TRAIN:])\n",
    "test_df = pd.DataFrame(test_csv_info)\n",
    "\n",
    "train_df.columns = ['filepath', 'class']\n",
    "valid_df.columns = ['filepath', 'class']\n",
    "test_df.columns = ['filepath', 'class']\n",
    "\n",
    "print(train_df.head())\n",
    "print(valid_df.head())\n",
    "print(test_df.head())"
   ],
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "                                            filepath       class\n",
      "0  D:\\Python\\Python_code\\Python_code2025\\day24\\ci...        frog\n",
      "1  D:\\Python\\Python_code\\Python_code2025\\day24\\ci...       truck\n",
      "2  D:\\Python\\Python_code\\Python_code2025\\day24\\ci...       truck\n",
      "3  D:\\Python\\Python_code\\Python_code2025\\day24\\ci...        deer\n",
      "4  D:\\Python\\Python_code\\Python_code2025\\day24\\ci...  automobile\n",
      "                                            filepath       class\n",
      "0  D:\\Python\\Python_code\\Python_code2025\\day24\\ci...       horse\n",
      "1  D:\\Python\\Python_code\\Python_code2025\\day24\\ci...  automobile\n",
      "2  D:\\Python\\Python_code\\Python_code2025\\day24\\ci...        deer\n",
      "3  D:\\Python\\Python_code\\Python_code2025\\day24\\ci...  automobile\n",
      "4  D:\\Python\\Python_code\\Python_code2025\\day24\\ci...    airplane\n",
      "                                            filepath class\n",
      "0  D:\\Python\\Python_code\\Python_code2025\\day24\\ci...   cat\n",
      "1  D:\\Python\\Python_code\\Python_code2025\\day24\\ci...   cat\n",
      "2  D:\\Python\\Python_code\\Python_code2025\\day24\\ci...   cat\n",
      "3  D:\\Python\\Python_code\\Python_code2025\\day24\\ci...   cat\n",
      "4  D:\\Python\\Python_code\\Python_code2025\\day24\\ci...   cat\n"
     ]
    }
   ],
   "execution_count": 7
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-28T14:55:14.131827Z",
     "start_time": "2025-02-28T14:55:12.039506Z"
    }
   },
   "source": [
    "from PIL import Image\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "from torchvision import transforms\n",
    "\n",
    "class Cifar10Dataset(Dataset):\n",
    "    \"\"\"CIFAR-10 dataset backed by the (filepath, class) DataFrames built above.\"\"\"\n",
    "    df_map = {\n",
    "        \"train\": train_df,\n",
    "        \"eval\": valid_df,\n",
    "        \"test\": test_df\n",
    "    }\n",
    "    # Bidirectional mapping between class-name strings and integer ids.\n",
    "    label_to_idx = {label: idx for idx, label in enumerate(class_names)}\n",
    "    idx_to_label = {idx: label for idx, label in enumerate(class_names)}\n",
    "\n",
    "    def __init__(self, mode, transform=None):\n",
    "        self.df = self.df_map.get(mode, None)\n",
    "        if self.df is None:\n",
    "            # Fix: message used to say \"val\" although the actual df_map key is \"eval\".\n",
    "            raise ValueError(\"mode should be one of train, eval, test, but got {}\".format(mode))\n",
    "        self.transform = transform\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        img_path, label = self.df.iloc[index]\n",
    "        img = Image.open(img_path).convert('RGB')\n",
    "        # Fix: transform defaults to None, so guard instead of crashing with\n",
    "        # \"'NoneType' object is not callable\".\n",
    "        if self.transform is not None:\n",
    "            img = self.transform(img)\n",
    "        # Map the string label to its integer class id.\n",
    "        label = self.label_to_idx[label]\n",
    "        return img, label\n",
    "\n",
    "    def __len__(self):\n",
    "        return self.df.shape[0]\n",
    "\n",
    "IMAGE_SIZE = 32\n",
    "# Channel-wise mean/std commonly used to normalize CIFAR-10 images.\n",
    "mean, std = [0.4914, 0.4822, 0.4465], [0.247, 0.243, 0.261]\n",
    "\n",
    "# Training pipeline: light augmentation (rotation + horizontal flip), then normalize.\n",
    "transforms_train = transforms.Compose([\n",
    "        transforms.Resize((IMAGE_SIZE, IMAGE_SIZE)),\n",
    "        transforms.RandomRotation(40),\n",
    "        transforms.RandomHorizontalFlip(),\n",
    "        transforms.ToTensor(),\n",
    "        transforms.Normalize(mean, std)\n",
    "    ])\n",
    "\n",
    "# Evaluation pipeline: no augmentation, only resize + normalize.\n",
    "transforms_eval = transforms.Compose([\n",
    "        transforms.Resize((IMAGE_SIZE, IMAGE_SIZE)),\n",
    "        transforms.ToTensor(),\n",
    "        transforms.Normalize(mean, std)\n",
    "    ])\n",
    "\n",
    "train_ds = Cifar10Dataset(\"train\", transforms_train)\n",
    "eval_ds = Cifar10Dataset(\"eval\", transforms_eval)"
   ],
   "outputs": [],
   "execution_count": 8
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-28T14:55:14.136824Z",
     "start_time": "2025-02-28T14:55:14.132823Z"
    }
   },
   "source": [
    "batch_size = 128\n",
    "# NOTE(review): num_workers>0 spawns worker processes; on Windows (paths above\n",
    "# suggest it) notebook-defined datasets must be picklable and loading can stall —\n",
    "# drop to 0 if the first batch hangs.\n",
    "train_dl = DataLoader(train_ds, batch_size=batch_size, shuffle=True, num_workers=4)   \n",
    "eval_dl = DataLoader(eval_ds, batch_size=batch_size, shuffle=False, num_workers=4)"
   ],
   "outputs": [],
   "execution_count": 9
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-28T14:55:14.159964Z",
     "start_time": "2025-02-28T14:55:14.136824Z"
    }
   },
   "source": [
    "# (Intentionally disabled reference code.) Iterate over train_ds and compute the\n",
    "# per-channel mean and std of every image.\n",
    "# def cal_mean_std(ds):\n",
    "#     mean = 0.\n",
    "#     std = 0.\n",
    "#     for img, _ in ds:\n",
    "#         mean += img.mean(dim=(1, 2))\n",
    "#         std += img.std(dim=(1, 2))\n",
    "#     mean /= len(ds)\n",
    "#     std /= len(ds)\n",
    "#     return mean, std\n",
    "#\n",
    "# # After Normalize, the per-channel mean should be ~0 and the std ~1.\n",
    "# print(cal_mean_std(train_ds))"
   ],
   "outputs": [],
   "execution_count": 10
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 定义模型"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-28T14:55:14.165908Z",
     "start_time": "2025-02-28T14:55:14.160961Z"
    }
   },
   "source": [
    "class Resdiual(nn.Module):\n",
    "    \"\"\"Basic (non-bottleneck) residual unit: conv3x3-BN-ReLU -> conv3x3-BN, plus skip.\n",
    "\n",
    "    (The misspelled class name is kept because later cells reference `Resdiual`.)\n",
    "    \"\"\"\n",
    "    def __init__(self, input_channels, output_channels, use_1x1conv=False, stride=1):\n",
    "        \"\"\"\n",
    "        Args:\n",
    "            input_channels: channels of the block input.\n",
    "            output_channels: channels produced by both 3x3 convs.\n",
    "            use_1x1conv: put a 1x1 conv on the skip path so it can match the main\n",
    "                path when channels or spatial size change (used with stride=2).\n",
    "            stride: stride of the first conv; 2 performs downsampling.\n",
    "        \"\"\"\n",
    "        super().__init__()\n",
    "        self.conv1 = nn.Conv2d(\n",
    "            in_channels=input_channels,\n",
    "            out_channels=output_channels,\n",
    "            kernel_size=3,\n",
    "            stride=stride,\n",
    "            padding=1,\n",
    "        )\n",
    "        self.conv2 = nn.Conv2d(\n",
    "            in_channels=output_channels,\n",
    "            out_channels=output_channels,\n",
    "            kernel_size=3,\n",
    "            stride=1,\n",
    "            padding=1,\n",
    "        )\n",
    "        if use_1x1conv:\n",
    "            # 1x1 conv on the skip connection: adjusts channel count and (via\n",
    "            # stride) spatial size so the residual addition is shape-compatible.\n",
    "            self.conv_sc = nn.Conv2d(\n",
    "                in_channels=input_channels,\n",
    "                out_channels=output_channels,\n",
    "                kernel_size=1,\n",
    "                stride=stride,\n",
    "            )\n",
    "        else:\n",
    "            self.conv_sc = None\n",
    "\n",
    "        # Fix: momentum=0.9 is a Keras/TF-style value. PyTorch's BatchNorm momentum\n",
    "        # weights the NEW batch statistics (running = (1 - m) * running + m * batch),\n",
    "        # so TF's 0.9 corresponds to 0.1 here; keeping 0.9 makes the running stats\n",
    "        # track almost only the most recent batch and degrades eval-mode accuracy.\n",
    "        self.bn1 = nn.BatchNorm2d(output_channels, eps=1e-5, momentum=0.1)\n",
    "        self.bn2 = nn.BatchNorm2d(output_channels, eps=1e-5, momentum=0.1)\n",
    "\n",
    "    def forward(self, inputs):\n",
    "        \"\"\"conv-BN-ReLU, conv-BN, add skip, final ReLU.\"\"\"\n",
    "        flow = F.relu(self.bn1(self.conv1(inputs)))  # conv -> BN -> ReLU\n",
    "        flow = self.bn2(self.conv2(flow))  # conv -> BN\n",
    "        if self.conv_sc is not None:  # project the skip path when shapes differ\n",
    "            inputs = self.conv_sc(inputs)\n",
    "        return F.relu(flow + inputs)  # residual addition -> ReLU\n"
   ],
   "outputs": [],
   "execution_count": 11
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-28T14:55:14.170326Z",
     "start_time": "2025-02-28T14:55:14.166902Z"
    }
   },
   "source": [
    "class ResdiualBlock(nn.Module):\n",
    "    \"\"\"A stage of `num` stacked Resdiual units; unless it is the first stage, the\n",
    "    leading unit downsamples (stride=2) with a 1x1-conv projection on its skip.\"\"\"\n",
    "    def __init__(self, input_channels, output_channels, num, is_first=False):\n",
    "        \"\"\"\n",
    "        Args:\n",
    "            input_channels: channels entering the stage.\n",
    "            output_channels: channels produced by every unit in the stage.\n",
    "            num: how many Resdiual units to stack.\n",
    "            is_first: True for the stage right after the stem — its leading unit\n",
    "                keeps stride=1 (no downsampling); otherwise the leading unit\n",
    "                halves the spatial size with stride=2.\n",
    "        \"\"\"\n",
    "        super().__init__()\n",
    "        self.model = nn.Sequential() # container for the stacked Resdiual units\n",
    "        self.model.append(Resdiual(\n",
    "            input_channels=input_channels, \n",
    "            output_channels=output_channels, \n",
    "            use_1x1conv=not is_first, \n",
    "            stride=1 if is_first else 2\n",
    "            )) # leading unit (may downsample + project the skip)\n",
    "        for _ in range(1, num):\n",
    "            # remaining units keep the shape: stride=1, identity skip\n",
    "            self.model.append(Resdiual(\n",
    "                input_channels=output_channels, \n",
    "                output_channels=output_channels,\n",
    "                use_1x1conv=False, stride=1\n",
    "                ))\n",
    "    def forward(self, inputs):\n",
    "        \"\"\"Run the input through the stacked units.\"\"\"\n",
    "        return self.model(inputs)"
   ],
   "outputs": [],
   "execution_count": 12
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-28T14:55:14.192575Z",
     "start_time": "2025-02-28T14:55:14.171322Z"
    }
   },
   "source": [
    "class ResNetForCifar10(nn.Module):\n",
    "    \"\"\"CIFAR-style ResNet: 3x3 stem -> three residual stages (16/32/64 ch) -> GAP -> FC.\"\"\"\n",
    "    def __init__(self, n=3, num_classes=10):\n",
    "        \"\"\"\n",
    "        Args:\n",
    "            n: depth factor; each of the three stages stacks 2*n residual units.\n",
    "            num_classes: number of output classes of the final linear layer.\n",
    "        \"\"\"\n",
    "        super().__init__()\n",
    "        self.model = nn.Sequential(\n",
    "            nn.Conv2d(\n",
    "                in_channels=3,\n",
    "                out_channels=16,\n",
    "                kernel_size=3,\n",
    "                stride=1,\n",
    "                padding=1,  # fix: preserve the 32x32 input size (was 30x30 without padding)\n",
    "            ),  # conv1 (stem)\n",
    "            # fix: TF-style momentum 0.9 -> 0.1 under PyTorch's semantics (weight of new batch stats)\n",
    "            nn.BatchNorm2d(16, momentum=0.1, eps=1e-5),\n",
    "            nn.ReLU(),\n",
    "            ResdiualBlock(input_channels=16, output_channels=16, num=2*n, is_first=True),  # conv2_x\n",
    "            ResdiualBlock(input_channels=16, output_channels=32, num=2*n),  # conv3_x\n",
    "            ResdiualBlock(input_channels=32, output_channels=64, num=2*n),  # conv4_x (comment previously said conv3_x twice)\n",
    "            # Global average pooling squeezes H and W to 1x1 regardless of input size.\n",
    "            nn.AdaptiveAvgPool2d((1, 1)),\n",
    "            nn.Flatten(),\n",
    "            nn.Linear(in_features=64, out_features=num_classes),  # classifier head\n",
    "            )\n",
    "\n",
    "        self.init_weights()\n",
    "\n",
    "    def init_weights(self):\n",
    "        \"\"\"Kaiming-uniform init for all linear/conv weights; zero their biases.\"\"\"\n",
    "        for m in self.modules():\n",
    "            if isinstance(m, (nn.Linear, nn.Conv2d)):\n",
    "                nn.init.kaiming_uniform_(m.weight)\n",
    "                nn.init.zeros_(m.bias)\n",
    "\n",
    "    def forward(self, inputs):\n",
    "        return self.model(inputs)\n",
    "\n",
    "\n",
    "for key, value in ResNetForCifar10(num_classes=len(class_names)).named_parameters():\n",
    "    print(f\"{key:^40}paramerters num: {np.prod(value.shape)}\")"
   ],
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "             model.0.weight             paramerters num: 432\n",
      "              model.0.bias              paramerters num: 16\n",
      "             model.1.weight             paramerters num: 16\n",
      "              model.1.bias              paramerters num: 16\n",
      "      model.3.model.0.conv1.weight      paramerters num: 2304\n",
      "       model.3.model.0.conv1.bias       paramerters num: 16\n",
      "      model.3.model.0.conv2.weight      paramerters num: 2304\n",
      "       model.3.model.0.conv2.bias       paramerters num: 16\n",
      "       model.3.model.0.bn1.weight       paramerters num: 16\n",
      "        model.3.model.0.bn1.bias        paramerters num: 16\n",
      "       model.3.model.0.bn2.weight       paramerters num: 16\n",
      "        model.3.model.0.bn2.bias        paramerters num: 16\n",
      "      model.3.model.1.conv1.weight      paramerters num: 2304\n",
      "       model.3.model.1.conv1.bias       paramerters num: 16\n",
      "      model.3.model.1.conv2.weight      paramerters num: 2304\n",
      "       model.3.model.1.conv2.bias       paramerters num: 16\n",
      "       model.3.model.1.bn1.weight       paramerters num: 16\n",
      "        model.3.model.1.bn1.bias        paramerters num: 16\n",
      "       model.3.model.1.bn2.weight       paramerters num: 16\n",
      "        model.3.model.1.bn2.bias        paramerters num: 16\n",
      "      model.3.model.2.conv1.weight      paramerters num: 2304\n",
      "       model.3.model.2.conv1.bias       paramerters num: 16\n",
      "      model.3.model.2.conv2.weight      paramerters num: 2304\n",
      "       model.3.model.2.conv2.bias       paramerters num: 16\n",
      "       model.3.model.2.bn1.weight       paramerters num: 16\n",
      "        model.3.model.2.bn1.bias        paramerters num: 16\n",
      "       model.3.model.2.bn2.weight       paramerters num: 16\n",
      "        model.3.model.2.bn2.bias        paramerters num: 16\n",
      "      model.3.model.3.conv1.weight      paramerters num: 2304\n",
      "       model.3.model.3.conv1.bias       paramerters num: 16\n",
      "      model.3.model.3.conv2.weight      paramerters num: 2304\n",
      "       model.3.model.3.conv2.bias       paramerters num: 16\n",
      "       model.3.model.3.bn1.weight       paramerters num: 16\n",
      "        model.3.model.3.bn1.bias        paramerters num: 16\n",
      "       model.3.model.3.bn2.weight       paramerters num: 16\n",
      "        model.3.model.3.bn2.bias        paramerters num: 16\n",
      "      model.3.model.4.conv1.weight      paramerters num: 2304\n",
      "       model.3.model.4.conv1.bias       paramerters num: 16\n",
      "      model.3.model.4.conv2.weight      paramerters num: 2304\n",
      "       model.3.model.4.conv2.bias       paramerters num: 16\n",
      "       model.3.model.4.bn1.weight       paramerters num: 16\n",
      "        model.3.model.4.bn1.bias        paramerters num: 16\n",
      "       model.3.model.4.bn2.weight       paramerters num: 16\n",
      "        model.3.model.4.bn2.bias        paramerters num: 16\n",
      "      model.3.model.5.conv1.weight      paramerters num: 2304\n",
      "       model.3.model.5.conv1.bias       paramerters num: 16\n",
      "      model.3.model.5.conv2.weight      paramerters num: 2304\n",
      "       model.3.model.5.conv2.bias       paramerters num: 16\n",
      "       model.3.model.5.bn1.weight       paramerters num: 16\n",
      "        model.3.model.5.bn1.bias        paramerters num: 16\n",
      "       model.3.model.5.bn2.weight       paramerters num: 16\n",
      "        model.3.model.5.bn2.bias        paramerters num: 16\n",
      "      model.4.model.0.conv1.weight      paramerters num: 4608\n",
      "       model.4.model.0.conv1.bias       paramerters num: 32\n",
      "      model.4.model.0.conv2.weight      paramerters num: 9216\n",
      "       model.4.model.0.conv2.bias       paramerters num: 32\n",
      "     model.4.model.0.conv_sc.weight     paramerters num: 512\n",
      "      model.4.model.0.conv_sc.bias      paramerters num: 32\n",
      "       model.4.model.0.bn1.weight       paramerters num: 32\n",
      "        model.4.model.0.bn1.bias        paramerters num: 32\n",
      "       model.4.model.0.bn2.weight       paramerters num: 32\n",
      "        model.4.model.0.bn2.bias        paramerters num: 32\n",
      "      model.4.model.1.conv1.weight      paramerters num: 9216\n",
      "       model.4.model.1.conv1.bias       paramerters num: 32\n",
      "      model.4.model.1.conv2.weight      paramerters num: 9216\n",
      "       model.4.model.1.conv2.bias       paramerters num: 32\n",
      "       model.4.model.1.bn1.weight       paramerters num: 32\n",
      "        model.4.model.1.bn1.bias        paramerters num: 32\n",
      "       model.4.model.1.bn2.weight       paramerters num: 32\n",
      "        model.4.model.1.bn2.bias        paramerters num: 32\n",
      "      model.4.model.2.conv1.weight      paramerters num: 9216\n",
      "       model.4.model.2.conv1.bias       paramerters num: 32\n",
      "      model.4.model.2.conv2.weight      paramerters num: 9216\n",
      "       model.4.model.2.conv2.bias       paramerters num: 32\n",
      "       model.4.model.2.bn1.weight       paramerters num: 32\n",
      "        model.4.model.2.bn1.bias        paramerters num: 32\n",
      "       model.4.model.2.bn2.weight       paramerters num: 32\n",
      "        model.4.model.2.bn2.bias        paramerters num: 32\n",
      "      model.4.model.3.conv1.weight      paramerters num: 9216\n",
      "       model.4.model.3.conv1.bias       paramerters num: 32\n",
      "      model.4.model.3.conv2.weight      paramerters num: 9216\n",
      "       model.4.model.3.conv2.bias       paramerters num: 32\n",
      "       model.4.model.3.bn1.weight       paramerters num: 32\n",
      "        model.4.model.3.bn1.bias        paramerters num: 32\n",
      "       model.4.model.3.bn2.weight       paramerters num: 32\n",
      "        model.4.model.3.bn2.bias        paramerters num: 32\n",
      "      model.4.model.4.conv1.weight      paramerters num: 9216\n",
      "       model.4.model.4.conv1.bias       paramerters num: 32\n",
      "      model.4.model.4.conv2.weight      paramerters num: 9216\n",
      "       model.4.model.4.conv2.bias       paramerters num: 32\n",
      "       model.4.model.4.bn1.weight       paramerters num: 32\n",
      "        model.4.model.4.bn1.bias        paramerters num: 32\n",
      "       model.4.model.4.bn2.weight       paramerters num: 32\n",
      "        model.4.model.4.bn2.bias        paramerters num: 32\n",
      "      model.4.model.5.conv1.weight      paramerters num: 9216\n",
      "       model.4.model.5.conv1.bias       paramerters num: 32\n",
      "      model.4.model.5.conv2.weight      paramerters num: 9216\n",
      "       model.4.model.5.conv2.bias       paramerters num: 32\n",
      "       model.4.model.5.bn1.weight       paramerters num: 32\n",
      "        model.4.model.5.bn1.bias        paramerters num: 32\n",
      "       model.4.model.5.bn2.weight       paramerters num: 32\n",
      "        model.4.model.5.bn2.bias        paramerters num: 32\n",
      "      model.5.model.0.conv1.weight      paramerters num: 18432\n",
      "       model.5.model.0.conv1.bias       paramerters num: 64\n",
      "      model.5.model.0.conv2.weight      paramerters num: 36864\n",
      "       model.5.model.0.conv2.bias       paramerters num: 64\n",
      "     model.5.model.0.conv_sc.weight     paramerters num: 2048\n",
      "      model.5.model.0.conv_sc.bias      paramerters num: 64\n",
      "       model.5.model.0.bn1.weight       paramerters num: 64\n",
      "        model.5.model.0.bn1.bias        paramerters num: 64\n",
      "       model.5.model.0.bn2.weight       paramerters num: 64\n",
      "        model.5.model.0.bn2.bias        paramerters num: 64\n",
      "      model.5.model.1.conv1.weight      paramerters num: 36864\n",
      "       model.5.model.1.conv1.bias       paramerters num: 64\n",
      "      model.5.model.1.conv2.weight      paramerters num: 36864\n",
      "       model.5.model.1.conv2.bias       paramerters num: 64\n",
      "       model.5.model.1.bn1.weight       paramerters num: 64\n",
      "        model.5.model.1.bn1.bias        paramerters num: 64\n",
      "       model.5.model.1.bn2.weight       paramerters num: 64\n",
      "        model.5.model.1.bn2.bias        paramerters num: 64\n",
      "      model.5.model.2.conv1.weight      paramerters num: 36864\n",
      "       model.5.model.2.conv1.bias       paramerters num: 64\n",
      "      model.5.model.2.conv2.weight      paramerters num: 36864\n",
      "       model.5.model.2.conv2.bias       paramerters num: 64\n",
      "       model.5.model.2.bn1.weight       paramerters num: 64\n",
      "        model.5.model.2.bn1.bias        paramerters num: 64\n",
      "       model.5.model.2.bn2.weight       paramerters num: 64\n",
      "        model.5.model.2.bn2.bias        paramerters num: 64\n",
      "      model.5.model.3.conv1.weight      paramerters num: 36864\n",
      "       model.5.model.3.conv1.bias       paramerters num: 64\n",
      "      model.5.model.3.conv2.weight      paramerters num: 36864\n",
      "       model.5.model.3.conv2.bias       paramerters num: 64\n",
      "       model.5.model.3.bn1.weight       paramerters num: 64\n",
      "        model.5.model.3.bn1.bias        paramerters num: 64\n",
      "       model.5.model.3.bn2.weight       paramerters num: 64\n",
      "        model.5.model.3.bn2.bias        paramerters num: 64\n",
      "      model.5.model.4.conv1.weight      paramerters num: 36864\n",
      "       model.5.model.4.conv1.bias       paramerters num: 64\n",
      "      model.5.model.4.conv2.weight      paramerters num: 36864\n",
      "       model.5.model.4.conv2.bias       paramerters num: 64\n",
      "       model.5.model.4.bn1.weight       paramerters num: 64\n",
      "        model.5.model.4.bn1.bias        paramerters num: 64\n",
      "       model.5.model.4.bn2.weight       paramerters num: 64\n",
      "        model.5.model.4.bn2.bias        paramerters num: 64\n",
      "      model.5.model.5.conv1.weight      paramerters num: 36864\n",
      "       model.5.model.5.conv1.bias       paramerters num: 64\n",
      "      model.5.model.5.conv2.weight      paramerters num: 36864\n",
      "       model.5.model.5.conv2.bias       paramerters num: 64\n",
      "       model.5.model.5.bn1.weight       paramerters num: 64\n",
      "        model.5.model.5.bn1.bias        paramerters num: 64\n",
      "       model.5.model.5.bn2.weight       paramerters num: 64\n",
      "        model.5.model.5.bn2.bias        paramerters num: 64\n",
      "             model.8.weight             paramerters num: 640\n",
      "              model.8.bias              paramerters num: 10\n"
     ]
    }
   ],
   "execution_count": 13
  },
  {
   "cell_type": "code",
   "source": [
    "3*7*7*64  # scratch: weights of a 7x7 conv, 3 -> 64 channels"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2025-02-28T14:55:14.196967Z",
     "start_time": "2025-02-28T14:55:14.193572Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "9408"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 14
  },
  {
   "cell_type": "code",
   "source": [
    "32*3*3*32  # scratch: weights of a 3x3 conv, 32 -> 32 channels (matches conv weights above)"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2025-02-28T14:55:14.201523Z",
     "start_time": "2025-02-28T14:55:14.197963Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "9216"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 15
  },
  {
   "cell_type": "code",
   "source": [
    "# Total number of trainable parameters in the model\n",
    "total_params = sum(p.numel() for p in ResNetForCifar10(num_classes=len(class_names)).parameters() if p.requires_grad)\n",
    "print(f\"Total trainable parameters: {total_params}\")"
   ],
   "metadata": {
    "collapsed": false,
    "ExecuteTime": {
     "end_time": "2025-02-28T14:55:14.216465Z",
     "start_time": "2025-02-28T14:55:14.202516Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Total trainable parameters: 565386\n"
     ]
    }
   ],
   "execution_count": 16
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 训练\n",
    "\n",
    "pytorch的训练需要自行实现，包括\n",
    "1. 定义损失函数\n",
    "2. 定义优化器\n",
    "3. 定义训练步\n",
    "4. 训练"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-28T14:55:14.263888Z",
     "start_time": "2025-02-28T14:55:14.217428Z"
    }
   },
   "source": [
    "from sklearn.metrics import accuracy_score\n",
    "\n",
    "@torch.no_grad()\n",
    "def evaluating(model, dataloader, loss_fct):\n",
    "    \"\"\"Run `model` over `dataloader` and return (mean loss, accuracy).\n",
    "\n",
    "    NOTE(review): this assumes the caller has already switched the model to\n",
    "    eval mode (model.eval()); @torch.no_grad() only disables autograd, it does\n",
    "    NOT change BatchNorm/Dropout behavior — confirm at the call site.\n",
    "    \"\"\"\n",
    "    loss_list = []\n",
    "    pred_list = []\n",
    "    label_list = []\n",
    "    for datas, labels in dataloader:\n",
    "        datas = datas.to(device)\n",
    "        labels = labels.to(device)\n",
    "        # forward pass\n",
    "        logits = model(datas)\n",
    "        loss = loss_fct(logits, labels)         # validation-set loss\n",
    "        loss_list.append(loss.item())\n",
    "        \n",
    "        preds = logits.argmax(axis=-1)    # predicted class ids\n",
    "        pred_list.extend(preds.cpu().numpy().tolist())\n",
    "        label_list.extend(labels.cpu().numpy().tolist())\n",
    "        \n",
    "    acc = accuracy_score(label_list, pred_list)\n",
    "    return np.mean(loss_list), acc\n"
   ],
   "outputs": [],
   "execution_count": 17
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### TensorBoard 可视化\n",
    "\n",
    "\n",
    "训练过程中可以使用如下命令启动tensorboard服务。\n",
    "\n",
    "```shell\n",
    "tensorboard \\\n",
    "    --logdir=runs \\     # log 存放路径\n",
    "    --host 0.0.0.0 \\    # ip\n",
    "    --port 8848         # 端口\n",
    "```"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-28T14:55:17.291768Z",
     "start_time": "2025-02-28T14:55:14.264885Z"
    }
   },
   "source": [
    "from torch.utils.tensorboard import SummaryWriter\n",
    "\n",
    "\n",
    "class TensorBoardCallback:\n",
    "    \"\"\"Thin wrapper around SummaryWriter that logs loss/accuracy/lr scalars.\"\"\"\n",
    "    def __init__(self, log_dir, flush_secs=10):\n",
    "        \"\"\"\n",
    "        Args:\n",
    "            log_dir (str): dir to write log.\n",
    "            flush_secs (int, optional): write to disk each flush_secs seconds. Defaults to 10.\n",
    "        \"\"\"\n",
    "        self.writer = SummaryWriter(log_dir=log_dir, flush_secs=flush_secs)\n",
    "\n",
    "    def draw_model(self, model, input_shape):\n",
    "        \"\"\"Trace the model with a random tensor of `input_shape` and log its graph.\"\"\"\n",
    "        self.writer.add_graph(model, input_to_model=torch.randn(input_shape))\n",
    "        \n",
    "    def add_loss_scalars(self, step, loss, val_loss):\n",
    "        \"\"\"Log train/validation loss on one chart.\"\"\"\n",
    "        self.writer.add_scalars(\n",
    "            main_tag=\"training/loss\", \n",
    "            tag_scalar_dict={\"loss\": loss, \"val_loss\": val_loss},\n",
    "            global_step=step,\n",
    "            )\n",
    "        \n",
    "    def add_acc_scalars(self, step, acc, val_acc):\n",
    "        \"\"\"Log train/validation accuracy on one chart.\"\"\"\n",
    "        self.writer.add_scalars(\n",
    "            main_tag=\"training/accuracy\",\n",
    "            tag_scalar_dict={\"accuracy\": acc, \"val_accuracy\": val_acc},\n",
    "            global_step=step,\n",
    "        )\n",
    "        \n",
    "    def add_lr_scalars(self, step, learning_rate):\n",
    "        \"\"\"Log the current learning rate.\"\"\"\n",
    "        self.writer.add_scalars(\n",
    "            main_tag=\"training/learning_rate\",\n",
    "            tag_scalar_dict={\"learning_rate\": learning_rate},\n",
    "            global_step=step,\n",
    "            \n",
    "        )\n",
    "    \n",
    "    def __call__(self, step, **kwargs):\n",
    "        \"\"\"Dispatch: log whichever of the loss/acc/lr kwargs pairs are present.\"\"\"\n",
    "        # add loss\n",
    "        loss = kwargs.pop(\"loss\", None)\n",
    "        val_loss = kwargs.pop(\"val_loss\", None)\n",
    "        if loss is not None and val_loss is not None:\n",
    "            self.add_loss_scalars(step, loss, val_loss)\n",
    "        # add acc\n",
    "        acc = kwargs.pop(\"acc\", None)\n",
    "        val_acc = kwargs.pop(\"val_acc\", None)\n",
    "        if acc is not None and val_acc is not None:\n",
    "            self.add_acc_scalars(step, acc, val_acc)\n",
    "        # add lr\n",
    "        learning_rate = kwargs.pop(\"lr\", None)\n",
    "        if learning_rate is not None:\n",
    "            self.add_lr_scalars(step, learning_rate)\n"
   ],
   "outputs": [],
   "execution_count": 18
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Save Best\n"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-28T14:55:17.296765Z",
     "start_time": "2025-02-28T14:55:17.292764Z"
    }
   },
   "source": [
    "class SaveCheckpointsCallback:\n",
    "    def __init__(self, save_dir, save_step=5000, save_best_only=True):\n",
    "        \"\"\"\n",
    "        Save a checkpoint every `save_step` steps (the training loop here\n",
    "        invokes this callback at each evaluation step).\n",
    "\n",
    "        Args:\n",
    "            save_dir (str): dir to save checkpoint\n",
    "            save_step (int, optional): the step frequency to save checkpoint. Defaults to 5000.\n",
    "            save_best_only (bool, optional): If True, only keep the single best model\n",
    "                (by `metric`); otherwise save a checkpoint at every save step.\n",
    "        \"\"\"\n",
    "        self.save_dir = save_dir\n",
    "        self.save_step = save_step\n",
    "        self.save_best_only = save_best_only\n",
    "        self.best_metrics = -1\n",
    "        \n",
    "        # makedirs (unlike mkdir) also creates missing parent directories\n",
    "        # and does not fail if the directory already exists\n",
    "        os.makedirs(self.save_dir, exist_ok=True)\n",
    "        \n",
    "    def __call__(self, step, state_dict, metric=None):\n",
    "        # only act on multiples of save_step (step 0 included)\n",
    "        if step % self.save_step > 0:\n",
    "            return\n",
    "        \n",
    "        if self.save_best_only:\n",
    "            assert metric is not None\n",
    "            if metric >= self.best_metrics:\n",
    "                # save checkpoint\n",
    "                torch.save(state_dict, os.path.join(self.save_dir, \"best.ckpt\"))\n",
    "                # update best metric\n",
    "                self.best_metrics = metric\n",
    "        else:\n",
    "            torch.save(state_dict, os.path.join(self.save_dir, f\"{step}.ckpt\"))\n",
    "\n"
   ],
   "outputs": [],
   "execution_count": 19
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Early Stop"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-28T14:55:17.310816Z",
     "start_time": "2025-02-28T14:55:17.297700Z"
    }
   },
   "source": [
    "class EarlyStopCallback:\n",
    "    def __init__(self, patience=5, min_delta=0.01):\n",
    "        \"\"\"Stop training when the monitored metric stops improving.\n",
    "\n",
    "        Args:\n",
    "            patience (int, optional): number of evaluations with no improvement\n",
    "                after which training should be stopped. Defaults to 5.\n",
    "            min_delta (float, optional): minimum change in the monitored metric\n",
    "                to qualify as an improvement; an absolute change smaller than\n",
    "                min_delta counts as no improvement. Defaults to 0.01.\n",
    "        \"\"\"\n",
    "        self.patience = patience\n",
    "        self.min_delta = min_delta\n",
    "        self.best_metric = -1\n",
    "        self.counter = 0\n",
    "        \n",
    "    def __call__(self, metric):\n",
    "        improved = metric >= self.best_metric + self.min_delta\n",
    "        if improved:\n",
    "            # new best: remember it and reset the stall counter\n",
    "            self.best_metric = metric\n",
    "            self.counter = 0\n",
    "        else:\n",
    "            # no improvement: record one more stalled evaluation\n",
    "            self.counter += 1\n",
    "            \n",
    "    @property\n",
    "    def early_stop(self):\n",
    "        # True once `patience` consecutive evaluations failed to improve\n",
    "        return self.counter >= self.patience\n"
   ],
   "outputs": [],
   "execution_count": 20
  },
  {
   "cell_type": "code",
   "metadata": {
    "jupyter": {
     "is_executing": true
    },
    "ExecuteTime": {
     "start_time": "2025-02-28T14:55:17.311814Z"
    }
   },
   "source": [
    "# training loop\n",
    "def training(\n",
    "    model, \n",
    "    train_loader, \n",
    "    val_loader, \n",
    "    epoch, \n",
    "    loss_fct, \n",
    "    optimizer, \n",
    "    tensorboard_callback=None,\n",
    "    save_ckpt_callback=None,\n",
    "    early_stop_callback=None,\n",
    "    eval_step=500,\n",
    "    ):\n",
    "    \"\"\"Train `model` for `epoch` epochs; evaluate on `val_loader` every\n",
    "    `eval_step` steps and return per-step train/val loss+accuracy records.\"\"\"\n",
    "    record_dict = {\n",
    "        \"train\": [],\n",
    "        \"val\": []\n",
    "    }\n",
    "    \n",
    "    global_step = 0\n",
    "    model.train()\n",
    "    with tqdm(total=epoch * len(train_loader)) as pbar:\n",
    "        for epoch_id in range(epoch):\n",
    "            # training\n",
    "            for datas, labels in train_loader:\n",
    "                datas = datas.to(device)\n",
    "                labels = labels.to(device)\n",
    "                # clear gradients\n",
    "                optimizer.zero_grad()\n",
    "                # forward pass\n",
    "                logits = model(datas)\n",
    "                # compute loss\n",
    "                loss = loss_fct(logits, labels)\n",
    "                # backpropagate\n",
    "                loss.backward()\n",
    "                # optimizer step (may also advance a learning-rate scheduler)\n",
    "                optimizer.step()\n",
    "                preds = logits.argmax(axis=-1)\n",
    "            \n",
    "                acc = accuracy_score(labels.cpu().numpy(), preds.cpu().numpy())    \n",
    "                loss = loss.cpu().item()\n",
    "                # record\n",
    "                \n",
    "                record_dict[\"train\"].append({\n",
    "                    \"loss\": loss, \"acc\": acc, \"step\": global_step\n",
    "                })\n",
    "                \n",
    "                # evaluating\n",
    "                if global_step % eval_step == 0:\n",
    "                    model.eval()\n",
    "                    val_loss, val_acc = evaluating(model, val_loader, loss_fct)\n",
    "                    record_dict[\"val\"].append({\n",
    "                        \"loss\": val_loss, \"acc\": val_acc, \"step\": global_step\n",
    "                    })\n",
    "                    model.train()\n",
    "                    \n",
    "                    # 1. TensorBoard visualization\n",
    "                    if tensorboard_callback is not None:\n",
    "                        tensorboard_callback(\n",
    "                            global_step, \n",
    "                            loss=loss, val_loss=val_loss,\n",
    "                            acc=acc, val_acc=val_acc,\n",
    "                            lr=optimizer.param_groups[0][\"lr\"],\n",
    "                            )\n",
    "                \n",
    "                    # 2. save model checkpoint\n",
    "                    if save_ckpt_callback is not None:\n",
    "                        save_ckpt_callback(global_step, model.state_dict(), metric=val_acc)\n",
    "\n",
    "                    # 3. Early Stop\n",
    "                    if early_stop_callback is not None:\n",
    "                        early_stop_callback(val_acc)\n",
    "                        if early_stop_callback.early_stop:\n",
    "                            print(f\"Early stop at epoch {epoch_id} / global_step {global_step}\")\n",
    "                            return record_dict\n",
    "                    \n",
    "                # update step\n",
    "                global_step += 1\n",
    "                pbar.update(1)\n",
    "                pbar.set_postfix({\"epoch\": epoch_id})\n",
    "        \n",
    "    return record_dict\n",
    "        \n",
    "\n",
    "epoch = 20\n",
    "\n",
    "model = ResNetForCifar10(num_classes=10)\n",
    "\n",
    "# 1. loss function: cross-entropy\n",
    "loss_fct = nn.CrossEntropyLoss()\n",
    "# 2. optimizer: SGD with momentum, plus step-based learning-rate decay\n",
    "#    (the comment used to say RMSprop, but the code below uses SGD)\n",
    "# Optimizers specified in the torch.optim package\n",
    "# >>> # Assuming optimizer uses lr = 0.05 for all groups\n",
    "# >>> # lr = 0.05     if epoch < 30\n",
    "# >>> # lr = 0.005    if 30 <= epoch < 80\n",
    "# >>> # lr = 0.0005   if epoch >= 80\n",
    "# >>> scheduler = MultiStepLR(optimizer, milestones=[30,80], gamma=0.1)\n",
    "\n",
    "class OptimizerWithScheduler:\n",
    "    \"\"\"SGD optimizer bundled with a step-based MultiStepLR scheduler;\n",
    "    exposes the same step/zero_grad/param_groups surface as an optimizer.\"\"\"\n",
    "    def __init__(self, parameters, lr, momentum, weight_decay):\n",
    "        self.optimizer = torch.optim.SGD(parameters, lr=lr, momentum=momentum, weight_decay=weight_decay) # optimizer\n",
    "        self.scheduler = torch.optim.lr_scheduler.MultiStepLR(self.optimizer, milestones=[32_000, 48_000], gamma=0.1) # lr decay at step milestones\n",
    "        \n",
    "    def step(self):\n",
    "        # advance the scheduler together with every optimizer step\n",
    "        self.optimizer.step()\n",
    "        self.scheduler.step()\n",
    "        \n",
    "    @property\n",
    "    def param_groups(self):\n",
    "        return self.optimizer.param_groups\n",
    "        \n",
    "    def zero_grad(self):\n",
    "        self.optimizer.zero_grad()\n",
    "        \n",
    "optimizer = OptimizerWithScheduler(model.parameters(), lr=0.1, momentum=0.9, weight_decay=1e-4)\n",
    "\n",
    "# 1. tensorboard visualization\n",
    "if not os.path.exists(\"runs\"):\n",
    "    os.mkdir(\"runs\")\n",
    "    \n",
    "exp_name = \"resnet34\"\n",
    "tensorboard_callback = TensorBoardCallback(f\"runs/{exp_name}\")\n",
    "# draw the graph while the model is still on CPU (dummy input is on CPU)\n",
    "tensorboard_callback.draw_model(model, [1, 3, IMAGE_SIZE, IMAGE_SIZE])\n",
    "# 2. save best\n",
    "if not os.path.exists(\"checkpoints\"):\n",
    "    os.makedirs(\"checkpoints\")\n",
    "save_ckpt_callback = SaveCheckpointsCallback(f\"checkpoints/{exp_name}\", save_step=len(train_dl), save_best_only=True)\n",
    "# 3. early stop\n",
    "early_stop_callback = EarlyStopCallback(patience=5)\n",
    "\n",
    "model = model.to(device)\n",
    "record = training(\n",
    "    model, \n",
    "    train_dl, \n",
    "    eval_dl, \n",
    "    epoch, \n",
    "    loss_fct, \n",
    "    optimizer, \n",
    "    tensorboard_callback=tensorboard_callback,\n",
    "    save_ckpt_callback=save_ckpt_callback,\n",
    "    early_stop_callback=early_stop_callback,\n",
    "    eval_step=len(train_dl)\n",
    "    )"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "  0%|          | 0/7040 [00:00<?, ?it/s]"
      ],
      "application/vnd.jupyter.widget-view+json": {
       "version_major": 2,
       "version_minor": 0,
       "model_id": "d9be2a8d9003470a9326d587e14f1be1"
      }
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "metadata": {
    "jupyter": {
     "is_executing": true
    }
   },
   "source": [
    "# note when plotting: the loss is not necessarily within [0, 1]\n",
    "def plot_learning_curves(record_dict, sample_step=500):\n",
    "    \"\"\"Plot train/val curves (one subplot per recorded metric), x-axis = step.\n",
    "    Train records are downsampled by `sample_step` to reduce clutter.\"\"\"\n",
    "    # build DataFrame\n",
    "    train_df = pd.DataFrame(record_dict[\"train\"]).set_index(\"step\").iloc[::sample_step]\n",
    "    val_df = pd.DataFrame(record_dict[\"val\"]).set_index(\"step\")\n",
    "\n",
    "    # plot\n",
    "    fig_num = len(train_df.columns)\n",
    "    # NOTE(review): indexing axs assumes fig_num >= 2 (loss + acc here); with a\n",
    "    # single metric plt.subplots would return a bare Axes — confirm if needed.\n",
    "    fig, axs = plt.subplots(1, fig_num, figsize=(5 * fig_num, 5))\n",
    "    for idx, item in enumerate(train_df.columns):    \n",
    "        axs[idx].plot(train_df.index, train_df[item], label=f\"train_{item}\")\n",
    "        axs[idx].plot(val_df.index, val_df[item], label=f\"val_{item}\")\n",
    "        axs[idx].grid()\n",
    "        axs[idx].legend()\n",
    "        # axs[idx].set_xticks(range(0, train_df.index[-1], 5000))\n",
    "        # axs[idx].set_xticklabels(map(lambda x: f\"{int(x/1000)}k\", range(0, train_df.index[-1], 5000)))\n",
    "        axs[idx].set_xlabel(\"step\")\n",
    "    \n",
    "    plt.show()\n",
    "\n",
    "plot_learning_curves(record, sample_step=10)  # x-axis is steps"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 评估"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "jupyter": {
     "is_executing": true
    }
   },
   "source": [
    "# dataloader for evaluating\n",
    "\n",
    "# load the best checkpoint saved by SaveCheckpointsCallback during training\n",
    "model.load_state_dict(torch.load(f\"checkpoints/{exp_name}/best.ckpt\", map_location=\"cpu\"))\n",
    "\n",
    "model.eval()\n",
    "loss, acc = evaluating(model, eval_dl, loss_fct)\n",
    "print(f\"loss:     {loss:.4f}\\naccuracy: {acc:.4f}\")"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 推理"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "jupyter": {
     "is_executing": true
    }
   },
   "source": [
    "# build the test dataset/dataloader\n",
    "# NOTE(review): test_df is assumed to be defined in an earlier cell — verify.\n",
    "test_ds = Cifar10Dataset(\"test\", transform=transforms_eval)\n",
    "test_dl = DataLoader(test_ds, batch_size=batch_size, shuffle=False, drop_last=False)\n",
    "\n",
    "preds_collect = []\n",
    "model.eval()\n",
    "with torch.no_grad():  # inference only: no autograd graph, far less memory\n",
    "    for data, fake_label in tqdm(test_dl):\n",
    "        data = data.to(device=device)\n",
    "        logits = model(data)\n",
    "        # map predicted class indices back to label names\n",
    "        preds = [test_ds.idx_to_label[idx] for idx in logits.argmax(dim=-1).cpu().tolist()]\n",
    "        preds_collect.extend(preds)\n",
    "\n",
    "test_df[\"class\"] = preds_collect\n",
    "test_df.head()"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "metadata": {
    "jupyter": {
     "is_executing": true
    }
   },
   "source": [
    "# export submission.csv (Kaggle submission format)\n",
    "test_df.to_csv(\"submission.csv\", index=False)"
   ],
   "outputs": [],
   "execution_count": null
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "pytorch",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.8"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
