{
 "cells": [
  {
   "cell_type": "code",
   "id": "initial_id",
   "metadata": {
    "collapsed": true,
    "ExecuteTime": {
     "end_time": "2025-02-12T01:35:26.294895Z",
     "start_time": "2025-02-12T01:35:22.638605Z"
    }
   },
   "source": [
    "import matplotlib as mpl\n",
    "import matplotlib.pyplot as plt\n",
    "%matplotlib inline\n",
    "import numpy as np\n",
    "import sklearn\n",
    "import pandas as pd\n",
    "import os\n",
    "import sys\n",
    "import time\n",
    "from tqdm.auto import tqdm\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "\n",
    "# Report interpreter and library versions so results can be reproduced exactly.\n",
    "print(sys.version_info)\n",
    "for module in mpl, np, pd, sklearn, torch:\n",
    "    print(module.__name__, module.__version__)\n",
    "\n",
    "# Prefer the first GPU when CUDA is available, otherwise fall back to CPU.\n",
    "device = torch.device(\"cuda:0\") if torch.cuda.is_available() else torch.device(\"cpu\")\n",
    "print(device)\n",
    "\n",
    "# Seed the RNGs up front for reproducibility.\n",
    "# BUGFIX: `seed` was previously defined but never applied to any RNG.\n",
    "seed = 42\n",
    "np.random.seed(seed)\n",
    "torch.manual_seed(seed)"
   ],
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "sys.version_info(major=3, minor=12, micro=3, releaselevel='final', serial=0)\n",
      "matplotlib 3.10.0\n",
      "numpy 1.26.4\n",
      "pandas 2.2.3\n",
      "sklearn 1.6.0\n",
      "torch 2.3.1+cu121\n",
      "cuda:0\n"
     ]
    }
   ],
   "execution_count": 4
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "# 数据准备",
   "id": "50caebe3fd3aafe5"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-12T01:35:28.147512Z",
     "start_time": "2025-02-12T01:35:27.239384Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Path: object-oriented filesystem path handling.\n",
    "from pathlib import Path\n",
    "\n",
    "# Root directory of the CIFAR-10 dataset (Kaggle competition layout).\n",
    "DATA_DIR = Path(\"./cifar-10\")\n",
    "\n",
    "# BUGFIX: renamed from the misspelled `train_lables_file` (only used in this cell).\n",
    "train_labels_file = DATA_DIR / \"trainLabels.csv\"  # training-set labels CSV\n",
    "test_csv_file = DATA_DIR / \"sampleSubmission.csv\"  # test-set submission template CSV\n",
    "train_folder = DATA_DIR / \"train\"  # training images folder\n",
    "test_folder = DATA_DIR / \"test\"  # test images folder\n",
    "\n",
    "# The ten CIFAR-10 classes, in label-index order.\n",
    "class_names = [\n",
    "    'airplane',\n",
    "    'automobile',\n",
    "    'bird',\n",
    "    'cat',\n",
    "    'deer',\n",
    "    'dog',\n",
    "    'frog',\n",
    "    'horse',\n",
    "    'ship',\n",
    "    'truck',\n",
    "]\n",
    "\n",
    "\n",
    "def parse_csv_file(filepath, folder):\n",
    "    \"\"\"Parse a CIFAR-10 label CSV into (image_path, label) pairs.\n",
    "\n",
    "    Args:\n",
    "        filepath: path to a CSV whose first line is a header and whose data\n",
    "            rows look like `<image_id>,<label>`.\n",
    "        folder: directory containing the images, named `<image_id>.png`.\n",
    "\n",
    "    Returns:\n",
    "        A list of (Path, str) tuples, one per data row.\n",
    "    \"\"\"\n",
    "    results = []\n",
    "\n",
    "    # Read every line and drop the header row.\n",
    "    with open(filepath, 'r') as f:\n",
    "        lines = f.readlines()[1:]\n",
    "\n",
    "    for line in lines:\n",
    "        # BUGFIX: strip() (not strip('\\n')) also removes '\\r' on CRLF files.\n",
    "        image_id, label_str = line.strip().split(',')\n",
    "\n",
    "        # Build the full image path, e.g. <folder>/123.png\n",
    "        image_full_path = folder / f\"{image_id}.png\"\n",
    "\n",
    "        results.append((image_full_path, label_str))\n",
    "    return results\n",
    "\n",
    "\n",
    "# Parse the training and test CSV files into (path, label) lists.\n",
    "train_labels_info = parse_csv_file(train_labels_file, train_folder)\n",
    "test_csv_info = parse_csv_file(test_csv_file, test_folder)\n",
    "\n"
   ],
   "id": "e9a0b5972c590b81",
   "outputs": [],
   "execution_count": 5
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-12T01:35:28.397594Z",
     "start_time": "2025-02-12T01:35:28.361595Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Split: first 45,000 labeled images train, remaining 5,000 validate.\n",
    "_COLUMNS = ['filepath', 'class']\n",
    "train_df = pd.DataFrame(train_labels_info[:45000], columns=_COLUMNS)\n",
    "valid_df = pd.DataFrame(train_labels_info[45000:], columns=_COLUMNS)\n",
    "test_df = pd.DataFrame(test_csv_info, columns=_COLUMNS)"
   ],
   "id": "9360b47fb4dfd84a",
   "outputs": [],
   "execution_count": 6
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-12T01:35:44.510498Z",
     "start_time": "2025-02-12T01:35:43.326337Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# PIL's Image module loads and converts image files.\n",
    "from PIL import Image\n",
    "\n",
    "# Dataset: base class for custom datasets; DataLoader: batching and shuffling.\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "\n",
    "# transforms: image preprocessing and data-augmentation utilities.\n",
    "from torchvision import transforms\n",
    "\n",
    "\n",
    "class Cifar10Dataset(Dataset):\n",
    "    \"\"\"CIFAR-10 dataset backed by the DataFrames prepared above.\n",
    "\n",
    "    Each item is loaded from disk on demand, converted to RGB, optionally\n",
    "    transformed, and returned with its integer class label.\n",
    "    \"\"\"\n",
    "\n",
    "    # Map each dataset mode to its backing DataFrame.\n",
    "    df_map = {\n",
    "        \"train\": train_df,\n",
    "        \"eval\": valid_df,\n",
    "        \"test\": test_df\n",
    "    }\n",
    "\n",
    "    # class name -> integer index (e.g. \"airplane\" -> 0); models need numeric labels\n",
    "    label_to_idx = {label: idx for idx, label in enumerate(class_names)}\n",
    "\n",
    "    # integer index -> class name, for decoding predictions back to names\n",
    "    idx_to_label = {idx: label for idx, label in enumerate(class_names)}\n",
    "\n",
    "    def __init__(self, mode, transform=None):\n",
    "        \"\"\"Create a dataset view.\n",
    "\n",
    "        Args:\n",
    "            mode: one of \"train\", \"eval\" or \"test\".\n",
    "            transform: optional callable applied to each PIL image.\n",
    "\n",
    "        Raises:\n",
    "            ValueError: if `mode` is not a key of `df_map`.\n",
    "        \"\"\"\n",
    "        self.df = self.df_map.get(mode, None)\n",
    "\n",
    "        if self.df is None:\n",
    "            # BUGFIX: the message used to say \"val\", but the valid key is \"eval\".\n",
    "            raise ValueError(\"mode should be one of train, eval, test, but got {}\".format(mode))\n",
    "\n",
    "        self.transform = transform\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        \"\"\"Return (image, label_index) for the sample at `index`.\"\"\"\n",
    "        img_path, label = self.df.iloc[index]\n",
    "\n",
    "        # Force three channels in case an image is grayscale or RGBA.\n",
    "        img = Image.open(img_path).convert('RGB')\n",
    "\n",
    "        # BUGFIX: `transform` defaults to None, so only apply it when provided.\n",
    "        if self.transform is not None:\n",
    "            img = self.transform(img)\n",
    "\n",
    "        # Convert the class name to its integer index.\n",
    "        label = self.label_to_idx[label]\n",
    "        return img, label\n",
    "\n",
    "    def __len__(self):\n",
    "        # Number of samples = number of DataFrame rows.\n",
    "        return self.df.shape[0]\n",
    "\n",
    "\n",
    "# Model input resolution (CIFAR-10 images are 32x32).\n",
    "IMAGE_SIZE = 32\n",
    "# Per-channel mean/std used for normalization (presumably computed over the\n",
    "# training set — source of these constants is not shown here).\n",
    "mean, std = [0.4368, 0.4268, 0.3947], [0.2464, 0.2418, 0.2358]\n",
    "\n",
    "# Training preprocessing with augmentation (rotation/flip) to reduce overfitting.\n",
    "transforms_train = transforms.Compose([\n",
    "    transforms.Resize((IMAGE_SIZE, IMAGE_SIZE)),  # resize to 32x32\n",
    "    transforms.RandomRotation(40),  # random rotation, up to 40 degrees\n",
    "    transforms.RandomHorizontalFlip(),  # random horizontal flip\n",
    "    transforms.ToTensor(),  # to tensor, pixel values scaled from [0, 255] to [0, 1]\n",
    "    transforms.Normalize(mean, std)  # standardize with the stats above\n",
    "])\n",
    "\n",
    "# Evaluation preprocessing: no augmentation, keep the original distribution.\n",
    "transforms_eval = transforms.Compose([\n",
    "    transforms.Resize((IMAGE_SIZE, IMAGE_SIZE)),  # resize to 32x32\n",
    "    transforms.ToTensor(),  # to tensor, pixel values scaled from [0, 255] to [0, 1]\n",
    "    transforms.Normalize(mean, std)  # standardize with the stats above\n",
    "])\n",
    "\n",
    "# Instantiate the training and validation dataset objects.\n",
    "train_ds = Cifar10Dataset(\"train\", transforms_train)\n",
    "eval_ds = Cifar10Dataset(\"eval\", transforms_eval)"
   ],
   "id": "2bd537d3fe553faa",
   "outputs": [],
   "execution_count": 7
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-12T01:35:55.285540Z",
     "start_time": "2025-02-12T01:35:55.282365Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Mini-batch size used by both loaders.\n",
    "batch_size = 64\n",
    "# Shuffle training batches each epoch; keep evaluation order deterministic.\n",
    "train_dl = DataLoader(train_ds, batch_size=batch_size, shuffle=True)\n",
    "eval_dl = DataLoader(eval_ds, batch_size=batch_size, shuffle=False)"
   ],
   "id": "f11b6ad4d49b956a",
   "outputs": [],
   "execution_count": 8
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "# 模型定义",
   "id": "7641762e4abdbf4f"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-12T01:36:02.900458Z",
     "start_time": "2025-02-12T01:36:02.797021Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class VGG(nn.Module):\n",
    "    def __init__(self, num_classes):\n",
    "        super().__init__()\n",
    "        self.model = nn.Sequential(\n",
    "            nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, padding=\"same\"),\n",
    "            nn.ReLU(),\n",
    "            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=\"same\"),\n",
    "            nn.ReLU(),\n",
    "            nn.MaxPool2d(kernel_size=2),\n",
    "            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=\"same\"),\n",
    "            nn.ReLU(),\n",
    "            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, padding=\"same\"),\n",
    "            nn.ReLU(),\n",
    "            nn.MaxPool2d(kernel_size=2),\n",
    "            nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=\"same\"),\n",
    "            nn.ReLU(),\n",
    "            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=\"same\"),\n",
    "            nn.ReLU(),\n",
    "            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=\"same\"),\n",
    "            nn.ReLU(),\n",
    "            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=\"same\"),\n",
    "            nn.ReLU(),\n",
    "            nn.MaxPool2d(kernel_size=2),\n",
    "            #下一个512\n",
    "            nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, padding=\"same\"),\n",
    "            nn.ReLU(),\n",
    "            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=\"same\"),\n",
    "            nn.ReLU(),\n",
    "            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=\"same\"),\n",
    "            nn.ReLU(),\n",
    "            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=\"same\"),\n",
    "            nn.ReLU(),\n",
    "            nn.MaxPool2d(kernel_size=2),\n",
    "            #下一个512\n",
    "            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=\"same\"),\n",
    "            nn.ReLU(),\n",
    "            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=\"same\"),\n",
    "            nn.ReLU(),\n",
    "            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=\"same\"),\n",
    "            nn.ReLU(),\n",
    "            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=\"same\"),\n",
    "            nn.ReLU(),\n",
    "            nn.MaxPool2d(kernel_size=2),\n",
    "            nn.Flatten(),\n",
    "            nn.Linear(512, num_classes),\n",
    "        )\n",
    "        self.init_weights()\n",
    "\n",
    "    def init_weights(self):\n",
    "        \"\"\"使用 xavier 均匀分布来初始化全连接层、卷积层的权重 W\"\"\"\n",
    "        for m in self.modules():\n",
    "            if isinstance(m, (nn.Linear, nn.Conv2d)):\n",
    "                nn.init.xavier_uniform_(m.weight)\n",
    "                nn.init.zeros_(m.bias)\n",
    "\n",
    "    def forward(self, x):\n",
    "        return self.model(x)\n",
    "\n",
    "\n",
    "# Print each parameter tensor's name and element count as a sanity check.\n",
    "for key, value in VGG(len(class_names)).named_parameters():\n",
    "    # BUGFIX: typo \"paramerters\" -> \"parameters\" in the printed label.\n",
    "    print(f\"{key:^40}parameters num: {np.prod(value.shape)}\")\n"
   ],
   "id": "3d6f874a578deb38",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "             model.0.weight             paramerters num: 1728\n",
      "              model.0.bias              paramerters num: 64\n",
      "             model.2.weight             paramerters num: 36864\n",
      "              model.2.bias              paramerters num: 64\n",
      "             model.5.weight             paramerters num: 73728\n",
      "              model.5.bias              paramerters num: 128\n",
      "             model.7.weight             paramerters num: 147456\n",
      "              model.7.bias              paramerters num: 128\n",
      "            model.10.weight             paramerters num: 294912\n",
      "             model.10.bias              paramerters num: 256\n",
      "            model.12.weight             paramerters num: 589824\n",
      "             model.12.bias              paramerters num: 256\n",
      "            model.14.weight             paramerters num: 589824\n",
      "             model.14.bias              paramerters num: 256\n",
      "            model.16.weight             paramerters num: 589824\n",
      "             model.16.bias              paramerters num: 256\n",
      "            model.19.weight             paramerters num: 1179648\n",
      "             model.19.bias              paramerters num: 512\n",
      "            model.21.weight             paramerters num: 2359296\n",
      "             model.21.bias              paramerters num: 512\n",
      "            model.23.weight             paramerters num: 2359296\n",
      "             model.23.bias              paramerters num: 512\n",
      "            model.25.weight             paramerters num: 2359296\n",
      "             model.25.bias              paramerters num: 512\n",
      "            model.28.weight             paramerters num: 2359296\n",
      "             model.28.bias              paramerters num: 512\n",
      "            model.30.weight             paramerters num: 2359296\n",
      "             model.30.bias              paramerters num: 512\n",
      "            model.32.weight             paramerters num: 2359296\n",
      "             model.32.bias              paramerters num: 512\n",
      "            model.34.weight             paramerters num: 2359296\n",
      "             model.34.bias              paramerters num: 512\n",
      "            model.38.weight             paramerters num: 5120\n",
      "             model.38.bias              paramerters num: 10\n"
     ]
    }
   ],
   "execution_count": 9
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-02-12T01:36:05.706731Z",
     "start_time": "2025-02-12T01:36:05.613009Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Total number of trainable (gradient-updated) parameters in the model.\n",
    "total_params = 0\n",
    "for p in VGG(len(class_names)).parameters():\n",
    "    if p.requires_grad:\n",
    "        total_params += p.numel()\n",
    "print(f\"Total trainable parameters: {total_params}\")"
   ],
   "id": "c946f3289dd64619",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Total trainable parameters: 20029514\n"
     ]
    }
   ],
   "execution_count": 10
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": "",
   "id": "107560269cf3aec5"
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
