{
 "cells": [
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "# 多层感知器（MLP）",
   "id": "d31df7d65c38eebc"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-04-27T11:13:40.448710Z",
     "start_time": "2025-04-27T11:13:39.798512Z"
    }
   },
   "cell_type": "code",
   "source": [
    "import pandas as pd\n",
    "import matplotlib.pyplot as plt"
   ],
   "id": "3a05a9a2e848b0e0",
   "outputs": [],
   "execution_count": 1
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "## 读取 MNIST 数据集并预处理",
   "id": "fdf4629b79ee580a"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-04-27T11:13:44.777390Z",
     "start_time": "2025-04-27T11:13:42.081389Z"
    }
   },
   "cell_type": "code",
   "source": [
    "from sklearn.preprocessing import StandardScaler\n",
    "from sklearn.model_selection import train_test_split\n",
    "import torch\n",
    "\n",
    "# Module-level so a later cell can reuse the fitted scaler on the test set.\n",
    "Scaler = StandardScaler()\n",
    "\n",
    "def load_data(csv_path):\n",
    "    \"\"\"Load the MNIST CSV, preview one digit, and return scaled train/val tensors.\n",
    "\n",
    "    The first CSV column is the digit label; the remaining columns are pixel values.\n",
    "    Returns (train_features, train_labels, val_features, val_labels) as float\n",
    "    tensors, with labels one-hot encoded.\n",
    "    \"\"\"\n",
    "    df = pd.read_csv(csv_path)\n",
    "    # Separate features and labels.\n",
    "    x = df.iloc[:, 1:].values\n",
    "    y = df.iloc[:, 0]\n",
    "    # One-hot encode the labels.\n",
    "    y = pd.get_dummies(y, dtype=float).values\n",
    "    # Preview the first image.\n",
    "    plt.imshow(x[0].reshape(28, 28), cmap='gray')\n",
    "    plt.axis('off')\n",
    "    plt.show()\n",
    "    # Split BEFORE scaling so validation statistics never leak into the scaler.\n",
    "    x_train, x_val, y_train, y_val = train_test_split(x, y, random_state=42)\n",
    "    # Fit the scaler on the training split only, then apply it to both splits.\n",
    "    x_train = Scaler.fit_transform(x_train)\n",
    "    x_val = Scaler.transform(x_val)\n",
    "    # Convert to tensors.\n",
    "    train_features = torch.tensor(x_train, dtype=torch.float)\n",
    "    train_labels = torch.tensor(y_train, dtype=torch.float)\n",
    "    val_features = torch.tensor(x_val, dtype=torch.float)\n",
    "    val_labels = torch.tensor(y_val, dtype=torch.float)\n",
    "    \n",
    "    return train_features, train_labels, val_features, val_labels"
   ],
   "id": "78af2368645e24d8",
   "outputs": [],
   "execution_count": 2
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "## 加载数据",
   "id": "d9c87348b252ab21"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-04-27T11:13:48.910593Z",
     "start_time": "2025-04-27T11:13:46.590438Z"
    }
   },
   "cell_type": "code",
   "source": "# Load and preprocess the training data (also displays a sample digit).\ntrain_feats, train_lbs, val_feats, val_lbs = load_data('./../data/train.csv')",
   "id": "2ae0f11a951e7f69",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<Figure size 640x480 with 1 Axes>"
      ],
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYUAAAGFCAYAAAASI+9IAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8hTgPZAAAACXBIWXMAAA9hAAAPYQGoP6dpAAAH9ElEQVR4nO3cMYvU1wKH4RldUkaLJSkkRRArA4KFIJJCTJ+EVFFE7PwCVhKsLLZPZ7UGkiAImqRJWLCTELAS8gHyAQJhm+iS/ae6L/ei9945w467WZ+nnh9zGufdU3jm0zRNMwCYzWZH9vsAABwcogBARAGAiAIAEQUAIgoARBQAiCgAkLVFPzifz1d5DgBWbJH/q+ymAEBEAYCIAgARBQAiCgBEFACIKAAQUQAgogBARAGAiAIAEQUAIgoARBQAiCgAEFEAIKIAQEQBgIgCABEFACIKAEQUAIgoABBRACCiAEBEAYCIAgARBQAiCgBEFACIKAAQUQAgogBARAGAiAIAEQUAIgoARBQAiCgAEFEAIKIAQEQBgIgCABEFACIKAEQUAIgoABBRACCiAEBEAYCIAgARBQCytt8HAP75tra2hjeXLl1a6ruuXbs2vLl3795S3/UmclMAIKIAQEQBgIgCABEFACIKAEQUAIgoABBRACCiAEBEAYCIAgDxIB7wHx4/fjy8uXDhwvBmd3d3eDObzWbTNC21YzFuCgBEFACIKAAQUQAgogBARAGAiAIAEQUAIgoARBQAiCgAEFEAIB7Eg0Ps1q1bw5vz588Pb44ePTq8uX///vBmNpvNHjx4sNSOxbgpABBRACCiAEBEAYCIAgARBQAiCgBEFACIKAAQUQAgogBARAGAzKdpmhb64Hy+6rMA/8Mnn3wyvPnmm2+GN2+99dbw5tmzZ8ObDz/8cHgzm81m29vbS+2YzRb5uXdTACCiAEBEAYCIAgARBQAiCgBEFACIKAAQUQAgogBARAGAiAIAEQUAsrbfB4A3zXvvvbfU7vbt28ObZV48/f3334c3X3zxxfDGa6cHk5sCABEFACIKAEQUAIgoABBRACCiAEBEAYCIAgARBQAiCgBEFADIfJqmaaEPzuerPgv845w7d254c/fu3aW+64MPPlhqN+rKlSvDm2+//XYFJ2GvLfJz76YAQEQBgIgCABEFACIKAEQUAIgoABBRACCiAEBEAYCIAgARBQCytt8HgIPi6tWrw5vNzc3hzYJvUL7kjz/+GN5sbW0Nb3788cfhDYeHmwIAEQUAIgoARBQAiCgAEFEAIKIAQEQBgIgCABEFACIKAEQUAIgH8TiU3n333eHNzZs3V3CSvfPo0aPhzfXr11dwEg4zNwUAIgoARBQAiCgAEFEAIKIAQEQBgIgCABEFACIKAEQUAIgoABBRACBeSeXAO378+PDmp59+Gt6cPn16eLOM7e3tpXbffffdHp8EXuamAEBEAYCIAgARBQAiCgBEFACIKAAQUQAgogBARAGAiAIAEQUAMp+maVrog/P5qs8Cr3TixInhzW+//baCk7xsmX8Xx44dW+q7ln1ID/5lkZ97NwUAIgoARBQAiCgAEFEAIKIAQEQBgIgCABEFACIKAEQUAIgoAJC1/T4Ab4719fWldt9///3w5nU94Pjzzz8Pb168eLGCk8DecFMAIKIAQEQBgIgCABEFACIKAEQUAIgoABBRACCiAEBEAYCIAgDxIB6vzZdffrnU7syZM8ObaZqGN0+ePBnefPTRR8Ob58+fD2/gdXFTACCiAEBEAYCIAgARBQAiCgBEFACIKAAQUQAgogBARAGAiAIA8SAeS1lfXx/enDx5cgUnebWdnZ3hzcbGxvDG43YcNm4KAEQUAIgoABBRACCiAEBEAYCIAgARBQAiCgBEFACIKAAQUQAgogBAvJLK7J133hnefP3118Obs2fPDm9ms9nszz//HN7cuHFjePPDDz8Mb+CwcVMAIKIAQEQBgIgC
ABEFACIKAEQUAIgoABBRACCiAEBEAYCIAgDxIB6zTz/9dHhz8eLFFZzk1X755ZfhzVdffbWCk8Dh56YAQEQBgIgCABEFACIKAEQUAIgoABBRACCiAEBEAYCIAgARBQDiQbxD5vPPPx/ebGxsrOAkL3vy5MlSu8uXL+/xSYD/xk0BgIgCABEFACIKAEQUAIgoABBRACCiAEBEAYCIAgARBQAiCgBkPk3TtNAH5/NVn4V/c+zYsaV2T58+Hd68//77S33XqM8++2yp3cOHD/f2IPCGWuTn3k0BgIgCABEFACIKAEQUAIgoABBRACCiAEBEAYCIAgARBQAiCgBkbb8PwKt9/PHHS+1e1+N2y3j77bf3+wjA/+GmAEBEAYCIAgARBQAiCgBEFACIKAAQUQAgogBARAGAiAIAEQUAIgoAxCupB9TOzs5Su93d3eHNkSPjfxv89ddfw5tTp04Nb4DXy00BgIgCABEFACIKAEQUAIgoABBRACCiAEBEAYCIAgARBQAiCgBkPk3TtNAH5/NVn4U98Ouvvw5v1tbG30W8c+fO8GZzc3N4A+ydRX7u3RQAiCgAEFEAIKIAQEQBgIgCABEFACIKAEQUAIgoABBRACCiAEA8iAfwhvAgHgBDRAGAiAIAEQUAIgoARBQAiCgAEFEAIKIAQEQBgIgCABEFACIKAEQUAIgoABBRACCiAEBEAYCIAgARBQAiCgBEFACIKAAQUQAgogBARAGAiAIAEQUAIgoARBQAiCgAEFEAIKIAQEQBgIgCABEFACIKAEQUAIgoABBRACCiAEBEAYCIAgARBQAiCgBEFACIKAAQUQAgogBARAGAiAIAEQUAsrboB6dpWuU5ADgA3BQAiCgAEFEAIKIAQEQBgIgCABEFACIKAEQUAMjfgmfKyqzx7soAAAAASUVORK5CYII="
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "execution_count": 3
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "## 创建数据加载器",
   "id": "32f7cf5f64c6e2b7"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-04-27T11:13:50.290017Z",
     "start_time": "2025-04-27T11:13:50.284242Z"
    }
   },
   "cell_type": "code",
   "source": [
    "from torch.utils.data import TensorDataset, DataLoader\n",
    "\n",
    "batch_size = 64\n",
    "\n",
    "def make_loader(features, labels, shuffle):\n",
    "    \"\"\"Wrap feature/label tensors in a DataLoader with shared settings.\"\"\"\n",
    "    # num_workers parallelises loading; pin_memory speeds up host-to-GPU copies.\n",
    "    return DataLoader(\n",
    "        dataset=TensorDataset(features, labels),\n",
    "        batch_size=batch_size,\n",
    "        shuffle=shuffle,\n",
    "        num_workers=2,\n",
    "        pin_memory=True,\n",
    "    )\n",
    "\n",
    "# Shuffle training batches so each one sees a varied sample mix (better generalisation);\n",
    "# keep validation order fixed for reproducibility and easy error tracing.\n",
    "train_loader = make_loader(train_feats, train_lbs, shuffle=True)\n",
    "val_loader = make_loader(val_feats, val_lbs, shuffle=False)"
   ],
   "id": "8499769efd9f8b7d",
   "outputs": [],
   "execution_count": 4
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "## 定义神经网络",
   "id": "31c4d569b40651ec"
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "### 定义类",
   "id": "4cec349e8e50a66c"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-04-27T11:13:52.933274Z",
     "start_time": "2025-04-27T11:13:52.926438Z"
    }
   },
   "cell_type": "code",
   "source": [
    "from torch import nn\n",
    "\n",
    "class MnistNN(nn.Module):\n",
    "    \"\"\"Two-hidden-layer MLP for MNIST digit classification.\n",
    "\n",
    "    Layers: input_size -> hidden_size1 -> hidden_size2 -> num_classes, with a\n",
    "    ReLU activation and dropout (probability p) after each hidden layer.\n",
    "    Emits raw logits; pair with nn.CrossEntropyLoss.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, input_size, hidden_size1, hidden_size2, num_classes, p):\n",
    "        super().__init__()\n",
    "        self.input_size = input_size\n",
    "        self.hidden1 = nn.Linear(input_size, hidden_size1)\n",
    "        self.hidden2 = nn.Linear(hidden_size1, hidden_size2)\n",
    "        self.output = nn.Linear(hidden_size2, num_classes)\n",
    "        self.relu = nn.ReLU()\n",
    "        # Dropout randomly zeroes activations during training to curb overfitting.\n",
    "        self.dropout = nn.Dropout(p=p)\n",
    "\n",
    "    def forward(self, x):\n",
    "        \"\"\"Flatten the batch and return class logits of shape (batch, num_classes).\"\"\"\n",
    "        flat = x.view(-1, self.input_size)\n",
    "        h1 = self.dropout(self.relu(self.hidden1(flat)))\n",
    "        h2 = self.dropout(self.relu(self.hidden2(h1)))\n",
    "        return self.output(h2)"
   ],
   "id": "670e9e282d29ba64",
   "outputs": [],
   "execution_count": 5
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "### 设备配置 并 初始化模型",
   "id": "6b900119c73c953b"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-04-27T11:13:54.956926Z",
     "start_time": "2025-04-27T11:13:54.274005Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Train on GPU when available, otherwise fall back to CPU.\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "# 784 input pixels -> 512 -> 256 -> 10 classes, dropout p=0.5.\n",
    "model = MnistNN(train_feats.shape[1], 512, 256, 10, 0.5).to(device)\n",
    "model"
   ],
   "id": "ba7290b64ffe4711",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "MnistNN(\n",
       "  (hidden1): Linear(in_features=784, out_features=512, bias=True)\n",
       "  (hidden2): Linear(in_features=512, out_features=256, bias=True)\n",
       "  (output): Linear(in_features=256, out_features=10, bias=True)\n",
       "  (relu): ReLU()\n",
       "  (dropout): Dropout(p=0.5, inplace=False)\n",
       ")"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 6
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "### 损失函数和优化器",
   "id": "44ec4de4fa479e2d"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-04-27T11:13:57.314362Z",
     "start_time": "2025-04-27T11:13:55.418093Z"
    }
   },
   "cell_type": "code",
   "source": [
    "from torch import optim\n",
    "\n",
    "learning_rate = 0.001\n",
    "criterion = nn.CrossEntropyLoss()       # combines LogSoftmax and NLL loss; standard for multi-class classification\n",
    "optimizer = optim.Adam(model.parameters(), lr=learning_rate)    # Adam: adaptive per-parameter learning rates"
   ],
   "id": "3241d1272db26abb",
   "outputs": [],
   "execution_count": 7
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "## 训练循环",
   "id": "a79d82868063d8f0"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-04-27T11:15:43.799743Z",
     "start_time": "2025-04-27T11:13:58.722283Z"
    }
   },
   "cell_type": "code",
   "source": [
    "num_epochs = 30\n",
    "total_step = len(train_loader)\n",
    "for epoch in range(num_epochs):\n",
    "    # Ensure dropout is active — without this, re-running after the eval cell\n",
    "    # would silently train with the model stuck in eval mode.\n",
    "    model.train()\n",
    "    for i, (features, labels) in enumerate(train_loader):\n",
    "        # Move the batch to the training device.\n",
    "        features = features.to(device)\n",
    "        labels = labels.to(device)\n",
    "        # Forward pass.\n",
    "        outputs = model(features)\n",
    "        loss = criterion(outputs, labels)\n",
    "        # Backward pass: clear stale gradients before accumulating new ones.\n",
    "        optimizer.zero_grad()\n",
    "        loss.backward()\n",
    "        # Update the parameters.\n",
    "        optimizer.step()\n",
    "        # Periodic progress log; .item() converts a 0-d tensor to a Python number.\n",
    "        if (i + 1) % 100 == 0:\n",
    "            print(f'epoch: [{epoch + 1}/{num_epochs}], step: [{i + 1}/{total_step}], loss: {loss.item():.4f}')"
   ],
   "id": "6d1e584ff4bfc3f4",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: [1/30], step: [100/493], loss: 0.5538\n",
      "epoch: [1/30], step: [200/493], loss: 0.5911\n",
      "epoch: [1/30], step: [300/493], loss: 0.2213\n",
      "epoch: [1/30], step: [400/493], loss: 0.1856\n",
      "epoch: [2/30], step: [100/493], loss: 0.1752\n",
      "epoch: [2/30], step: [200/493], loss: 0.1746\n",
      "epoch: [2/30], step: [300/493], loss: 0.5194\n",
      "epoch: [2/30], step: [400/493], loss: 0.1133\n",
      "epoch: [3/30], step: [100/493], loss: 0.2917\n",
      "epoch: [3/30], step: [200/493], loss: 0.2487\n",
      "epoch: [3/30], step: [300/493], loss: 0.0834\n",
      "epoch: [3/30], step: [400/493], loss: 0.2578\n",
      "epoch: [4/30], step: [100/493], loss: 0.0584\n",
      "epoch: [4/30], step: [200/493], loss: 0.2299\n",
      "epoch: [4/30], step: [300/493], loss: 0.1360\n",
      "epoch: [4/30], step: [400/493], loss: 0.1319\n",
      "epoch: [5/30], step: [100/493], loss: 0.1639\n",
      "epoch: [5/30], step: [200/493], loss: 0.1521\n",
      "epoch: [5/30], step: [300/493], loss: 0.1023\n",
      "epoch: [5/30], step: [400/493], loss: 0.0198\n",
      "epoch: [6/30], step: [100/493], loss: 0.2972\n",
      "epoch: [6/30], step: [200/493], loss: 0.1646\n",
      "epoch: [6/30], step: [300/493], loss: 0.0170\n",
      "epoch: [6/30], step: [400/493], loss: 0.2261\n",
      "epoch: [7/30], step: [100/493], loss: 0.1159\n",
      "epoch: [7/30], step: [200/493], loss: 0.0400\n",
      "epoch: [7/30], step: [300/493], loss: 0.1540\n",
      "epoch: [7/30], step: [400/493], loss: 0.3562\n",
      "epoch: [8/30], step: [100/493], loss: 0.1263\n",
      "epoch: [8/30], step: [200/493], loss: 0.1618\n",
      "epoch: [8/30], step: [300/493], loss: 0.2299\n",
      "epoch: [8/30], step: [400/493], loss: 0.0124\n",
      "epoch: [9/30], step: [100/493], loss: 0.0823\n",
      "epoch: [9/30], step: [200/493], loss: 0.0564\n",
      "epoch: [9/30], step: [300/493], loss: 0.2133\n",
      "epoch: [9/30], step: [400/493], loss: 0.2095\n",
      "epoch: [10/30], step: [100/493], loss: 0.0228\n",
      "epoch: [10/30], step: [200/493], loss: 0.0704\n",
      "epoch: [10/30], step: [300/493], loss: 0.0979\n",
      "epoch: [10/30], step: [400/493], loss: 0.0558\n",
      "epoch: [11/30], step: [100/493], loss: 0.2132\n",
      "epoch: [11/30], step: [200/493], loss: 0.1356\n",
      "epoch: [11/30], step: [300/493], loss: 0.0652\n",
      "epoch: [11/30], step: [400/493], loss: 0.2156\n",
      "epoch: [12/30], step: [100/493], loss: 0.0728\n",
      "epoch: [12/30], step: [200/493], loss: 0.1038\n",
      "epoch: [12/30], step: [300/493], loss: 0.1088\n",
      "epoch: [12/30], step: [400/493], loss: 0.1810\n",
      "epoch: [13/30], step: [100/493], loss: 0.0632\n",
      "epoch: [13/30], step: [200/493], loss: 0.0627\n",
      "epoch: [13/30], step: [300/493], loss: 0.0432\n",
      "epoch: [13/30], step: [400/493], loss: 0.0927\n",
      "epoch: [14/30], step: [100/493], loss: 0.1807\n",
      "epoch: [14/30], step: [200/493], loss: 0.0597\n",
      "epoch: [14/30], step: [300/493], loss: 0.1158\n",
      "epoch: [14/30], step: [400/493], loss: 0.1071\n",
      "epoch: [15/30], step: [100/493], loss: 0.0563\n",
      "epoch: [15/30], step: [200/493], loss: 0.1002\n",
      "epoch: [15/30], step: [300/493], loss: 0.3033\n",
      "epoch: [15/30], step: [400/493], loss: 0.1499\n",
      "epoch: [16/30], step: [100/493], loss: 0.0593\n",
      "epoch: [16/30], step: [200/493], loss: 0.1172\n",
      "epoch: [16/30], step: [300/493], loss: 0.1615\n",
      "epoch: [16/30], step: [400/493], loss: 0.0267\n",
      "epoch: [17/30], step: [100/493], loss: 0.0599\n",
      "epoch: [17/30], step: [200/493], loss: 0.1417\n",
      "epoch: [17/30], step: [300/493], loss: 0.0663\n",
      "epoch: [17/30], step: [400/493], loss: 0.0074\n",
      "epoch: [18/30], step: [100/493], loss: 0.0760\n",
      "epoch: [18/30], step: [200/493], loss: 0.0382\n",
      "epoch: [18/30], step: [300/493], loss: 0.0257\n",
      "epoch: [18/30], step: [400/493], loss: 0.0760\n",
      "epoch: [19/30], step: [100/493], loss: 0.1387\n",
      "epoch: [19/30], step: [200/493], loss: 0.0126\n",
      "epoch: [19/30], step: [300/493], loss: 0.0404\n",
      "epoch: [19/30], step: [400/493], loss: 0.0973\n",
      "epoch: [20/30], step: [100/493], loss: 0.0775\n",
      "epoch: [20/30], step: [200/493], loss: 0.2348\n",
      "epoch: [20/30], step: [300/493], loss: 0.0165\n",
      "epoch: [20/30], step: [400/493], loss: 0.0091\n",
      "epoch: [21/30], step: [100/493], loss: 0.0124\n",
      "epoch: [21/30], step: [200/493], loss: 0.0577\n",
      "epoch: [21/30], step: [300/493], loss: 0.0073\n",
      "epoch: [21/30], step: [400/493], loss: 0.0129\n",
      "epoch: [22/30], step: [100/493], loss: 0.1540\n",
      "epoch: [22/30], step: [200/493], loss: 0.1073\n",
      "epoch: [22/30], step: [300/493], loss: 0.0332\n",
      "epoch: [22/30], step: [400/493], loss: 0.1424\n",
      "epoch: [23/30], step: [100/493], loss: 0.1331\n",
      "epoch: [23/30], step: [200/493], loss: 0.0358\n",
      "epoch: [23/30], step: [300/493], loss: 0.2051\n",
      "epoch: [23/30], step: [400/493], loss: 0.1011\n",
      "epoch: [24/30], step: [100/493], loss: 0.0620\n",
      "epoch: [24/30], step: [200/493], loss: 0.2237\n",
      "epoch: [24/30], step: [300/493], loss: 0.2799\n",
      "epoch: [24/30], step: [400/493], loss: 0.1452\n",
      "epoch: [25/30], step: [100/493], loss: 0.2695\n",
      "epoch: [25/30], step: [200/493], loss: 0.0362\n",
      "epoch: [25/30], step: [300/493], loss: 0.1182\n",
      "epoch: [25/30], step: [400/493], loss: 0.0354\n",
      "epoch: [26/30], step: [100/493], loss: 0.0174\n",
      "epoch: [26/30], step: [200/493], loss: 0.3241\n",
      "epoch: [26/30], step: [300/493], loss: 0.0973\n",
      "epoch: [26/30], step: [400/493], loss: 0.0105\n",
      "epoch: [27/30], step: [100/493], loss: 0.0924\n",
      "epoch: [27/30], step: [200/493], loss: 0.0723\n",
      "epoch: [27/30], step: [300/493], loss: 0.0544\n",
      "epoch: [27/30], step: [400/493], loss: 0.0230\n",
      "epoch: [28/30], step: [100/493], loss: 0.1243\n",
      "epoch: [28/30], step: [200/493], loss: 0.0091\n",
      "epoch: [28/30], step: [300/493], loss: 0.2078\n",
      "epoch: [28/30], step: [400/493], loss: 0.0277\n",
      "epoch: [29/30], step: [100/493], loss: 0.1755\n",
      "epoch: [29/30], step: [200/493], loss: 0.0162\n",
      "epoch: [29/30], step: [300/493], loss: 0.0433\n",
      "epoch: [29/30], step: [400/493], loss: 0.0045\n",
      "epoch: [30/30], step: [100/493], loss: 0.2844\n",
      "epoch: [30/30], step: [200/493], loss: 0.1921\n",
      "epoch: [30/30], step: [300/493], loss: 0.0840\n",
      "epoch: [30/30], step: [400/493], loss: 0.0249\n"
     ]
    }
   ],
   "execution_count": 8
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": [
    "## 测试阶段\n",
    "\n",
    "---\n",
    "\n",
    "model.eval( ) 负责切换网络层模式为评估模式：\n",
    "\n",
    "- 禁用训练专用机制：关闭训练阶段特有的随机性操作（如Dropout）和动态统计量计算（如BatchNorm）\n",
    "\n",
    "确保推理结果的稳定性和可复现性\n",
    "\n",
    "---\n",
    "\n",
    "torch.no_grad( ) 用于临时禁用梯度计算的上下文管理器：\n",
    "\n",
    "- 禁止计算图生成\n",
    "- 所有操作产生的张量requires_grad自动设为False\n",
    "\n",
    "这使得前向传播时的显存占用减少约30%，计算速度提升20%-40%\n",
    "\n",
    "---"
   ],
   "id": "8fa6ef8bbc21927"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-04-27T11:15:52.733184Z",
     "start_time": "2025-04-27T11:15:49.867719Z"
    }
   },
   "cell_type": "code",
   "source": [
    "model.eval()            # disable dropout for deterministic evaluation\n",
    "with torch.no_grad():   # no autograd bookkeeping needed for inference\n",
    "    correct = 0\n",
    "    total = 0\n",
    "    for features, labels in val_loader:\n",
    "        # One forward pass per batch.\n",
    "        features = features.to(device)\n",
    "        labels = labels.to(device)\n",
    "        outputs = model(features)\n",
    "        # Predicted class = index of the largest logit; labels are one-hot,\n",
    "        # so argmax recovers the class index there too (avoids deprecated .data).\n",
    "        predicted = outputs.argmax(dim=1)\n",
    "        targets = labels.argmax(dim=1)\n",
    "        # Count correct predictions and the total number of samples seen.\n",
    "        correct += (predicted == targets).sum().item()\n",
    "        total += targets.size(0)\n",
    "    \n",
    "    print(f'Accuracy: {(correct / total) * 100:.2f}%')"
   ],
   "id": "1f682acac96f4d1c",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Accuracy: 97.25%\n"
     ]
    }
   ],
   "execution_count": 9
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "## 输出结果",
   "id": "b1821317252a5374"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-04-27T11:15:55.663304Z",
     "start_time": "2025-04-27T11:15:54.747595Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Load the Kaggle test set and apply the scaler fitted on the training data.\n",
    "test_raw = pd.read_csv('./../data/test.csv').values\n",
    "test_scaled = Scaler.transform(test_raw)\n",
    "test_features = torch.tensor(test_scaled, dtype=torch.float).to(device)"
   ],
   "id": "a13531993d0d0487",
   "outputs": [],
   "execution_count": 10
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-04-27T11:15:58.155280Z",
     "start_time": "2025-04-27T11:15:58.046393Z"
    }
   },
   "cell_type": "code",
   "source": [
    "model.eval()\n",
    "with torch.no_grad():\n",
    "    # For each test image take the class index with the highest logit,\n",
    "    # then move the result back to the CPU as a NumPy array.\n",
    "    predictions = model(test_features).argmax(dim=1).cpu().numpy()"
   ],
   "id": "3b79a77c2a69238b",
   "outputs": [],
   "execution_count": 11
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-04-27T11:15:59.358486Z",
     "start_time": "2025-04-27T11:15:59.325515Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Build the submission frame; Kaggle expects ImageId to start at 1.\n",
    "result = pd.DataFrame({\n",
    "    'ImageId': range(1, len(predictions) + 1), \n",
    "    'Label': predictions\n",
    "})\n",
    "# Write without the index column to match the required submission format.\n",
    "result.to_csv('./../data/result.csv', index=False)"
   ],
   "id": "8aff2166ad20f7f3",
   "outputs": [],
   "execution_count": 12
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "![](./../img/7.png)",
   "id": "23fce1f5f7d06189"
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
