{
 "cells": [
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": [
    "多层感知机（Multilayer Perceptron, MLP）是一种简单的前馈神经网络，常用于图像分类任务。下面是一个使用 PyTorch 实现的 MLP 模型，用于 MNIST 数据集的分类任务。我们将包括数据加载、模型定义、训练和预测的功能。\n",
    "### 1. 导入必要的库"
   ],
   "id": "89a73021623de82"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-11-11T03:02:21.338962Z",
     "start_time": "2024-11-11T03:02:21.334543Z"
    }
   },
   "cell_type": "code",
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "import torchvision\n",
    "import torchvision.transforms as transforms\n",
    "from torch.utils.data import DataLoader"
   ],
   "id": "initial_id",
   "outputs": [],
   "execution_count": 16
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "### 2. 加载和预处理数据",
   "id": "e7d45fad2c0cdf85"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-11-11T03:02:21.466186Z",
     "start_time": "2024-11-11T03:02:21.372137Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Define the data transforms\n",
    "transform = transforms.Compose([\n",
    "    transforms.ToTensor(),  # convert PIL image to a float Tensor in [0, 1]\n",
    "    transforms.Normalize((0.5,), (0.5,))  # shift/scale to roughly [-1, 1]\n",
    "])\n",
    "\n",
    "# Download and load the training data\n",
    "train_dataset = torchvision.datasets.MNIST(root='../data', train=True, download=True, transform=transform)\n",
    "train_loader = DataLoader(train_dataset, batch_size=10, shuffle=True)\n",
    "\n",
    "# Download and load the test data (no shuffling so results are reproducible)\n",
    "test_dataset = torchvision.datasets.MNIST(root='../data', train=False, download=True, transform=transform)\n",
    "test_loader = DataLoader(test_dataset, batch_size=10, shuffle=False)"
   ],
   "id": "168f2c12c471bf4b",
   "outputs": [],
   "execution_count": 17
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "### 3. 定义多层感知机模型",
   "id": "413a270700ee34b2"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-11-11T03:02:21.473618Z",
     "start_time": "2024-11-11T03:02:21.468220Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class MLP(nn.Module):\n",
    "    \"\"\"A simple 3-layer fully-connected classifier for 28x28 grayscale images.\"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        super(MLP, self).__init__()\n",
    "        self.fc1 = nn.Linear(28 * 28, 128)  # input layer -> hidden layer\n",
    "        self.fc2 = nn.Linear(128, 64)       # hidden layer -> hidden layer\n",
    "        self.fc3 = nn.Linear(64, 10)        # hidden layer -> output layer (10 classes)\n",
    "        self.relu = nn.ReLU()               # activation function\n",
    "\n",
    "    def forward(self, x):\n",
    "        # Flatten each image into a 1-D vector\n",
    "        x = x.view(-1, 28 * 28)\n",
    "        x = self.relu(self.fc1(x))\n",
    "        x = self.relu(self.fc2(x))\n",
    "        # Return raw logits; nn.CrossEntropyLoss applies log-softmax internally\n",
    "        x = self.fc3(x)\n",
    "        return x"
   ],
   "id": "8c7b7ecd4177d2c8",
   "outputs": [],
   "execution_count": 18
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "### 4. 初始化模型、损失函数和优化器",
   "id": "c62f5b009d829755"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-11-11T03:02:21.481269Z",
     "start_time": "2024-11-11T03:02:21.475376Z"
    }
   },
   "cell_type": "code",
   "source": [
    "model = MLP()\n",
    "criterion = nn.CrossEntropyLoss()  # cross-entropy loss (expects raw logits)\n",
    "optimizer = optim.Adam(model.parameters(), lr=0.001)  # Adam optimizer"
   ],
   "id": "6c65bde4fcc2639a",
   "outputs": [],
   "execution_count": 19
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "### 5. 训练模型",
   "id": "98d877341ef4bd4c"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-11-11T03:06:29.156613Z",
     "start_time": "2024-11-11T03:02:21.483556Z"
    }
   },
   "cell_type": "code",
   "source": [
    "num_epochs = 10\n",
    "\n",
    "for epoch in range(num_epochs):\n",
    "    model.train()\n",
    "    running_loss = 0.0\n",
    "    for images, labels in train_loader:\n",
    "        optimizer.zero_grad()  # clear accumulated gradients\n",
    "        outputs = model(images)  # forward pass\n",
    "        loss = criterion(outputs, labels)  # compute the loss\n",
    "        loss.backward()  # backward pass\n",
    "        optimizer.step()  # update the weights\n",
    "\n",
    "        running_loss += loss.item()\n",
    "\n",
    "    # Report the mean per-batch loss for this epoch\n",
    "    print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {running_loss/len(train_loader):.4f}')\n"
   ],
   "id": "75306832591ec5bd",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch [1/10], Loss: 0.3263\n",
      "Epoch [2/10], Loss: 0.1676\n",
      "Epoch [3/10], Loss: 0.1352\n",
      "Epoch [4/10], Loss: 0.1167\n",
      "Epoch [5/10], Loss: 0.1023\n",
      "Epoch [6/10], Loss: 0.0937\n",
      "Epoch [7/10], Loss: 0.0869\n",
      "Epoch [8/10], Loss: 0.0828\n",
      "Epoch [9/10], Loss: 0.0792\n",
      "Epoch [10/10], Loss: 0.0751\n"
     ]
    }
   ],
   "execution_count": 20
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "### 6. 测试模型",
   "id": "7e71bba245300e0b"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-11-11T03:06:31.603596Z",
     "start_time": "2024-11-11T03:06:29.158029Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def evaluate_model(model, data_loader):\n",
    "    \"\"\"Compute and print the classification accuracy of `model` over `data_loader`.\"\"\"\n",
    "    model.eval()\n",
    "    correct = 0\n",
    "    total = 0\n",
    "    with torch.no_grad():  # no gradients needed during evaluation\n",
    "        for images, labels in data_loader:\n",
    "            outputs = model(images)\n",
    "            _, predicted = torch.max(outputs.data, 1)  # class index with highest logit\n",
    "            total += labels.size(0)\n",
    "            correct += (predicted == labels).sum().item()\n",
    "\n",
    "    accuracy = 100 * correct / total\n",
    "    print(f'Accuracy of the model on the test set: {accuracy:.2f}%')\n",
    "\n",
    "evaluate_model(model, test_loader)"
   ],
   "id": "2952c212d915e805",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Accuracy of the model on the test set: 96.71%\n"
     ]
    }
   ],
   "execution_count": 21
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "### 7. 使用模型进行预测",
   "id": "3796e4456a3e2538"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-11-11T03:06:33.390211Z",
     "start_time": "2024-11-11T03:06:31.605Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def predict_image(model, image):\n",
    "    \"\"\"Return the predicted class index for a single image tensor.\"\"\"\n",
    "    model.eval()\n",
    "    with torch.no_grad():\n",
    "        image = image.view(-1, 28 * 28)  # flatten the image to a 1-D vector\n",
    "        output = model(image)\n",
    "        _, predicted = torch.max(output.data, 1)\n",
    "    return predicted.item()\n",
    "\n",
    "# Pick a random image from the test set and predict its class\n",
    "# NOTE(review): this import would conventionally live in the top imports cell\n",
    "import random\n",
    "\n",
    "image, label = random.choice(list(test_dataset))\n",
    "predicted_label = predict_image(model, image)\n",
    "print(f'Predicted label: {predicted_label}, True label: {label}')\n"
   ],
   "id": "564cb948311b9409",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Predicted label: 2, True label: 2\n"
     ]
    }
   ],
   "execution_count": 22
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-11-11T03:06:33.399817Z",
     "start_time": "2024-11-11T03:06:33.391367Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def get_labels(labels):\n",
    "    \"\"\"Map MNIST class indices (0-9) to display strings.\n",
    "\n",
    "    The original label table listed Fashion-MNIST class names (t-shirt,\n",
    "    trouser, ...), but the dataset loaded above is MNIST handwritten\n",
    "    digits, so the printed names did not describe the actual classes.\n",
    "    \"\"\"\n",
    "    return [str(int(label)) for label in labels]\n",
    "\n",
    "# Show true vs. predicted labels for the first test batch, one image at a time\n",
    "for X, y in test_loader:\n",
    "    labels = get_labels(y)\n",
    "    predict_y = [predict_image(model, image) for image in X]\n",
    "    predicted_labels = get_labels(predict_y)\n",
    "    print(labels)\n",
    "    print(predicted_labels)\n",
    "    break\n"
   ],
   "id": "6ab3f6c329d9404d",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['7', '2', '1', '0', '4', '1', '4', '9', '5', '9']\n",
      "['7', '2', '1', '0', '4', '1', '4', '9', '5', '9']\n"
     ]
    }
   ],
   "execution_count": 23
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2024-11-11T03:06:33.407756Z",
     "start_time": "2024-11-11T03:06:33.400862Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def get_labels(labels):\n",
    "    \"\"\"Map MNIST class indices (0-9) to display strings.\n",
    "\n",
    "    The original label table here (a duplicate of the previous cell's\n",
    "    definition) used Fashion-MNIST class names, which do not match the\n",
    "    MNIST digit dataset loaded above.\n",
    "    \"\"\"\n",
    "    return [str(int(label)) for label in labels]\n",
    "\n",
    "# Batched alternative: classify the whole first test batch in one forward pass\n",
    "for X, y in test_loader:\n",
    "    break\n",
    "trues = get_labels(y)\n",
    "preds = get_labels(model(X).argmax(axis=1))\n",
    "print(trues)\n",
    "print(preds)"
   ],
   "id": "7d95879c0b300bb9",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['7', '2', '1', '0', '4', '1', '4', '9', '5', '9']\n",
      "['7', '2', '1', '0', '4', '1', '4', '9', '5', '9']\n"
     ]
    }
   ],
   "execution_count": 24
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
