{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "066ea76f",
   "metadata": {},
   "source": [
    "# 1、导包"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "de33f5d8",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn  # 神经网络的一些层\n",
    "import torch.optim as optim  # 优化算法模块，小批量随机梯度下降算法\n",
    "from torch.nn import functional as F  # 激活函数\n",
    "from torch.utils.data import DataLoader, TensorDataset  # 拆装数据\n",
    "import torchvision  # torch视觉方面\n",
    "import torchvision.transforms as transforms  # 图像处理转化为张量"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "79fa0748",
   "metadata": {},
   "source": [
    "# 2、确定数据、确定超参数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "id": "d80a9b6f",
   "metadata": {},
   "outputs": [],
   "source": [
    "lr = 0.15  # learning rate\n",
    "gamma = 0  # momentum coefficient for SGD (0 = plain SGD)\n",
    "epochs = 15  # number of full passes over the training set\n",
    "bs = 128  # batch size"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ddeddc7b",
   "metadata": {},
   "source": [
    "# 3、导入数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "a5318f9d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Download the FashionMNIST training split into the current directory (root=\"\")\n",
    "mnist = torchvision.datasets.FashionMNIST(root=\"\"\n",
    "                                 ,download=True\n",
    "                                 ,train=True\n",
    "                                 # preprocessing step: convert each PIL image to a tensor\n",
    "                                 ,transform=transforms.ToTensor()\n",
    "                                 )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "28105d77",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Split the data: wrap the dataset in a DataLoader that yields mini-batches of size bs\n",
    "batchdata = DataLoader(mnist\n",
    "                      ,batch_size=bs\n",
    "                      ,shuffle=True  # reshuffle samples every epoch\n",
    "                      )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "03d91136",
   "metadata": {},
   "outputs": [],
   "source": [
    "# input_ = number of pixels in one image (28*28 = 784); output_ = number of distinct class labels\n",
    "input_ = mnist.data[0].numel()\n",
    "output_ = len(mnist.targets.unique())"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a02e6c70",
   "metadata": {},
   "source": [
    "# 4、定义神经网络架构"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "id": "9f19309f",
   "metadata": {},
   "outputs": [],
   "source": [
    "class Model(nn.Module):\n",
    "    \"\"\"Small CNN for FashionMNIST: conv + batch norm + ReLU, then two linear layers,\n",
    "    returning log-probabilities (to be paired with NLLLoss).\n",
    "    \"\"\"\n",
    "    def __init__(self, in_features=10, out_features=2):\n",
    "        # NOTE(review): in_features is accepted but never used below — the input size of\n",
    "        # linear1 is hard-coded to 16 * 28 * 28; confirm this parameter is intentional.\n",
    "        super().__init__()  # run nn.Module's constructor so parameters/buffers get registered\n",
    "        # in_channels=1: grayscale input, so a single channel\n",
    "        # out_channels=16: number of learned filters\n",
    "        # kernel_size=3: 3x3 convolution kernel\n",
    "        # stride=1\n",
    "        # padding=1: pads one pixel around the image so the 28x28 spatial size is preserved\n",
    "        self.conv1 = nn.Conv2d(in_channels=1, out_channels=16, kernel_size=3, stride=1, padding=1)\n",
    "        # num_features must match the output channel count of the preceding conv layer\n",
    "        self.normalize = nn.BatchNorm2d(num_features=16)\n",
    "        self.linear1 = nn.Linear(16 * 28 * 28, 128, bias=False)\n",
    "        self.output = nn.Linear(128, out_features, bias=False)\n",
    "    \n",
    "    def forward(self, x):\n",
    "        \"\"\"Return per-class log-probabilities for a batch of images.\n",
    "\n",
    "        x: batch of images — assumes shape (batch_size, 1, 28, 28); TODO confirm at call site.\n",
    "        \"\"\"\n",
    "        x = self.conv1(x)  # -> (batch_size, 16, 28, 28) given the assumed input shape\n",
    "        x = self.normalize(x)\n",
    "        x = F.relu(x)\n",
    "        # flatten each sample's feature maps into one vector for the linear layers\n",
    "        x = x.view(-1, 16 * 28 * 28)\n",
    "        x = self.linear1(x)\n",
    "        x = F.relu(x)\n",
    "        final_output = self.output(x)\n",
    "        sigma2 = F.log_softmax(final_output, dim=1)  # log-probabilities, matching NLLLoss\n",
    "        return sigma2"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "03823c47",
   "metadata": {},
   "source": [
    "# 5、定义损失函数，优化算法"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "id": "4b99ce35",
   "metadata": {},
   "outputs": [],
   "source": [
    "def fit(net, batch_data, lr=0.01, epochs=5, gamma=0):\n",
    "    \"\"\"Train `net` with SGD + NLLLoss, printing running loss and accuracy.\n",
    "\n",
    "    net: module whose forward returns log-probabilities (pairs with NLLLoss).\n",
    "    batch_data: DataLoader yielding (x, y) mini-batches.\n",
    "    lr: learning rate; epochs: number of full passes; gamma: SGD momentum.\n",
    "    \"\"\"\n",
    "    criterion = nn.NLLLoss()  # loss function; expects log-probabilities as input\n",
    "    \n",
    "    samples = 0  # running count of samples the model has seen\n",
    "    correct = 0  # running count of correctly classified samples\n",
    "    \n",
    "    # mini-batch stochastic gradient descent (with optional momentum)\n",
    "    opt = optim.SGD(net.parameters()\n",
    "                   ,lr = lr\n",
    "                   ,momentum=gamma\n",
    "                   )\n",
    "    for epoch in range(epochs):  # how many times the full dataset is traversed\n",
    "        # BUGFIX: iterate the `batch_data` parameter, not the global `batchdata` —\n",
    "        # the original silently ignored its argument and depended on kernel state\n",
    "        for batch_idx, (x, y) in enumerate(batch_data):\n",
    "            # flatten the labels to shape (batch_size,) to match the loss's expectation\n",
    "            y = y.view(x.shape[0])\n",
    "            sigma = net(x)  # forward pass; call the module (not .forward) so hooks run\n",
    "            loss = criterion(sigma, y)\n",
    "            loss.backward()  # backpropagation\n",
    "            opt.step()  # update the weights\n",
    "            opt.zero_grad()  # clear accumulated gradients before the next batch\n",
    "            \n",
    "            # each batch adds x.shape[0] samples to the running total\n",
    "            samples += x.shape[0]\n",
    "            \n",
    "            # torch.max(sigma, 1) returns (max values, argmax indices) along dim 1;\n",
    "            # the indices [1] are the predicted class labels\n",
    "            yhat = torch.max(sigma, 1)[1]\n",
    "            correct += torch.sum(yhat == y)\n",
    "            \n",
    "            # report every 125 batches and at the end of each epoch\n",
    "            if (batch_idx + 1) % 125 == 0 or batch_idx == len(batch_data) - 1:\n",
    "                print('Epoch{}:[{}/{}({:.0f}%)]\\tLoss:{:.6f}\\tAccuracy:{:.3f}'.format(\n",
    "                    epoch+1\n",
    "                    ,samples\n",
    "                    ,epochs*len(batch_data.dataset)\n",
    "                    ,100*samples/(epochs*len(batch_data.dataset))\n",
    "                    ,loss.data.item()\n",
    "                    ,float(100*correct)/samples))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0cc84793",
   "metadata": {},
   "source": [
    "# 6、训练与评估"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "id": "75a421a2",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch1:[16000/900000(2%)]\tLoss:0.531203\tAccuracy:69.769\n",
      "Epoch1:[32000/900000(4%)]\tLoss:0.373976\tAccuracy:76.856\n",
      "Epoch1:[48000/900000(5%)]\tLoss:0.344055\tAccuracy:80.029\n",
      "Epoch1:[60000/900000(7%)]\tLoss:0.418852\tAccuracy:81.488\n",
      "Epoch2:[76000/900000(8%)]\tLoss:0.293243\tAccuracy:83.097\n",
      "Epoch2:[92000/900000(10%)]\tLoss:0.251650\tAccuracy:84.154\n",
      "Epoch2:[108000/900000(12%)]\tLoss:0.199835\tAccuracy:84.874\n",
      "Epoch2:[120000/900000(13%)]\tLoss:0.194802\tAccuracy:85.263\n",
      "Epoch3:[136000/900000(15%)]\tLoss:0.309784\tAccuracy:85.940\n",
      "Epoch3:[152000/900000(17%)]\tLoss:0.196271\tAccuracy:86.530\n",
      "Epoch3:[168000/900000(19%)]\tLoss:0.302202\tAccuracy:86.966\n",
      "Epoch3:[180000/900000(20%)]\tLoss:0.145327\tAccuracy:87.221\n",
      "Epoch4:[196000/900000(22%)]\tLoss:0.179523\tAccuracy:87.647\n",
      "Epoch4:[212000/900000(24%)]\tLoss:0.159358\tAccuracy:88.003\n",
      "Epoch4:[228000/900000(25%)]\tLoss:0.276649\tAccuracy:88.301\n",
      "Epoch4:[240000/900000(27%)]\tLoss:0.252588\tAccuracy:88.485\n",
      "Epoch5:[256000/900000(28%)]\tLoss:0.121526\tAccuracy:88.815\n",
      "Epoch5:[272000/900000(30%)]\tLoss:0.171298\tAccuracy:89.047\n",
      "Epoch5:[288000/900000(32%)]\tLoss:0.195644\tAccuracy:89.273\n",
      "Epoch5:[300000/900000(33%)]\tLoss:0.136455\tAccuracy:89.420\n",
      "Epoch6:[316000/900000(35%)]\tLoss:0.176779\tAccuracy:89.690\n",
      "Epoch6:[332000/900000(37%)]\tLoss:0.233144\tAccuracy:89.897\n",
      "Epoch6:[348000/900000(39%)]\tLoss:0.091939\tAccuracy:90.062\n",
      "Epoch6:[360000/900000(40%)]\tLoss:0.185618\tAccuracy:90.190\n",
      "Epoch7:[376000/900000(42%)]\tLoss:0.140425\tAccuracy:90.404\n",
      "Epoch7:[392000/900000(44%)]\tLoss:0.154187\tAccuracy:90.587\n",
      "Epoch7:[408000/900000(45%)]\tLoss:0.176423\tAccuracy:90.741\n",
      "Epoch7:[420000/900000(47%)]\tLoss:0.243956\tAccuracy:90.843\n",
      "Epoch8:[436000/900000(48%)]\tLoss:0.169619\tAccuracy:91.031\n",
      "Epoch8:[452000/900000(50%)]\tLoss:0.098413\tAccuracy:91.188\n",
      "Epoch8:[468000/900000(52%)]\tLoss:0.122408\tAccuracy:91.331\n",
      "Epoch8:[480000/900000(53%)]\tLoss:0.123430\tAccuracy:91.425\n",
      "Epoch9:[496000/900000(55%)]\tLoss:0.074289\tAccuracy:91.585\n",
      "Epoch9:[512000/900000(57%)]\tLoss:0.125678\tAccuracy:91.732\n",
      "Epoch9:[528000/900000(59%)]\tLoss:0.075252\tAccuracy:91.857\n",
      "Epoch9:[540000/900000(60%)]\tLoss:0.189133\tAccuracy:91.944\n",
      "Epoch10:[556000/900000(62%)]\tLoss:0.070906\tAccuracy:92.089\n",
      "Epoch10:[572000/900000(64%)]\tLoss:0.088293\tAccuracy:92.223\n",
      "Epoch10:[588000/900000(65%)]\tLoss:0.117707\tAccuracy:92.339\n",
      "Epoch10:[600000/900000(67%)]\tLoss:0.101134\tAccuracy:92.414\n",
      "Epoch11:[616000/900000(68%)]\tLoss:0.115799\tAccuracy:92.542\n",
      "Epoch11:[632000/900000(70%)]\tLoss:0.057495\tAccuracy:92.662\n",
      "Epoch11:[648000/900000(72%)]\tLoss:0.104543\tAccuracy:92.771\n",
      "Epoch11:[660000/900000(73%)]\tLoss:0.126425\tAccuracy:92.839\n",
      "Epoch12:[676000/900000(75%)]\tLoss:0.117190\tAccuracy:92.956\n",
      "Epoch12:[692000/900000(77%)]\tLoss:0.017965\tAccuracy:93.059\n",
      "Epoch12:[708000/900000(79%)]\tLoss:0.050768\tAccuracy:93.156\n",
      "Epoch12:[720000/900000(80%)]\tLoss:0.081478\tAccuracy:93.220\n",
      "Epoch13:[736000/900000(82%)]\tLoss:0.036969\tAccuracy:93.333\n",
      "Epoch13:[752000/900000(84%)]\tLoss:0.046563\tAccuracy:93.433\n",
      "Epoch13:[768000/900000(85%)]\tLoss:0.075606\tAccuracy:93.522\n",
      "Epoch13:[780000/900000(87%)]\tLoss:0.047569\tAccuracy:93.583\n",
      "Epoch14:[796000/900000(88%)]\tLoss:0.055025\tAccuracy:93.678\n",
      "Epoch14:[812000/900000(90%)]\tLoss:0.048077\tAccuracy:93.766\n",
      "Epoch14:[828000/900000(92%)]\tLoss:0.073043\tAccuracy:93.844\n",
      "Epoch14:[840000/900000(93%)]\tLoss:0.036736\tAccuracy:93.898\n",
      "Epoch15:[856000/900000(95%)]\tLoss:0.042932\tAccuracy:93.980\n",
      "Epoch15:[872000/900000(97%)]\tLoss:0.043161\tAccuracy:94.060\n",
      "Epoch15:[888000/900000(99%)]\tLoss:0.018482\tAccuracy:94.133\n",
      "Epoch15:[900000/900000(100%)]\tLoss:0.083485\tAccuracy:94.184\n"
     ]
    }
   ],
   "source": [
    "torch.manual_seed(1)  # fixed seed for reproducible weight initialization / shuffling\n",
    "# instantiate the model\n",
    "# NOTE(review): in_features is passed here but Model currently ignores it — confirm intent\n",
    "net = Model(in_features=input_,out_features=output_)\n",
    "\n",
    "fit(net,batchdata,lr=lr,epochs=epochs,gamma=gamma)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c1c35cd1",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bf02d890",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8020fe32",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0280798f",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
