{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "9577daa4-2b47-4382-aa49-5e42bbfc493c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Files already downloaded and verified\n",
      "Files already downloaded and verified\n",
      "[1,  2000] loss: 2.099\n",
      "[1,  4000] loss: 1.894\n",
      "[1,  6000] loss: 1.766\n",
      "[1,  8000] loss: 1.657\n",
      "[1, 10000] loss: 1.549\n",
      "[1, 12000] loss: 1.452\n",
      "Saving epoch 1 model ...\n",
      "[2,  2000] loss: 1.356\n",
      "[2,  4000] loss: 1.301\n",
      "[2,  6000] loss: 1.218\n",
      "[2,  8000] loss: 1.194\n",
      "[2, 10000] loss: 1.158\n",
      "[2, 12000] loss: 1.115\n",
      "Saving epoch 2 model ...\n",
      "[3,  2000] loss: 1.036\n",
      "[3,  4000] loss: 1.017\n",
      "[3,  6000] loss: 0.985\n",
      "[3,  8000] loss: 0.960\n",
      "[3, 10000] loss: 0.949\n",
      "[3, 12000] loss: 0.921\n",
      "Saving epoch 3 model ...\n",
      "[4,  2000] loss: 0.842\n",
      "[4,  4000] loss: 0.846\n",
      "[4,  6000] loss: 0.819\n",
      "[4,  8000] loss: 0.821\n",
      "[4, 10000] loss: 0.813\n",
      "[4, 12000] loss: 0.769\n",
      "Saving epoch 4 model ...\n",
      "[5,  2000] loss: 0.706\n",
      "[5,  4000] loss: 0.690\n",
      "[5,  6000] loss: 0.702\n",
      "[5,  8000] loss: 0.692\n",
      "[5, 10000] loss: 0.720\n",
      "[5, 12000] loss: 0.669\n",
      "Saving epoch 5 model ...\n",
      "Finished Training\n"
     ]
    }
   ],
   "source": [
    "########################################\n",
    "# Step 1: load the data\n",
    "########################################\n",
    "import torch\n",
    "import torchvision\n",
    "import torchvision.transforms as transforms\n",
    "# torchvision downloads CIFAR-10 as PILImages with values in [0, 1];\n",
    "# we convert them to tensors and normalize to [-1, 1].\n",
    "\n",
    "import os\n",
    "import os.path\n",
    "\n",
    "# Named constants instead of magic values repeated through the script.\n",
    "DATA_ROOT = '/data/project/python/torch/data/cifar10'  # CIFAR-10 download/cache directory\n",
    "BATCH_SIZE = 4   # images loaded per batch (DataLoader default is 1)\n",
    "NUM_WORKERS = 2  # subprocesses used to load the data\n",
    "\n",
    "transform = transforms.Compose(\n",
    "    [transforms.ToTensor(),  # PILImage -> float tensor in [0, 1]\n",
    "     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]  # map [0, 1] to [-1, 1] per channel\n",
    "     )\n",
    "\n",
    "# The dataset can also be fetched manually from\n",
    "# https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz (link from the CIFAR-10 homepage).\n",
    "trainset = torchvision.datasets.CIFAR10(root=DATA_ROOT,\n",
    "                                        train=True,\n",
    "                                        download=True,\n",
    "                                        transform=transform  # apply the transform defined above\n",
    "                                        )\n",
    "trainloader = torch.utils.data.DataLoader(trainset,\n",
    "                                          batch_size=BATCH_SIZE,\n",
    "                                          shuffle=True,\n",
    "                                          num_workers=NUM_WORKERS)\n",
    "\n",
    "testset = torchvision.datasets.CIFAR10(root=DATA_ROOT,\n",
    "                                       train=False,\n",
    "                                       download=True,\n",
    "                                       transform=transform)\n",
    "testloader = torch.utils.data.DataLoader(testset,\n",
    "                                         batch_size=BATCH_SIZE,\n",
    "                                         shuffle=False,\n",
    "                                         num_workers=NUM_WORKERS)\n",
    "\n",
    "cifar10_classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n",
    "\n",
    "########################################\n",
    "# Inspect a few training samples\n",
    "# Note: this section is optional and need not be part of the main script.\n",
    "########################################\n",
    "# import numpy as np\n",
    "\n",
    "# dataiter = iter(trainloader)  # draw some samples from the training data\n",
    "# images, labels = next(dataiter) \n",
    "# images.shape #(4L, 3L, 32L, 32L)  \n",
    "# # images has shape 4*3*32*32 because trainloader above was built with a batch size of 4\n",
    "\n",
    "# torchvision.utils.save_image(images[1],\"test.jpg\") # save just one image from the batch for a look\n",
    "# cifar10_classes[labels[j]]  # print the label\n",
    "\n",
    "########################################\n",
    "# Step 2: build the convolutional network (VGG16)\n",
    "########################################\n",
    "import math\n",
    "import torch.nn as nn\n",
    "\n",
    "# Layer configuration: integers are Conv2d output channel counts, 'M' is a 2x2 max pool.\n",
    "cfg = {'VGG16':[64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M']}\n",
    "\n",
    "class VGG(nn.Module):\n",
    "    \"\"\"VGG-style CNN for CIFAR-10: conv/pool feature extractor + 3-layer classifier head.\"\"\"\n",
    "\n",
    "    def __init__(self, net_name):\n",
    "        super(VGG, self).__init__()\n",
    "        \n",
    "        # Convolution and pooling stages. The output is named `features` because\n",
    "        # it carries the spatial feature maps extracted from the image.\n",
    "        self.features = self._make_layers(cfg[net_name])\n",
    "        \n",
    "        # Fully connected classifier head on top of the conv features.\n",
    "        self.classifier = nn.Sequential(\n",
    "            nn.Dropout(),\n",
    "            nn.Linear(512, 512),  # fc1\n",
    "            nn.ReLU(True),\n",
    "            nn.Dropout(),\n",
    "            nn.Linear(512, 512),  # fc2\n",
    "            nn.ReLU(True),\n",
    "            nn.Linear(512, 10),   # fc3 -- CIFAR-10 has 10 output classes\n",
    "        )\n",
    "        # Initialize conv weights as N(0, sqrt(2/n)) with n = fan-out (He-style init);\n",
    "        # biases start at zero.\n",
    "        for m in self.modules():\n",
    "            if isinstance(m, nn.Conv2d):\n",
    "                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n",
    "                m.weight.data.normal_(0, math.sqrt(2. / n))\n",
    "                m.bias.data.zero_()\n",
    "    \n",
    "    def forward(self, x):\n",
    "        x = self.features(x)       # conv + pooling stages first\n",
    "        x = x.view(x.size(0), -1)  # flatten the feature maps to (batch, 512)\n",
    "        x = self.classifier(x)     # then the fully connected classifier\n",
    "        return x\n",
    "    \n",
    "    def _make_layers(self, cfg):\n",
    "        \"\"\"Translate a cfg list into a Sequential of Conv/BatchNorm/ReLU and MaxPool layers.\"\"\"\n",
    "        layers = []\n",
    "        in_channels = 3  # RGB input\n",
    "        for v in cfg:\n",
    "            if v == 'M':\n",
    "                layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n",
    "            else:\n",
    "                layers += [nn.Conv2d(in_channels, v, kernel_size=3, padding=1),\n",
    "                           nn.BatchNorm2d(v),\n",
    "                           nn.ReLU(inplace=True)]\n",
    "                in_channels = v\n",
    "        return nn.Sequential(*layers)\n",
    "\n",
    "net = VGG('VGG16')\n",
    "\n",
    "########################################\n",
    "# Step 3: define the loss function and the optimizer\n",
    "########################################\n",
    "import torch.optim as optim\n",
    "\n",
    "criterion = nn.CrossEntropyLoss()  # loss: cross entropy\n",
    "optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)  # optimizer: SGD with momentum\n",
    "\n",
    "########################################\n",
    "# Step 4: train the network\n",
    "########################################\n",
    "NUM_EPOCHS = 5  # number of full passes over the training set\n",
    "# BUG FIX: the old code checked/created a *relative* 'checkpoint' directory but\n",
    "# saved into this absolute path, so torch.save failed whenever it was missing.\n",
    "# Create the actual save directory once, up front.\n",
    "CHECKPOINT_DIR = '/data/project/python/torch/result/vgg16/checkpoint'\n",
    "os.makedirs(CHECKPOINT_DIR, exist_ok=True)\n",
    "\n",
    "for epoch in range(NUM_EPOCHS):\n",
    "    train_loss = 0.0\n",
    "    for batch_idx, data in enumerate(trainloader, 0):\n",
    "        inputs, labels = data  # unpack the batch\n",
    "        optimizer.zero_grad()  # reset gradients accumulated by the previous step\n",
    "        \n",
    "        # Optimization step\n",
    "        outputs = net(inputs)              # forward pass: network predictions for the batch\n",
    "        loss = criterion(outputs, labels)  # cross entropy between predictions and labels\n",
    "        loss.backward()                    # backpropagate the error\n",
    "        optimizer.step()                   # SGD weight update (optimizer defined above)\n",
    "        \n",
    "        # Report training progress\n",
    "        train_loss += loss.item()\n",
    "        if batch_idx % 2000 == 1999:  # print the running loss every 2000 batches\n",
    "            print('[%d, %5d] loss: %.3f' % (epoch + 1, batch_idx + 1, train_loss / 2000))\n",
    "            train_loss = 0.0\n",
    "    \n",
    "    # Checkpoint the model after every epoch.\n",
    "    print('Saving epoch %d model ...' % (epoch + 1))\n",
    "    state = {\n",
    "        'net': net.state_dict(),\n",
    "        'epoch': epoch + 1,\n",
    "    }\n",
    "    torch.save(state, os.path.join(CHECKPOINT_DIR, 'cifar10_epoch_%d.ckpt' % (epoch + 1)))\n",
    "\n",
    "print('Finished Training')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0ac88413-36f1-4073-bfe2-5b81372055d3",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
