{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "name": "cifar10_lenet5.ipynb",
      "provenance": [],
      "collapsed_sections": []
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "accelerator": "GPU"
  },
  "cells": [
    {
      "cell_type": "code",
      "metadata": {
        "id": "dzdcSyvchSPB",
        "outputId": "860f6fab-3b82-4d69-ccab-0b8234706ddc",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 107
        }
      },
      "source": [
        "# %pip (not !pip) installs into the environment of the running kernel\n",
        "%pip install torch torchvision"
      ],
      "execution_count": null,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Requirement already satisfied: torch in /usr/local/lib/python3.6/dist-packages (1.6.0+cu101)\n",
            "Requirement already satisfied: torchvision in /usr/local/lib/python3.6/dist-packages (0.7.0+cu101)\n",
            "Requirement already satisfied: future in /usr/local/lib/python3.6/dist-packages (from torch) (0.16.0)\n",
            "Requirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from torch) (1.18.5)\n",
            "Requirement already satisfied: pillow>=4.1.1 in /usr/local/lib/python3.6/dist-packages (from torchvision) (7.0.0)\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "sBC4u8D7JGbV"
      },
      "source": [
        "import  torch\n",
        "from    torch import nn\n",
        "from    torch.nn import functional as F\n",
        "\n",
        "#Lenet:2层卷积+3层线性\n",
        "class Lenet5(nn.Module):\n",
        "    \"\"\"\n",
        "    for cifar10 dataset.\n",
        "    \"\"\"\n",
        "    def __init__(self):\n",
        "        '''构造函数，定义网络的结构'''\n",
        "        super(Lenet5, self).__init__()\n",
        "\n",
        "#1.第一层C1是一个卷积层\n",
        "#         输入图片: 32*32\n",
        "#         卷积核大小: 5*5\n",
        "#         卷积核种类: 6\n",
        "#         输出feature map大小:28*28(32-5+1)\n",
        "#         神经元数量:28*28*6\n",
        "#         可训练参数数量:(5*5+1)*6，(每个卷积核25个权重值w，一个截距值bias;总共6个卷积核)\n",
        "#         连接数量:(5*5+1)*6*28*28\n",
        "# 2.第二层S2是一个下采样层(池化层):\n",
        "#     输入:28*28\n",
        "#     采样区域:2*2\n",
        "#     采样方式:4个输入相加，乘以一个可训练参数，再加上一个可训练偏置，结果通过sigmoid。(论文原文是这样描述，但是实际中，我看到一般都是用最大池化)\n",
        "#     种类数量：6\n",
        "#     输出的feature map大小时:14*14(28/2)\n",
        "#     神经元数量:14*14*6\n",
        "#     可训练参数:2*6(和的权重w和偏置bias,然后乘以6)\n",
        "#     连接数:(2*2+1)*6*14*14\n",
        "        \n",
        "        self.conv_unit = nn.Sequential(\n",
        "            #定义卷积层，3个输入通道，6个输出通道，5*5的卷积filter，外层补上了两圈0,因为输入的是32*32\n",
        "            # x: [b, 3, 32, 32] => [b, 16, ]\n",
        "            nn.Conv2d(3, 6, kernel_size=5, stride=1, padding=0),\n",
        "            nn.MaxPool2d(kernel_size=2, stride=2, padding=0),\n",
        "            #第二个卷积层，6个输入，16个输出，5*5的卷积filter\n",
        "            nn.Conv2d(6, 16, kernel_size=5, stride=1, padding=0),\n",
        "            nn.MaxPool2d(kernel_size=2, stride=2, padding=0),\n",
        "            #\n",
        "        )\n",
        "        \n",
        "        # flatten\n",
        "        #最后是三个全连接层\n",
        "        self.fc_unit = nn.Sequential(\n",
        "            nn.Linear(16*5*5, 120),\n",
        "            nn.ReLU(),\n",
        "            nn.Linear(120, 84),\n",
        "            nn.ReLU(),\n",
        "            nn.Linear(84, 10)\n",
        "        )\n",
        "\n",
        "\n",
        "        # [b, 3, 32, 32]\n",
        "        tmp = torch.randn(2, 3, 32, 32)\n",
        "        out = self.conv_unit(tmp)\n",
        "        # [b, 16, 5, 5]\n",
        "        print('conv out:', out.shape)\n",
        "\n",
        "\n",
        "    #前向传播函数\n",
        "    def forward(self, x):\n",
        "        \"\"\"\n",
        "\n",
        "        :param x: [b, 3, 32, 32]\n",
        "        :return:\n",
        "        \"\"\"\n",
        "        #input x的第一个维度就是batch size\n",
        "        batchsz = x.size(0)\n",
        "        # [b, 3, 32, 32] => [b, 16, 5, 5]\n",
        "        x = self.conv_unit(x)\n",
        "        \n",
        "        # [b, 16, 5, 5] => [b, 16*5*5]  拉平\n",
        "        x = x.view(batchsz, 16*5*5)\n",
        "        # [b, 16*5*5] => [b, 10]\n",
        "        logits = self.fc_unit(x)\n",
        "\n",
        "        return logits\n"
      ],
      "execution_count": 16,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "OwYnqYKzJjpW"
      },
      "source": [
        "def main():\n",
        "    \"\"\"Train Lenet5 on CIFAR-10 and report test accuracy after each epoch.\"\"\"\n",
        "    # Mini-batch size and number of training epochs.\n",
        "    batchsz = 128\n",
        "    epochs = 10\n",
        "\n",
        "    # Shared preprocessing for train and test splits: resize to 32x32,\n",
        "    # convert PIL image (H*W*C, [0, 255]) to tensor (C*H*W, [0.0, 1.0]),\n",
        "    # then normalize per channel with ImageNet statistics.\n",
        "    preprocess = transforms.Compose([\n",
        "        transforms.Resize((32, 32)),\n",
        "        transforms.ToTensor(),\n",
        "        transforms.Normalize(mean=[0.485, 0.456, 0.406],\n",
        "                             std=[0.229, 0.224, 0.225])\n",
        "    ])\n",
        "\n",
        "    # CIFAR10(root, train, transform, download): root is the data directory\n",
        "    # (relative), train selects the split, download fetches data if missing.\n",
        "    cifar_train = datasets.CIFAR10('cifar', True, transform=preprocess, download=True)\n",
        "\n",
        "    print(\"训练集样本数:{}\".format(len(cifar_train)))\n",
        "\n",
        "    # Wrap the dataset in a DataLoader for shuffled mini-batch iteration.\n",
        "    cifar_train = DataLoader(cifar_train, batch_size=batchsz, shuffle=True)\n",
        "\n",
        "    # len(loader) == ceil(num_samples / batchsz)\n",
        "    print(\"一个epoch会被执行多少个batch:{}\".format(len(cifar_train)))\n",
        "\n",
        "    cifar_test = datasets.CIFAR10('cifar', False, transform=preprocess, download=True)\n",
        "    cifar_test = DataLoader(cifar_test, batch_size=batchsz, shuffle=True)\n",
        "\n",
        "    # Peek at one batch to confirm shapes.\n",
        "    # Py3 iterators have no .next() method; use the built-in next().\n",
        "    # x: [batchsz, channel, height, width] = [128, 3, 32, 32]; label: [128]\n",
        "    x, label = next(iter(cifar_train))\n",
        "    print('x:', x.shape, 'label:', label.shape)\n",
        "\n",
        "    # Use the GPU when present, otherwise fall back to CPU so the\n",
        "    # notebook still runs on machines without CUDA.\n",
        "    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
        "\n",
        "    # Move the model to the chosen device.\n",
        "    model = Lenet5().to(device)\n",
        "    #model = ResNet18().to(device)\n",
        "\n",
        "    # CrossEntropyLoss combines LogSoftmax and NLLLoss, so the model's\n",
        "    # forward() returns raw logits (no softmax layer).\n",
        "    criteon = nn.CrossEntropyLoss().to(device)\n",
        "\n",
        "    # Adam optimizer over all trainable parameters; lr is the learning rate.\n",
        "    optimizer = optim.Adam(model.parameters(), lr=1e-3)\n",
        "\n",
        "    # Print the model structure.\n",
        "    print(model)\n",
        "\n",
        "    for epoch in range(epochs):\n",
        "        # Training mode: enables BatchNorm updates and Dropout.\n",
        "        model.train()\n",
        "        for batchidx, (x, label) in enumerate(cifar_train):\n",
        "            # x: [b, 3, 32, 32], label: [b] — move tensors to the device.\n",
        "            x, label = x.to(device), label.to(device)\n",
        "\n",
        "            # Forward pass: logits [b, 10]; loss is a scalar tensor.\n",
        "            logits = model(x)\n",
        "            loss = criteon(logits, label)\n",
        "\n",
        "            # Zero gradients, backpropagate, then update the weights.\n",
        "            optimizer.zero_grad()\n",
        "            loss.backward()\n",
        "            optimizer.step()\n",
        "\n",
        "        # loss is a 0-dim tensor; .item() extracts the Python number.\n",
        "        # Note this is only the loss of the LAST batch of the epoch.\n",
        "        print(epoch, 'loss:', loss.item())\n",
        "\n",
        "        # Evaluation mode: freezes BatchNorm statistics and disables Dropout\n",
        "        # so inference does not perturb the trained state.\n",
        "        model.eval()\n",
        "\n",
        "        # no_grad() disables gradient tracking for the evaluation pass.\n",
        "        with torch.no_grad():\n",
        "            total_correct = 0\n",
        "            total_num = 0\n",
        "            for x, label in cifar_test:\n",
        "                # x: [b, 3, 32, 32], label: [b]\n",
        "                x, label = x.to(device), label.to(device)\n",
        "\n",
        "                # logits: [b, 10] raw class scores; the predicted class is\n",
        "                # the index of the largest score per row.\n",
        "                logits = model(x)\n",
        "                pred = logits.argmax(dim=1)\n",
        "\n",
        "                # Count predictions that match the ground-truth labels.\n",
        "                correct = torch.eq(pred, label).float().sum().item()\n",
        "                total_correct += correct\n",
        "                total_num += x.size(0)\n",
        "\n",
        "            # Accuracy over the full test set.\n",
        "            acc = total_correct / total_num\n",
        "            print(epoch, 'test acc:', acc)"
      ],
      "execution_count": 17,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "wjJJ4ZhGJOz5",
        "outputId": "e3009b5e-13d6-4373-c4e0-475178e7debb",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 755
        }
      },
      "source": [
        "import torch\n",
        "from torch import nn, optim\n",
        "from torch.utils.data import DataLoader\n",
        "from torchvision import datasets, transforms\n",
        "\n",
        "# Entry point: the imports above are bound at module level before\n",
        "# main() is invoked, so main() resolves them at call time.\n",
        "if __name__ == '__main__':\n",
        "    main()"
      ],
      "execution_count": 18,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Files already downloaded and verified\n",
            "训练集样本数:50000\n",
            "一个epoch会被执行多少个batch:391\n",
            "Files already downloaded and verified\n",
            "x: torch.Size([128, 3, 32, 32]) label: torch.Size([128])\n",
            "conv out: torch.Size([2, 16, 5, 5])\n",
            "Lenet5(\n",
            "  (conv_unit): Sequential(\n",
            "    (0): Conv2d(3, 6, kernel_size=(5, 5), stride=(1, 1))\n",
            "    (1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
            "    (2): Conv2d(6, 16, kernel_size=(5, 5), stride=(1, 1))\n",
            "    (3): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
            "  )\n",
            "  (fc_unit): Sequential(\n",
            "    (0): Linear(in_features=400, out_features=120, bias=True)\n",
            "    (1): ReLU()\n",
            "    (2): Linear(in_features=120, out_features=84, bias=True)\n",
            "    (3): ReLU()\n",
            "    (4): Linear(in_features=84, out_features=10, bias=True)\n",
            "  )\n",
            ")\n",
            "0 loss: 1.4444891214370728\n",
            "0 test acc: 0.4858\n",
            "1 loss: 1.246394395828247\n",
            "1 test acc: 0.5488\n",
            "2 loss: 1.2523612976074219\n",
            "2 test acc: 0.5788\n",
            "3 loss: 0.8204976916313171\n",
            "3 test acc: 0.5938\n",
            "4 loss: 0.9428863525390625\n",
            "4 test acc: 0.6139\n",
            "5 loss: 0.7900842428207397\n",
            "5 test acc: 0.6217\n",
            "6 loss: 0.7483559846878052\n",
            "6 test acc: 0.6202\n",
            "7 loss: 0.6521503925323486\n",
            "7 test acc: 0.6375\n",
            "8 loss: 0.9589999914169312\n",
            "8 test acc: 0.6323\n",
            "9 loss: 0.7853254079818726\n",
            "9 test acc: 0.6388\n"
          ],
          "name": "stdout"
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "ZkmpxtqRs6JJ"
      },
      "source": [
        ""
      ]
    }
  ]
}