{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 初始化 \n",
    "导入数据和导入相应的包。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1.2.0\n"
     ]
    }
   ],
   "source": [
    "import torch \n",
    "import numpy as np\n",
    "import torch.nn as nn\n",
    "import torchvision\n",
    "import matplotlib.pyplot as plt\n",
    "import torch.optim as optim\n",
    "print(torch.__version__)\n",
    "torch.set_default_tensor_type('torch.FloatTensor')\n",
    "device = torch.device('cuda')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 查看观察导入的 Cifar-10 数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pickle\n",
    "def LoadData(path):\n",
    "    with open(path,'rb') as f:\n",
    "        datadict = pickle.load(f,encoding = 'latin1')\n",
    "        X = datadict['data']\n",
    "        X = X.reshape(10000,3,32,32)\n",
    "        Y = datadict['labels']\n",
    "        Y = np.array(Y)\n",
    "    return X, Y"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "train_path = ['cifar10/data_batch_%s' % i for i in range(1,6)]\n",
    "test_path = 'cifar10/test_batch'\n",
    "\n",
    "train_data = None\n",
    "train_lable = None\n",
    "val_data = None\n",
    "val_label = None\n",
    "test_data = None\n",
    "test_label = None\n",
    "\n",
    "for path in train_path:\n",
    "    X, y = LoadData(path)\n",
    "    if path == 'cifar10/data_batch_1':\n",
    "        train_data = X\n",
    "        train_label = y\n",
    "    else:\n",
    "        train_data = np.vstack((train_data, X))\n",
    "        train_label = np.hstack((train_label, y))\n",
    "val_data = torch.tensor(train_data[49000:50000,:]).type(torch.FloatTensor)\n",
    "val_label = torch.tensor(train_label[49000:50000]).type(torch.LongTensor)\n",
    "train_data = torch.tensor(train_data[0:49000,:]).type(torch.FloatTensor)\n",
    "train_label = torch.tensor(train_label[0:49000]).type(torch.LongTensor)\n",
    "test_data, test_label = LoadData(test_path)\n",
    "test_data = torch.tensor(test_data).type(torch.FloatTensor)\n",
    "test_label = torch.tensor(test_label).type(torch.LongTensor)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch.utils.data as Data\n",
    "\n",
    "batch_size = 64\n",
    "\n",
    "train_set = Data.TensorDataset(train_data, train_label)\n",
    "val_set = Data.TensorDataset(val_data, val_label)\n",
    "test_set = Data.TensorDataset(test_data, test_label)\n",
    "\n",
    "data_iter = Data.DataLoader(\n",
    "    dataset=train_set,\n",
    "    batch_size=batch_size,\n",
    "    shuffle=True,\n",
    "    num_workers=0,\n",
    ")\n",
    "\n",
    "val_iter = Data.DataLoader(\n",
    "    dataset=val_set,\n",
    "    batch_size=batch_size,\n",
    "    shuffle=True,\n",
    "    num_workers=0,\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 查看网络\n",
     "查看网络结构和参数量，并且测试网络的输入输出是否正确"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Flatten(nn.Module):\n",
    "    def __init__(self):\n",
    "        super(Flatten, self).__init__()\n",
    "\n",
    "    def forward(self, input):\n",
    "        return input.view(input.size(0), -1).squeeze()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "def vgg_block(num_convs, in_channels, out_channels):\n",
    "    blk = []\n",
    "    for i in range(num_convs):\n",
    "        if i == 0:\n",
    "            blk.append(nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1))\n",
    "        else:\n",
    "            blk.append(nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1))\n",
    "        blk.append(nn.ReLU())\n",
    "    blk.append(nn.MaxPool2d(kernel_size=2, stride=2))\n",
    "    return nn.Sequential(*blk)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
     "# (num_convs, in_channels, out_channels) for each of the five VGG stages\n",
     "conv_arch = ((2, 3, 64), (2, 64, 128), (3, 128, 256), (3, 256, 512), (3, 512, 512))\n",
     "fc_features = 512 # derived from the conv output: 512 channels x 1 x 1 for a 32x32 input\n",
     "fc_hidden_units = 4096 # arbitrary hidden width of the FC head\n",
     "\n",
     "class VGG16(nn.Module):\n",
     "    # NOTE(review): unconventional use of nn.Module -- `forward` takes no\n",
     "    # input tensor and instead acts as a factory that builds and returns\n",
     "    # the actual nn.Sequential model. Left unchanged because a later cell\n",
     "    # calls net.forward(conv_arch=..., fc_features=...) directly.\n",
     "    def __init__(self):\n",
     "        super(VGG16, self).__init__()   \n",
     "        \n",
     "    def forward(self,conv_arch, fc_features, fc_hidden_units=4096):\n",
     "        \"\"\"Build and return the nn.Sequential VGG network (not a forward pass).\"\"\"\n",
     "        net = nn.Sequential()\n",
     "        # convolutional stages\n",
     "        for i, (num_convs, in_channels, out_channels) in enumerate(conv_arch):\n",
     "            net.add_module('vgg_block_' + str(i+1), vgg_block(num_convs, in_channels, out_channels))\n",
     "            \n",
     "        # fully-connected classifier head\n",
     "        net.add_module(\n",
     "            'fc',\n",
     "            nn.Sequential(\n",
     "                Flatten(),\n",
     "                nn.Linear(fc_features, fc_hidden_units),\n",
     "                nn.ReLU(),\n",
     "                nn.Dropout(0.5),\n",
     "                nn.Linear(fc_hidden_units, fc_hidden_units),\n",
     "                nn.ReLU(),\n",
     "                nn.Dropout(0.5),\n",
     "                nn.Linear(fc_hidden_units,10),\n",
     "            )\n",
     "        )\n",
     "        return net"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Sequential(\n",
      "  (vgg_block_1): Sequential(\n",
      "    (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (1): ReLU()\n",
      "    (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (3): ReLU()\n",
      "    (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
      "  )\n",
      "  (vgg_block_2): Sequential(\n",
      "    (0): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (1): ReLU()\n",
      "    (2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (3): ReLU()\n",
      "    (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
      "  )\n",
      "  (vgg_block_3): Sequential(\n",
      "    (0): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (1): ReLU()\n",
      "    (2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (3): ReLU()\n",
      "    (4): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (5): ReLU()\n",
      "    (6): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
      "  )\n",
      "  (vgg_block_4): Sequential(\n",
      "    (0): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (1): ReLU()\n",
      "    (2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (3): ReLU()\n",
      "    (4): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (5): ReLU()\n",
      "    (6): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
      "  )\n",
      "  (vgg_block_5): Sequential(\n",
      "    (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (1): ReLU()\n",
      "    (2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (3): ReLU()\n",
      "    (4): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (5): ReLU()\n",
      "    (6): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
      "  )\n",
      "  (fc): Sequential(\n",
      "    (0): Flatten()\n",
      "    (1): Linear(in_features=512, out_features=4096, bias=True)\n",
      "    (2): ReLU()\n",
      "    (3): Dropout(p=0.5, inplace=False)\n",
      "    (4): Linear(in_features=4096, out_features=4096, bias=True)\n",
      "    (5): ReLU()\n",
      "    (6): Dropout(p=0.5, inplace=False)\n",
      "    (7): Linear(in_features=4096, out_features=10, bias=True)\n",
      "  )\n",
      ")\n"
     ]
    }
   ],
   "source": [
    "net = VGG16()\n",
    "model = net.forward(conv_arch=conv_arch, fc_features=fc_features, fc_hidden_units=4096)\n",
    "print(model)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "======================================================================================\n",
      "                                Kernel Shape      Output Shape      Params  \\\n",
      "Layer                                                                        \n",
      "0_vgg_block_1.Conv2d_0         [3, 64, 3, 3]   [1, 64, 32, 32]      1.792k   \n",
      "1_vgg_block_1.ReLU_1                       -   [1, 64, 32, 32]           -   \n",
      "2_vgg_block_1.Conv2d_2        [64, 64, 3, 3]   [1, 64, 32, 32]     36.928k   \n",
      "3_vgg_block_1.ReLU_3                       -   [1, 64, 32, 32]           -   \n",
      "4_vgg_block_1.MaxPool2d_4                  -   [1, 64, 16, 16]           -   \n",
      "5_vgg_block_2.Conv2d_0       [64, 128, 3, 3]  [1, 128, 16, 16]     73.856k   \n",
      "6_vgg_block_2.ReLU_1                       -  [1, 128, 16, 16]           -   \n",
      "7_vgg_block_2.Conv2d_2      [128, 128, 3, 3]  [1, 128, 16, 16]    147.584k   \n",
      "8_vgg_block_2.ReLU_3                       -  [1, 128, 16, 16]           -   \n",
      "9_vgg_block_2.MaxPool2d_4                  -    [1, 128, 8, 8]           -   \n",
      "10_vgg_block_3.Conv2d_0     [128, 256, 3, 3]    [1, 256, 8, 8]    295.168k   \n",
      "11_vgg_block_3.ReLU_1                      -    [1, 256, 8, 8]           -   \n",
      "12_vgg_block_3.Conv2d_2     [256, 256, 3, 3]    [1, 256, 8, 8]     590.08k   \n",
      "13_vgg_block_3.ReLU_3                      -    [1, 256, 8, 8]           -   \n",
      "14_vgg_block_3.Conv2d_4     [256, 256, 3, 3]    [1, 256, 8, 8]     590.08k   \n",
      "15_vgg_block_3.ReLU_5                      -    [1, 256, 8, 8]           -   \n",
      "16_vgg_block_3.MaxPool2d_6                 -    [1, 256, 4, 4]           -   \n",
      "17_vgg_block_4.Conv2d_0     [256, 512, 3, 3]    [1, 512, 4, 4]    1.18016M   \n",
      "18_vgg_block_4.ReLU_1                      -    [1, 512, 4, 4]           -   \n",
      "19_vgg_block_4.Conv2d_2     [512, 512, 3, 3]    [1, 512, 4, 4]   2.359808M   \n",
      "20_vgg_block_4.ReLU_3                      -    [1, 512, 4, 4]           -   \n",
      "21_vgg_block_4.Conv2d_4     [512, 512, 3, 3]    [1, 512, 4, 4]   2.359808M   \n",
      "22_vgg_block_4.ReLU_5                      -    [1, 512, 4, 4]           -   \n",
      "23_vgg_block_4.MaxPool2d_6                 -    [1, 512, 2, 2]           -   \n",
      "24_vgg_block_5.Conv2d_0     [512, 512, 3, 3]    [1, 512, 2, 2]   2.359808M   \n",
      "25_vgg_block_5.ReLU_1                      -    [1, 512, 2, 2]           -   \n",
      "26_vgg_block_5.Conv2d_2     [512, 512, 3, 3]    [1, 512, 2, 2]   2.359808M   \n",
      "27_vgg_block_5.ReLU_3                      -    [1, 512, 2, 2]           -   \n",
      "28_vgg_block_5.Conv2d_4     [512, 512, 3, 3]    [1, 512, 2, 2]   2.359808M   \n",
      "29_vgg_block_5.ReLU_5                      -    [1, 512, 2, 2]           -   \n",
      "30_vgg_block_5.MaxPool2d_6                 -    [1, 512, 1, 1]           -   \n",
      "31_fc.Flatten_0                            -             [512]           -   \n",
      "32_fc.Linear_1                   [512, 4096]            [4096]   2.101248M   \n",
      "33_fc.ReLU_2                               -            [4096]           -   \n",
      "34_fc.Dropout_3                            -            [4096]           -   \n",
      "35_fc.Linear_4                  [4096, 4096]            [4096]  16.781312M   \n",
      "36_fc.ReLU_5                               -            [4096]           -   \n",
      "37_fc.Dropout_6                            -            [4096]           -   \n",
      "38_fc.Linear_7                    [4096, 10]              [10]      40.97k   \n",
      "\n",
      "                             Mult-Adds  \n",
      "Layer                                   \n",
      "0_vgg_block_1.Conv2d_0       1.769472M  \n",
      "1_vgg_block_1.ReLU_1                 -  \n",
      "2_vgg_block_1.Conv2d_2      37.748736M  \n",
      "3_vgg_block_1.ReLU_3                 -  \n",
      "4_vgg_block_1.MaxPool2d_4            -  \n",
      "5_vgg_block_2.Conv2d_0      18.874368M  \n",
      "6_vgg_block_2.ReLU_1                 -  \n",
      "7_vgg_block_2.Conv2d_2      37.748736M  \n",
      "8_vgg_block_2.ReLU_3                 -  \n",
      "9_vgg_block_2.MaxPool2d_4            -  \n",
      "10_vgg_block_3.Conv2d_0     18.874368M  \n",
      "11_vgg_block_3.ReLU_1                -  \n",
      "12_vgg_block_3.Conv2d_2     37.748736M  \n",
      "13_vgg_block_3.ReLU_3                -  \n",
      "14_vgg_block_3.Conv2d_4     37.748736M  \n",
      "15_vgg_block_3.ReLU_5                -  \n",
      "16_vgg_block_3.MaxPool2d_6           -  \n",
      "17_vgg_block_4.Conv2d_0     18.874368M  \n",
      "18_vgg_block_4.ReLU_1                -  \n",
      "19_vgg_block_4.Conv2d_2     37.748736M  \n",
      "20_vgg_block_4.ReLU_3                -  \n",
      "21_vgg_block_4.Conv2d_4     37.748736M  \n",
      "22_vgg_block_4.ReLU_5                -  \n",
      "23_vgg_block_4.MaxPool2d_6           -  \n",
      "24_vgg_block_5.Conv2d_0      9.437184M  \n",
      "25_vgg_block_5.ReLU_1                -  \n",
      "26_vgg_block_5.Conv2d_2      9.437184M  \n",
      "27_vgg_block_5.ReLU_3                -  \n",
      "28_vgg_block_5.Conv2d_4      9.437184M  \n",
      "29_vgg_block_5.ReLU_5                -  \n",
      "30_vgg_block_5.MaxPool2d_6           -  \n",
      "31_fc.Flatten_0                      -  \n",
      "32_fc.Linear_1               2.097152M  \n",
      "33_fc.ReLU_2                         -  \n",
      "34_fc.Dropout_3                      -  \n",
      "35_fc.Linear_4              16.777216M  \n",
      "36_fc.ReLU_5                         -  \n",
      "37_fc.Dropout_6                      -  \n",
      "38_fc.Linear_7                  40.96k  \n",
      "--------------------------------------------------------------------------------------\n",
      "                           Totals\n",
      "Total params           33.638218M\n",
      "Trainable params       33.638218M\n",
      "Non-trainable params          0.0\n",
      "Mult-Adds             332.111872M\n",
      "======================================================================================\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>Kernel Shape</th>\n",
       "      <th>Output Shape</th>\n",
       "      <th>Params</th>\n",
       "      <th>Mult-Adds</th>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Layer</th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0_vgg_block_1.Conv2d_0</th>\n",
       "      <td>[3, 64, 3, 3]</td>\n",
       "      <td>[1, 64, 32, 32]</td>\n",
       "      <td>1792.0</td>\n",
       "      <td>1769472.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1_vgg_block_1.ReLU_1</th>\n",
       "      <td>-</td>\n",
       "      <td>[1, 64, 32, 32]</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2_vgg_block_1.Conv2d_2</th>\n",
       "      <td>[64, 64, 3, 3]</td>\n",
       "      <td>[1, 64, 32, 32]</td>\n",
       "      <td>36928.0</td>\n",
       "      <td>37748736.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3_vgg_block_1.ReLU_3</th>\n",
       "      <td>-</td>\n",
       "      <td>[1, 64, 32, 32]</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4_vgg_block_1.MaxPool2d_4</th>\n",
       "      <td>-</td>\n",
       "      <td>[1, 64, 16, 16]</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5_vgg_block_2.Conv2d_0</th>\n",
       "      <td>[64, 128, 3, 3]</td>\n",
       "      <td>[1, 128, 16, 16]</td>\n",
       "      <td>73856.0</td>\n",
       "      <td>18874368.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>6_vgg_block_2.ReLU_1</th>\n",
       "      <td>-</td>\n",
       "      <td>[1, 128, 16, 16]</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>7_vgg_block_2.Conv2d_2</th>\n",
       "      <td>[128, 128, 3, 3]</td>\n",
       "      <td>[1, 128, 16, 16]</td>\n",
       "      <td>147584.0</td>\n",
       "      <td>37748736.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>8_vgg_block_2.ReLU_3</th>\n",
       "      <td>-</td>\n",
       "      <td>[1, 128, 16, 16]</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>9_vgg_block_2.MaxPool2d_4</th>\n",
       "      <td>-</td>\n",
       "      <td>[1, 128, 8, 8]</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>10_vgg_block_3.Conv2d_0</th>\n",
       "      <td>[128, 256, 3, 3]</td>\n",
       "      <td>[1, 256, 8, 8]</td>\n",
       "      <td>295168.0</td>\n",
       "      <td>18874368.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>11_vgg_block_3.ReLU_1</th>\n",
       "      <td>-</td>\n",
       "      <td>[1, 256, 8, 8]</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>12_vgg_block_3.Conv2d_2</th>\n",
       "      <td>[256, 256, 3, 3]</td>\n",
       "      <td>[1, 256, 8, 8]</td>\n",
       "      <td>590080.0</td>\n",
       "      <td>37748736.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>13_vgg_block_3.ReLU_3</th>\n",
       "      <td>-</td>\n",
       "      <td>[1, 256, 8, 8]</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>14_vgg_block_3.Conv2d_4</th>\n",
       "      <td>[256, 256, 3, 3]</td>\n",
       "      <td>[1, 256, 8, 8]</td>\n",
       "      <td>590080.0</td>\n",
       "      <td>37748736.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>15_vgg_block_3.ReLU_5</th>\n",
       "      <td>-</td>\n",
       "      <td>[1, 256, 8, 8]</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>16_vgg_block_3.MaxPool2d_6</th>\n",
       "      <td>-</td>\n",
       "      <td>[1, 256, 4, 4]</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>17_vgg_block_4.Conv2d_0</th>\n",
       "      <td>[256, 512, 3, 3]</td>\n",
       "      <td>[1, 512, 4, 4]</td>\n",
       "      <td>1180160.0</td>\n",
       "      <td>18874368.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>18_vgg_block_4.ReLU_1</th>\n",
       "      <td>-</td>\n",
       "      <td>[1, 512, 4, 4]</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>19_vgg_block_4.Conv2d_2</th>\n",
       "      <td>[512, 512, 3, 3]</td>\n",
       "      <td>[1, 512, 4, 4]</td>\n",
       "      <td>2359808.0</td>\n",
       "      <td>37748736.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>20_vgg_block_4.ReLU_3</th>\n",
       "      <td>-</td>\n",
       "      <td>[1, 512, 4, 4]</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>21_vgg_block_4.Conv2d_4</th>\n",
       "      <td>[512, 512, 3, 3]</td>\n",
       "      <td>[1, 512, 4, 4]</td>\n",
       "      <td>2359808.0</td>\n",
       "      <td>37748736.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>22_vgg_block_4.ReLU_5</th>\n",
       "      <td>-</td>\n",
       "      <td>[1, 512, 4, 4]</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>23_vgg_block_4.MaxPool2d_6</th>\n",
       "      <td>-</td>\n",
       "      <td>[1, 512, 2, 2]</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>24_vgg_block_5.Conv2d_0</th>\n",
       "      <td>[512, 512, 3, 3]</td>\n",
       "      <td>[1, 512, 2, 2]</td>\n",
       "      <td>2359808.0</td>\n",
       "      <td>9437184.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>25_vgg_block_5.ReLU_1</th>\n",
       "      <td>-</td>\n",
       "      <td>[1, 512, 2, 2]</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>26_vgg_block_5.Conv2d_2</th>\n",
       "      <td>[512, 512, 3, 3]</td>\n",
       "      <td>[1, 512, 2, 2]</td>\n",
       "      <td>2359808.0</td>\n",
       "      <td>9437184.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>27_vgg_block_5.ReLU_3</th>\n",
       "      <td>-</td>\n",
       "      <td>[1, 512, 2, 2]</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>28_vgg_block_5.Conv2d_4</th>\n",
       "      <td>[512, 512, 3, 3]</td>\n",
       "      <td>[1, 512, 2, 2]</td>\n",
       "      <td>2359808.0</td>\n",
       "      <td>9437184.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>29_vgg_block_5.ReLU_5</th>\n",
       "      <td>-</td>\n",
       "      <td>[1, 512, 2, 2]</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>30_vgg_block_5.MaxPool2d_6</th>\n",
       "      <td>-</td>\n",
       "      <td>[1, 512, 1, 1]</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>31_fc.Flatten_0</th>\n",
       "      <td>-</td>\n",
       "      <td>[512]</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>32_fc.Linear_1</th>\n",
       "      <td>[512, 4096]</td>\n",
       "      <td>[4096]</td>\n",
       "      <td>2101248.0</td>\n",
       "      <td>2097152.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>33_fc.ReLU_2</th>\n",
       "      <td>-</td>\n",
       "      <td>[4096]</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>34_fc.Dropout_3</th>\n",
       "      <td>-</td>\n",
       "      <td>[4096]</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>35_fc.Linear_4</th>\n",
       "      <td>[4096, 4096]</td>\n",
       "      <td>[4096]</td>\n",
       "      <td>16781312.0</td>\n",
       "      <td>16777216.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>36_fc.ReLU_5</th>\n",
       "      <td>-</td>\n",
       "      <td>[4096]</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>37_fc.Dropout_6</th>\n",
       "      <td>-</td>\n",
       "      <td>[4096]</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>38_fc.Linear_7</th>\n",
       "      <td>[4096, 10]</td>\n",
       "      <td>[10]</td>\n",
       "      <td>40970.0</td>\n",
       "      <td>40960.0</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                                Kernel Shape      Output Shape      Params  \\\n",
       "Layer                                                                        \n",
       "0_vgg_block_1.Conv2d_0         [3, 64, 3, 3]   [1, 64, 32, 32]      1792.0   \n",
       "1_vgg_block_1.ReLU_1                       -   [1, 64, 32, 32]         NaN   \n",
       "2_vgg_block_1.Conv2d_2        [64, 64, 3, 3]   [1, 64, 32, 32]     36928.0   \n",
       "3_vgg_block_1.ReLU_3                       -   [1, 64, 32, 32]         NaN   \n",
       "4_vgg_block_1.MaxPool2d_4                  -   [1, 64, 16, 16]         NaN   \n",
       "5_vgg_block_2.Conv2d_0       [64, 128, 3, 3]  [1, 128, 16, 16]     73856.0   \n",
       "6_vgg_block_2.ReLU_1                       -  [1, 128, 16, 16]         NaN   \n",
       "7_vgg_block_2.Conv2d_2      [128, 128, 3, 3]  [1, 128, 16, 16]    147584.0   \n",
       "8_vgg_block_2.ReLU_3                       -  [1, 128, 16, 16]         NaN   \n",
       "9_vgg_block_2.MaxPool2d_4                  -    [1, 128, 8, 8]         NaN   \n",
       "10_vgg_block_3.Conv2d_0     [128, 256, 3, 3]    [1, 256, 8, 8]    295168.0   \n",
       "11_vgg_block_3.ReLU_1                      -    [1, 256, 8, 8]         NaN   \n",
       "12_vgg_block_3.Conv2d_2     [256, 256, 3, 3]    [1, 256, 8, 8]    590080.0   \n",
       "13_vgg_block_3.ReLU_3                      -    [1, 256, 8, 8]         NaN   \n",
       "14_vgg_block_3.Conv2d_4     [256, 256, 3, 3]    [1, 256, 8, 8]    590080.0   \n",
       "15_vgg_block_3.ReLU_5                      -    [1, 256, 8, 8]         NaN   \n",
       "16_vgg_block_3.MaxPool2d_6                 -    [1, 256, 4, 4]         NaN   \n",
       "17_vgg_block_4.Conv2d_0     [256, 512, 3, 3]    [1, 512, 4, 4]   1180160.0   \n",
       "18_vgg_block_4.ReLU_1                      -    [1, 512, 4, 4]         NaN   \n",
       "19_vgg_block_4.Conv2d_2     [512, 512, 3, 3]    [1, 512, 4, 4]   2359808.0   \n",
       "20_vgg_block_4.ReLU_3                      -    [1, 512, 4, 4]         NaN   \n",
       "21_vgg_block_4.Conv2d_4     [512, 512, 3, 3]    [1, 512, 4, 4]   2359808.0   \n",
       "22_vgg_block_4.ReLU_5                      -    [1, 512, 4, 4]         NaN   \n",
       "23_vgg_block_4.MaxPool2d_6                 -    [1, 512, 2, 2]         NaN   \n",
       "24_vgg_block_5.Conv2d_0     [512, 512, 3, 3]    [1, 512, 2, 2]   2359808.0   \n",
       "25_vgg_block_5.ReLU_1                      -    [1, 512, 2, 2]         NaN   \n",
       "26_vgg_block_5.Conv2d_2     [512, 512, 3, 3]    [1, 512, 2, 2]   2359808.0   \n",
       "27_vgg_block_5.ReLU_3                      -    [1, 512, 2, 2]         NaN   \n",
       "28_vgg_block_5.Conv2d_4     [512, 512, 3, 3]    [1, 512, 2, 2]   2359808.0   \n",
       "29_vgg_block_5.ReLU_5                      -    [1, 512, 2, 2]         NaN   \n",
       "30_vgg_block_5.MaxPool2d_6                 -    [1, 512, 1, 1]         NaN   \n",
       "31_fc.Flatten_0                            -             [512]         NaN   \n",
       "32_fc.Linear_1                   [512, 4096]            [4096]   2101248.0   \n",
       "33_fc.ReLU_2                               -            [4096]         NaN   \n",
       "34_fc.Dropout_3                            -            [4096]         NaN   \n",
       "35_fc.Linear_4                  [4096, 4096]            [4096]  16781312.0   \n",
       "36_fc.ReLU_5                               -            [4096]         NaN   \n",
       "37_fc.Dropout_6                            -            [4096]         NaN   \n",
       "38_fc.Linear_7                    [4096, 10]              [10]     40970.0   \n",
       "\n",
       "                             Mult-Adds  \n",
       "Layer                                   \n",
       "0_vgg_block_1.Conv2d_0       1769472.0  \n",
       "1_vgg_block_1.ReLU_1               NaN  \n",
       "2_vgg_block_1.Conv2d_2      37748736.0  \n",
       "3_vgg_block_1.ReLU_3               NaN  \n",
       "4_vgg_block_1.MaxPool2d_4          NaN  \n",
       "5_vgg_block_2.Conv2d_0      18874368.0  \n",
       "6_vgg_block_2.ReLU_1               NaN  \n",
       "7_vgg_block_2.Conv2d_2      37748736.0  \n",
       "8_vgg_block_2.ReLU_3               NaN  \n",
       "9_vgg_block_2.MaxPool2d_4          NaN  \n",
       "10_vgg_block_3.Conv2d_0     18874368.0  \n",
       "11_vgg_block_3.ReLU_1              NaN  \n",
       "12_vgg_block_3.Conv2d_2     37748736.0  \n",
       "13_vgg_block_3.ReLU_3              NaN  \n",
       "14_vgg_block_3.Conv2d_4     37748736.0  \n",
       "15_vgg_block_3.ReLU_5              NaN  \n",
       "16_vgg_block_3.MaxPool2d_6         NaN  \n",
       "17_vgg_block_4.Conv2d_0     18874368.0  \n",
       "18_vgg_block_4.ReLU_1              NaN  \n",
       "19_vgg_block_4.Conv2d_2     37748736.0  \n",
       "20_vgg_block_4.ReLU_3              NaN  \n",
       "21_vgg_block_4.Conv2d_4     37748736.0  \n",
       "22_vgg_block_4.ReLU_5              NaN  \n",
       "23_vgg_block_4.MaxPool2d_6         NaN  \n",
       "24_vgg_block_5.Conv2d_0      9437184.0  \n",
       "25_vgg_block_5.ReLU_1              NaN  \n",
       "26_vgg_block_5.Conv2d_2      9437184.0  \n",
       "27_vgg_block_5.ReLU_3              NaN  \n",
       "28_vgg_block_5.Conv2d_4      9437184.0  \n",
       "29_vgg_block_5.ReLU_5              NaN  \n",
       "30_vgg_block_5.MaxPool2d_6         NaN  \n",
       "31_fc.Flatten_0                    NaN  \n",
       "32_fc.Linear_1               2097152.0  \n",
       "33_fc.ReLU_2                       NaN  \n",
       "34_fc.Dropout_3                    NaN  \n",
       "35_fc.Linear_4              16777216.0  \n",
       "36_fc.ReLU_5                       NaN  \n",
       "37_fc.Dropout_6                    NaN  \n",
       "38_fc.Linear_7                 40960.0  "
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Print a per-layer parameter/shape summary of the model\n",
     "from torchsummaryX import summary\n",
     "summary(model, torch.zeros(1,3,32,32))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
     "## Define the loss function and the optimiser\n",
     "Loss = nn.CrossEntropyLoss()\n",
     "optimizer = optim.Adam(\n",
     "                model.parameters(),\n",
     "                lr=0.0001,\n",
     "            )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "def evaluate_accuracy(data_iter, net, device=None):\n",
    "    if device is None and isinstance(net, torch.nn.Module):\n",
    "        device = list(net.parameters())[0].device \n",
    "    acc_sum, n = 0.0, 0\n",
    "    with torch.no_grad():\n",
    "        for X, y in data_iter:\n",
    "            if isinstance(net, torch.nn.Module):\n",
    "                net.eval() # 评估模式, 这会关闭dropout\n",
    "                acc_sum += (net(X.to(device)).argmax(dim=1) == y.to(device)).float().sum().cpu().item()\n",
    "                net.train() # 改回训练模式\n",
    "            else: # 自定义的模型, 3.13节之后不会用到, 不考虑GPU\n",
    "                if('is_training' in net.__code__.co_varnames): # 如果有is_training这个参数\n",
    "                    # 将is_training设置成False\n",
    "                    acc_sum += (net(X, is_training=False).argmax(dim=1) == y).float().sum().item() \n",
    "                else:\n",
    "                    acc_sum += (net(X).argmax(dim=1) == y).float().sum().item() \n",
    "            n += y.shape[0]\n",
    "    return acc_sum / n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch 1, loss 1.9275, train acc 0.215, test acc 0.328, time 56.5 sec\n",
      "epoch 2, loss 1.5150, train acc 0.416, test acc 0.497, time 56.7 sec\n",
      "epoch 3, loss 1.1293, train acc 0.592, test acc 0.675, time 56.5 sec\n",
      "epoch 4, loss 0.8692, train acc 0.696, test acc 0.727, time 58.6 sec\n",
      "epoch 5, loss 0.7009, train acc 0.759, test acc 0.708, time 62.8 sec\n"
     ]
    }
   ],
   "source": [
    "# 训练\n",
    "import time\n",
    "\n",
    "epochs = 5\n",
    "device = torch.device('cuda')\n",
    "\n",
    "model = model.to(device)\n",
    "for epoch in range(epochs):\n",
    "    train_l_sum, train_acc_sum, n, batch_count, start = 0.0, 0.0, 0, 0, time.time()\n",
    "    for X, y in data_iter:\n",
    "        X = X.to(device)\n",
    "        y = y.to(device)\n",
    "        pred_y = model(X)\n",
    "        l = Loss(pred_y, y)\n",
    "        optimizer.zero_grad()\n",
    "        l.backward()\n",
    "        optimizer.step()\n",
    "        train_l_sum += l.cpu().item()\n",
    "        train_acc_sum += (pred_y.argmax(dim=1) == y).sum().cpu().item()\n",
    "        n += y.shape[0]\n",
    "        batch_count += 1\n",
    "    val_acc = evaluate_accuracy(val_iter, model)\n",
    "    print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f, time %.1f sec'\n",
    "              % (epoch + 1, train_l_sum / batch_count, train_acc_sum / n, val_acc, time.time() - start))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "torch",
   "language": "python",
   "name": "torch"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
