{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 卷积神经网络"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    " ## 1 简单的卷积网络\n",
    " ### 1.1 卷积模块"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import math\n",
    "import numpy as np\n",
    "from torch import nn, optim\n",
    "import torch.nn.functional as F\n",
    "import matplotlib.pyplot as plt\n",
    "from torch.autograd import Variable\n",
    "from torch.utils.data import DataLoader\n",
    "from torchvision import transforms, datasets"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "SimpleCNN(\n",
      "  (layer1): Sequential(\n",
      "    (conv1): Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (relu1): ReLU(inplace)\n",
      "    (pool1): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
      "  )\n",
      "  (layer2): Sequential(\n",
      "    (conv2): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (relu2): ReLU(inplace)\n",
      "    (pool2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
      "  )\n",
      "  (layer3): Sequential(\n",
      "    (conv3): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (relu3): ReLU(inplace)\n",
      "    (pool3): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
      "  )\n",
      "  (layer4): Sequential(\n",
      "    (fc1): Linear(in_features=2048, out_features=512, bias=True)\n",
      "    (fc_relu1): ReLU(inplace)\n",
      "    (fc2): Linear(in_features=512, out_features=64, bias=True)\n",
      "    (fc2_relu2): ReLU(inplace)\n",
      "    (fc3): Linear(in_features=64, out_features=10, bias=True)\n",
      "  )\n",
      ")\n"
     ]
    }
   ],
   "source": [
    "# Define a simple convolutional network model\n",
    "class SimpleCNN(nn.Module):\n",
    "    def __init__(self):            # define the network structure\n",
    "        super().__init__()         # input [batch_size,3,32,32], 3 = channel depth\n",
    "        layer1 = nn.Sequential()   # stack the 1st block: conv layer\n",
    "        # (in_channels, out_channels, kernel_size, stride, padding)\n",
    "        # BUGFIX: was nn.Conv2d(32, 3, 1, padding=1), which swapped in/out channels\n",
    "        # and used kernel_size=1; the RGB input has 3 channels and layer2 expects 32,\n",
    "        # so the original forward pass would crash with a channel mismatch.\n",
    "        layer1.add_module('conv1', nn.Conv2d(3, 32, 3, 1, padding=1))\n",
    "        # get [batch_size, 32, 32, 32]\n",
    "        layer1.add_module('relu1', nn.ReLU(True))\n",
    "        layer1.add_module('pool1', nn.MaxPool2d(2,2))\n",
    "        # get [batch_size, 32, 16, 16]\n",
    "        self.layer1 = layer1\n",
    "        \n",
    "        layer2 = nn.Sequential()   # 2nd block\n",
    "        layer2.add_module('conv2', nn.Conv2d(32, 64, 3, 1, padding=1))\n",
    "        # get [batch_size, 64, 16, 16] \n",
    "        layer2.add_module('relu2', nn.ReLU(True))\n",
    "        layer2.add_module('pool2', nn.MaxPool2d(2,2))\n",
    "        # get [batch_size, 64, 8, 8]\n",
    "        self.layer2 = layer2\n",
    "        \n",
    "        layer3 = nn.Sequential()   # 3rd block\n",
    "        layer3.add_module('conv3', nn.Conv2d(64, 128, 3, 1, padding=1))\n",
    "        # get [batch_size, 128, 8, 8]\n",
    "        layer3.add_module('relu3', nn.ReLU(True))\n",
    "        layer3.add_module('pool3', nn.MaxPool2d(2,2))\n",
    "        # get [batch_size, 128, 4, 4]\n",
    "        self.layer3 = layer3\n",
    "        \n",
    "        layer4 = nn.Sequential()   # 4th block: fully-connected layers\n",
    "        layer4.add_module('fc1', nn.Linear(128*4*4, 512))\n",
    "        # get [batch_size, 512]\n",
    "        layer4.add_module('fc_relu1', nn.ReLU(True))\n",
    "        layer4.add_module('fc2', nn.Linear(512, 64))\n",
    "        # get [batch_size, 64]\n",
    "        layer4.add_module('fc2_relu2', nn.ReLU(True))\n",
    "        layer4.add_module('fc3', nn.Linear(64, 10))\n",
    "        # get [batch_size, 10]\n",
    "        self.layer4 = layer4\n",
    "    def forward(self, x):\n",
    "        conv1 = self.layer1(x)   # first 3 blocks are convolutional\n",
    "        conv2 = self.layer2(conv1)\n",
    "        conv3 = self.layer3(conv2)\n",
    "        # NOTE: flatten to 2-D before the fully-connected layers; -1 = width*height*depth\n",
    "        fc_input = conv3.view(conv3.size(0), -1)  # reshape to [batch_size, -1]\n",
    "        fc_out = self.layer4(fc_input)\n",
    "        return fc_out\n",
    "model = SimpleCNN()\n",
    "print(model)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "小结:\n",
    "- 模型层顺序：CNN+BN+ReLU+Pooling，先激活再池化;FC+ReLU+Dropout\n",
    "- 输出层不用激活函数"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 1.2 提取模型的层结构"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "|nn.Module属性|功能|实例|\n",
    "|-|-|-|\n",
    "|children()|返回下一级迭代器|self.layer1|\n",
    "|modules()|返回所有模块迭代器|self.layer1.conv1|\n",
    "|named_children()|返回模块的名称|其他功能同上|\n",
    "|named_modules()|返回模块的名称|其他功能同上|"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Conv2d(32, 3, kernel_size=(1, 1), stride=(1, 1), padding=(1, 1)) (1, 1) 3\n",
      "-----\n",
      "Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) (3, 3) 64\n",
      "-----\n"
     ]
    }
   ],
   "source": [
    "# 提取已有模型中的结构\n",
    "new_model = nn.Sequential(*list(model.children())[:2])   # *代表使用可变参数\n",
    "# print(new_model)\n",
    "# list(model.children())[0]\n",
    "for m in new_model.modules():\n",
    "    if isinstance(m, nn.Conv2d):\n",
    "        print(m, m.kernel_size, m.out_channels)\n",
    "        print('-----')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "('layer1.conv1', Conv2d(32, 3, kernel_size=(1, 1), stride=(1, 1), padding=(1, 1)))\n",
      "--------------------------------------------------\n",
      "('layer2.conv2', Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)))\n",
      "--------------------------------------------------\n",
      "('layer3.conv3', Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)))\n",
      "--------------------------------------------------\n",
      "1.新网络的结构:\n",
      "Sequential(\n",
      "  (conv1): Conv2d(32, 3, kernel_size=(1, 1), stride=(1, 1), padding=(1, 1))\n",
      "  (conv2): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "  (conv3): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      ")\n"
     ]
    }
   ],
   "source": [
    "# 提取模型中所有的卷积层\n",
    "conv_model = nn.Sequential()\n",
    "for layer in model.named_modules():    # layer[0]层名称, layer[1]层类型\n",
    "#     print(layer)\n",
    "    if isinstance(layer[1], nn.Conv2d):  \n",
    "        print(layer)\n",
    "        print('-'*50)\n",
    "        conv_model.add_module(layer[0].split('.')[-1], layer[1])\n",
    "print('1.新网络的结构:')\n",
    "print(conv_model)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 1.3 提取参数及初始化"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "|nn.Module属性|功能|\n",
    "|-|-|\n",
    "|parameters()|返回全部参数的迭代器|\n",
    "|named_parameters()|返回模块的名称，其他功能同上|"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "layer1.conv1.weight\n",
      "torch.Size([3, 32, 1, 1])\n",
      "--------------------------------------------------\n",
      "layer1.conv1.bias\n",
      "torch.Size([3])\n",
      "--------------------------------------------------\n",
      "layer2.conv2.weight\n",
      "torch.Size([64, 32, 3, 3])\n",
      "--------------------------------------------------\n",
      "layer2.conv2.bias\n",
      "torch.Size([64])\n",
      "--------------------------------------------------\n",
      "layer3.conv3.weight\n",
      "torch.Size([128, 64, 3, 3])\n",
      "--------------------------------------------------\n",
      "layer3.conv3.bias\n",
      "torch.Size([128])\n",
      "--------------------------------------------------\n",
      "layer4.fc1.weight\n",
      "torch.Size([512, 2048])\n",
      "--------------------------------------------------\n",
      "layer4.fc1.bias\n",
      "torch.Size([512])\n",
      "--------------------------------------------------\n",
      "layer4.fc2.weight\n",
      "torch.Size([64, 512])\n",
      "--------------------------------------------------\n",
      "layer4.fc2.bias\n",
      "torch.Size([64])\n",
      "--------------------------------------------------\n",
      "layer4.fc3.weight\n",
      "torch.Size([10, 64])\n",
      "--------------------------------------------------\n",
      "layer4.fc3.bias\n",
      "torch.Size([10])\n",
      "--------------------------------------------------\n"
     ]
    }
   ],
   "source": [
    "# 提取模型的所有参数\n",
    "for param in model.named_parameters():\n",
    "    # param[0]:层名称.weight/bias  param[1]:参数值，权重是Variable\n",
    "    print(param[0])\n",
    "    print((param[1]).data.shape)\n",
    "    print('-'*50)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 提取参数并初始化\n",
    "init = nn.init\n",
    "for m in model.modules():  # 访问所有模块\n",
    "    if isinstance(m, nn.Conv2d):\n",
    "        init.normal_(m.weight.data)\n",
    "        init.xavier_normal_(m.weight.data)\n",
    "        init.kaiming_normal_(m.weight.data)\n",
    "        m.bias.data.fill_(0)\n",
    "    elif isinstance(m, nn.Linear):\n",
    "        m.weight.data.normal_()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 2 卷积神经网络实例\n",
    "### 2.1 LeNet网络"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "conv层参数设置：[in_channels,out_channels,kernel_size,stride,padding]\n",
    "$$W_{conv}= \\frac{W_{input}-kernel\\_size+2*padding}{stride}+1$$\n",
    "\n",
    "maxpooling层参数设置：[kernel_size,stride,padding]\n",
    "\n",
    "fc层参数设置：[in_channels,out_channels]\n",
    "\n",
    "- 表格参数设置：[卷积核数/宽/高/深度] [池化宽/高] s:stride p:padding\n",
    "\n",
    "|结构|参数设置|数据维度|\n",
    "|-|-|-|\n",
    "|input||[batch_size,3,32,32]|\n",
    "|conv1|$[6\\times5\\times5\\times3]$<br>s=1 p=0|[batch_size,6,28,28]|\n",
    "|pool1|$[2\\times2]$<br>s=2 p=0|[batch_size,6,14,14]|\n",
    "|conv2|$[16\\times5\\times5\\times6]$<br>s=1 p=0 |[batch_size,16,10,10]|\n",
    "|pool2|$[2\\times2]$<br>s=2 p=0|[batch_size,16,5,5]|\n",
    "|fc1|$[(16*5*5)\\times120]$|[batch_size,120]|\n",
    "|fc2|$[120\\times84]$|[batch_size,84]|\n",
    "|fc3|$[84\\times10]$|[batch_size,10]|"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 定义网络结构\n",
    "class LeNet(nn.Module):\n",
    "    def __init__(self):\n",
    "        super().__init__()  # 输入维度[b,3,32,32]\n",
    "        \n",
    "        # 第一个卷积、池化模块\n",
    "        layer1 = nn.Sequential()\n",
    "        layer1.add_module('conv1', nn.Conv2d(3,6,5,1,padding=0))\n",
    "        # get [b,6,28,28]\n",
    "        layer1.add_module('pool1', nn.MaxPool2d(2,2))\n",
    "        # get [b,6,14,14]\n",
    "        self.layer1 = layer1\n",
    "        \n",
    "        # 第二个卷积、池化模块\n",
    "        layer2 = nn.Sequential()\n",
    "        layer2.add_module('conv2', nn.Conv2d(6,16,5,padding=0))\n",
    "        # get [b,16,10,10]\n",
    "        layer2.add_module('pool2', nn.MaxPool2d(2,2))\n",
    "        # get [b,16,5,5]\n",
    "        self.layer2 = layer2\n",
    "        \n",
    "        # 第三个全连接模块\n",
    "        # 先将卷积的feature map reshape为[b,16*5*5]的形状\n",
    "        layer3 = nn.Sequential()\n",
    "        layer3.add_module('fc1', nn.Linear(16*5*5, 120))\n",
    "        # get [b,120]\n",
    "        layer3.add_module('fc2', nn.Linear(120, 84))\n",
    "        # get [b, 84]\n",
    "        layer3.add_module('fc3', nn.Linear(84, 10))\n",
    "        # get [b, 10]\n",
    "        self.layer3 = layer3\n",
    "    def forward(self, x):\n",
    "        conv1 = self.layer1(x)\n",
    "        conv2 = self.layer2(conv1)\n",
    "        fc_input = conv2.view(conv2.size(0), -1)\n",
    "        fc_output = self.layer3(fc_input)\n",
    "        return fc_output"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1.随机输入的数据: torch.Size([1, 3, 32, 32])\n",
      "2.模型输出: torch.Size([1, 10])\n",
      "Wall time: 175 ms\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "LeNet_model = LeNet()\n",
    "data_random = torch.Tensor(range(3*32*32))\n",
    "input_data = data_random.reshape([1,3,32,32])\n",
    "print('1.随机输入的数据:', input_data.shape)\n",
    "output = LeNet_model(Variable(input_data))\n",
    "print('2.模型输出:', output.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 2.2 AlexNet\n",
    "2012年ILSVRC冠军网络(8层)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "表格参数设置(与函数对应)：\n",
    "- 卷积 [in_channels,out_channels,kernel_size,stride,padding]\n",
    "\n",
    "- 池化 [kernel_size,stride,padding]\n",
    "\n",
    "|结构|参数设置|数据维度|\n",
    "|-|-|-|\n",
    "|input||[batch_size,3,227,227]|\n",
    "|conv1|[3,96,11,4,padding=0]  |[batch_size,96,55,55]|\n",
    "|conv2|[96,256,5,1,padding=2] |[batch_size,256,55,55]|\n",
    "|MaxPool|[3,2]                |[batch_size,256,27,27]|\n",
    "|conv3|[256,384,3,1,padding=1]|[batch_size,384,27,27]|\n",
    "|MaxPool|[3,2]                |[batch_size,384,13,13]|\n",
    "|conv4|[384,384,3,1,padding=1]|[batch_size,384,13,13]|\n",
    "|conv5|[384,256,3,1,padding=1]|[batch_size,256,13,13]|\n",
    "|MaxPool|[3,2]                |[batch_size,256,6,6]|\n",
    "|dense1|[256\\*6\\*6,4096]      |[batch_size,4096]|\n",
    "|dense2|[4096,4096]           |[batch_size,4096]|\n",
    "|dense3|[4096,1000]           |[batch_size,1000]|"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 定义AlexNet网络\n",
    "class AlexNet(nn.Module):\n",
    "    def __init__(self):\n",
    "        super().__init__()    # 输入 [b,3,227,227]\n",
    "        # 定义第1个卷积层\n",
    "        self.conv1 = nn.Sequential(\n",
    "            nn.Conv2d(3, 96, 11, 4, 0),    # get [b,96,55,55]\n",
    "            nn.ReLU(True)\n",
    "        )\n",
    "        # 定义第2个卷积层\n",
    "        self.conv2 = nn.Sequential(\n",
    "            nn.Conv2d(96, 256, 5, 1, 2),   # get [b,256,55,55]\n",
    "            nn.ReLU(True),\n",
    "            nn.MaxPool2d(3,2)              # get [b,256,27,27]\n",
    "        )\n",
    "        # 定义第3个卷积层\n",
    "        self.conv3 = nn.Sequential(\n",
    "            nn.Conv2d(256, 384, 3, 1, 1),  # get [b,384,27,27]\n",
    "            nn.ReLU(True),\n",
    "            nn.MaxPool2d(3, 2)             # get [b,384,13,13]\n",
    "        )\n",
    "        # 定义第4个卷积层\n",
    "        self.conv4 = nn.Sequential(\n",
    "            nn.Conv2d(384, 384, 3, 1, 1),  # get [b,384,13,13]\n",
    "            nn.ReLU(True),\n",
    "        )\n",
    "        # 定义第5个卷积层\n",
    "        self.conv5 = nn.Sequential(\n",
    "            nn.Conv2d(384, 256, 3, 1, 1),  # get [b,256,13,13]\n",
    "            nn.ReLU(True),\n",
    "            nn.MaxPool2d(3, 2)             # get [b,256,6,6]\n",
    "        )\n",
    "        # 定义6,7,8层全连接层\n",
    "        self.dense = nn.Sequential(\n",
    "            nn.Linear(256*6*6, 4096),      # get [b,4096]，需先reshape[b,9216]\n",
    "            nn.ReLU(True),\n",
    "            nn.Dropout(0.5),               # dropout减少正则化\n",
    "            nn.Linear(4096, 4096),         # get [b,4096]\n",
    "            nn.ReLU(True),\n",
    "            nn.Dropout(0.5),\n",
    "            nn.Linear(4096, 1000)          # get [b,1000]\n",
    "        )\n",
    "    def forward(self, x):\n",
    "        conv1_out = self.conv1(x)\n",
    "        conv2_out = self.conv2(conv1_out)\n",
    "        conv3_out = self.conv3(conv2_out)\n",
    "        conv4_out = self.conv4(conv3_out)\n",
    "        conv5_out = self.conv5(conv4_out)\n",
    "        dense_input = conv5_out.view(conv5_out.size(0), -1)  # reshape 为一维\n",
    "        out = self.dense(dense_input)\n",
    "        return out"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1.随机输入的数据: torch.Size([1, 3, 227, 227])\n",
      "2.模型输出: torch.Size([1, 1000])\n",
      "Wall time: 743 ms\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "AlexNet_model = AlexNet()\n",
    "# print(model)\n",
    "data_random = torch.Tensor(range(3*227*227))\n",
    "input = data_random.reshape([1,3,227,227])\n",
    "print('1.随机输入的数据:', input.shape)\n",
    "output = AlexNet_model(Variable(input))\n",
    "print('2.模型输出:', output.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 2.3 VGGNet\n",
    "ImageNet 2014年亚军，使用小滤波器和更深层网络"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "VGG16(16层)网络表格参数设置(与函数对应)：\n",
    "- 卷积 [in_channels,out_channels,kernel_size,stride,padding]\n",
    "\n",
    "- 池化 [kernel_size,stride,padding]\n",
    "\n",
    "|结构|参数设置|数据维度|\n",
    "|-|-|-|\n",
    "|input||[batch_size,3,224,224]|\n",
    "|conv1|[3,64,3,1,padding=1]   |[batch_size,64,224,224]|\n",
    "|conv2|[64,64,3,1,padding=1]  |[batch_size,64,224,224]|\n",
    "|MaxPool|[2,2]                |[batch_size,64,112,112]|\n",
    "|conv3|[64,128,3,1,padding=1] |[batch_size,128,112,112]|\n",
    "|conv4|[128,128,3,1,padding=1]|[batch_size,128,112,112]|\n",
    "|MaxPool|[2,2]                |[batch_size,128,56,56]|\n",
    "|conv5|[128,256,3,1,padding=1]|[batch_size,256,56,56]|\n",
    "|conv6|[256,256,3,1,padding=1]|[batch_size,256,56,56]|\n",
    "|conv7|[256,256,3,1,padding=1]|[batch_size,256,56,56]|\n",
    "|MaxPool|[2,2]                |[batch_size,256,28,28]|\n",
    "|conv8|[256,512,3,1,padding=1]|[batch_size,512,28,28]|\n",
    "|conv9|[512,512,3,1,padding=1]|[batch_size,512,28,28]|\n",
    "|conv10|[512,512,3,1,padding=1]|[batch_size,512,28,28]|\n",
    "|MaxPool|[2,2]                 |[batch_size,512,14,14]|\n",
    "|conv11|[512,512,3,1,padding=1]|[batch_size,512,14,14]|\n",
    "|conv12|[512,512,3,1,padding=1]|[batch_size,512,14,14]|\n",
    "|conv13|[512,512,3,1,padding=1]|[batch_size,512,14,14]|\n",
    "|MaxPool|[2,2]                |[batch_size,512,7,7]|\n",
    "|dense1|[512\\*7\\*7,4096]      |[batch_size,4096]|\n",
    "|dense2|[4096,4096]           |[batch_size,4096]|\n",
    "|dense3|[4096,1000]           |[batch_size,1000]|"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "小结:\n",
    "- VGGNet中使用$3\\times3$的卷积核，stride=1，padding=1，所以卷积完后图像的宽和高不变，仅通道数改变\n",
    "- VGGNet中使用$2\\times2$的最大池化核,stride=2，所以池化后图像的宽和高会减为一半，但通道数不会改变"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 定义VGG-16的网络结构\n",
    "# 方法一，笨办法，逐层定义\n",
    "class VGG(nn.Module):\n",
    "    def __init__(self):\n",
    "        super().__init__()                 # 输入 [b,3,224,224]\n",
    "        # conv参数 [in_channels,out_channels,kernel_size,stride,padding]\n",
    "        # maxpool参数 [kernel_size, stride]\n",
    "        self.features = nn.Sequential(\n",
    "            nn.Conv2d(3, 64, 3, 1, 1),     # get [b,64,224,224]\n",
    "            nn.ReLU(True),\n",
    "            nn.Conv2d(64, 64, 3, 1, 1),    # get [b,64,224,224]\n",
    "            nn.ReLU(True),\n",
    "            \n",
    "            nn.MaxPool2d(2, 2),            # get [b,64,112,112]\n",
    "            \n",
    "            nn.Conv2d(64, 128, 3, 1, 1),   # get [b,128,112,112]\n",
    "            nn.ReLU(True),\n",
    "            nn.Conv2d(128, 128, 3, 1, 1),  # get [b,128,112,112]\n",
    "            nn.ReLU(True),\n",
    "            nn.MaxPool2d(2, 2),            # get [b,128,56,56]\n",
    "            \n",
    "            nn.Conv2d(128, 256, 3, 1, 1),  # get [b,256,56,56]\n",
    "            nn.ReLU(True),\n",
    "            nn.Conv2d(256, 256, 3, 1, 1),  # get [b,256,56,56]\n",
    "            nn.ReLU(True),\n",
    "            nn.Conv2d(256, 256, 3, 1, 1),  # get [b,256,56,56]\n",
    "            nn.ReLU(True),\n",
    "            nn.MaxPool2d(2, 2),            # get [b,256,28,28]\n",
    "            \n",
    "            nn.Conv2d(256, 512, 3, 1, 1),  # get [b,512,28,28]\n",
    "            nn.ReLU(True),\n",
    "            nn.Conv2d(512, 512, 3, 1, 1),  # get [b,512,28,28]\n",
    "            nn.ReLU(True),\n",
    "            nn.Conv2d(512, 512, 3, 1, 1),  # get [b,512,28,28]\n",
    "            nn.ReLU(True),\n",
    "            nn.MaxPool2d(2, 2),            # get [b,512,14,14]\n",
    "            \n",
    "            nn.Conv2d(512, 512, 3, 1, 1),  # get [b,512,14,14]\n",
    "            nn.ReLU(True),\n",
    "            nn.Conv2d(512, 512, 3, 1, 1),  # get [b,512,14,14]\n",
    "            nn.ReLU(True),\n",
    "            nn.Conv2d(512, 512, 3, 1, 1),  # get [b,512,14,14]\n",
    "            nn.ReLU(True),\n",
    "            nn.MaxPool2d(2, 2)            # get [b,512,7,7]    \n",
    "        )\n",
    "        # 将卷积层reshape为一维向量\n",
    "        self.classifier = nn.Sequential(\n",
    "            nn.Linear(512*7*7, 4096),     # get [b,4096]\n",
    "            nn.ReLU(True),\n",
    "            nn.Dropout(0.5),\n",
    "            nn.Linear(4096, 4096),         # get [b,4096]\n",
    "            nn.ReLU(True),\n",
    "            nn.Dropout(0.5),\n",
    "            nn.Linear(4096, 1000)         # get [b,1000]\n",
    "        )\n",
    "    def forward(self, x):\n",
    "        conv_features = self.features(x)   # 卷积层得到特征\n",
    "        # reshape 为一维向量\n",
    "        tense_input = conv_features.view(conv_features.size(0), -1)\n",
    "        output = self.classifier(tense_input)\n",
    "        return output"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1.随机输入的数据: torch.Size([1, 3, 224, 224])\n",
      "2.模型输出: torch.Size([1, 1000])\n",
      "Wall time: 1.92 s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "VGG16_model = VGG()\n",
    "# print(model)\n",
    "data_random = torch.Tensor(range(3*224*224))\n",
    "input = data_random.reshape([1,3,224,224])\n",
    "print('1.随机输入的数据:', input.shape)\n",
    "output = VGG16_model(Variable(input))\n",
    "print('2.模型输出:', output.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 方法二，完整的定义VGG网络\n",
    "# 定义VGGNet的4种结构\n",
    "cfg = {\n",
    "    'VGG11':[64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n",
    "    'VGG13':[64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],\n",
    "    'VGG16':[64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512,\\\n",
    "             512, 512, 'M'],\n",
    "    'VGG19':[64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, \\\n",
    "             'M', 512, 512, 512, 512, 'M']    \n",
    "}\n",
    "class VGG(nn.Module):\n",
    "    def __init__(self, vgg_name, num_classes=10):\n",
    "        super().__init__()    # 输入 [b,3,224,224]\n",
    "        self.features = self._make_layers(cfg[vgg_name])    # 卷积层提取特征\n",
    "        self.classifier = nn.Sequential(\n",
    "            # fc6\n",
    "            nn.Linear(512*7*7, 4096),        # get [b,4096]\n",
    "            nn.ReLU(True),\n",
    "            nn.Dropout(),\n",
    "            # fc7\n",
    "            nn.Linear(4096, 4096),           # get [b,4096]\n",
    "            nn.ReLU(True),\n",
    "            nn.Dropout(),\n",
    "            # fc8\n",
    "            nn.Linear(4096, num_classes)     # get [b,num_classes]          \n",
    "        )\n",
    "        self._initialize_weights()           # 初始化权重！！！\n",
    "    def forward(self, x):\n",
    "        conv_features = self.features(x)     # 卷积提取特征 \n",
    "        tense_input = conv_features.view(conv_features.size(0), -1)   # reshape为一维向量\n",
    "        output = self.classifier(tense_input)\n",
    "        return output\n",
    "    \n",
    "    # 生成网络的层信息\n",
    "    def _make_layers(self, net_cfg):         \n",
    "        layers = []    # 将网络的结构写入到列表中\n",
    "        in_channels = 3\n",
    "        for x in net_cfg:\n",
    "            if x == 'M':\n",
    "                layers += [nn.MaxPool2d(kernel_size=2, stride=2)]    # 将结构append到列表中\n",
    "            else:\n",
    "                layers += [nn.Conv2d(in_channels, x, kernel_size=3, stride=1, padding=1),\n",
    "                           nn.BatchNorm2d(x),\n",
    "                           nn.ReLU(inplace=True)]\n",
    "                in_channels = x                # 令下一层的输入为上层的输出\n",
    "        # 有了均值池化层，可以减少全连接层的个数！！！！！！\n",
    "#         layers += [nn.AvgPool2d(kernel_size=1, stride=1)] \n",
    "        return nn.Sequential(*layers)          # *代表可变参数，列表用*，字典用**\n",
    "    \n",
    "    # 初始化网络权重\n",
    "    def _initialize_weights(self):\n",
    "        for m in self.modules():               # 访问网络的各个模块\n",
    "            if isinstance(m, nn.Conv2d):\n",
    "                n = m.kernel_size[0]*m.kernel_size[1]*m.out_channels\n",
    "                m.weight.data.normal_(0, math.sqrt(2.0 /n ))    # 卷积权重初始化方法\n",
    "                if m.bias is not None:\n",
    "                    m.bias.data.zero_()                         # 卷积偏置初始为0\n",
    "            elif isinstance(m, nn.BatchNorm2d):\n",
    "                m.weight.data.fill_(1)              # BN层权重初始为1\n",
    "                m.bias.data.zero_()                 # BN层偏置初始为0\n",
    "            elif isinstance(m, nn.Linear):\n",
    "                m.weight.data.normal_(0, 0.01)      # fc层权重初始为高斯分布\n",
    "                m.bias.data.zero_()                 # fc层偏置初始为0                "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1.网络的输出: torch.Size([1, 10])\n",
      "Wall time: 3.03 s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "vgg16_net = VGG('VGG16')\n",
    "# print(vgg16_net)\n",
    "input = torch.randn([1,3,224,224])\n",
    "output = vgg16_net(Variable(input))\n",
    "print('1.网络的输出:', output.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 2.4 GoogLeNet\n",
    "又称InceptionNet(22层)，2014年ImageNet冠军，参数比AlexNet少12倍"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "<center>GoogLeNet(Inception_v1)网络配置</center>\n",
    "<img src=\"./image/googlenet_cfg.png\" width=\"90%\" height=\"90%\">\n",
    "<img src=\"./image/Inception_module.png\" width=\"70%\" height=\"70%\">\n",
    "\n",
    "- 使用多尺度卷积，将相关性强的特征汇聚到一起进行融合，增强了特征的表达能力，减少了单卷积核造成的稀疏性，该方法相当于对稀疏的矩阵分解为稠密的子矩阵进行运算，提高了计算的效率\n",
    "- 多种卷积核提取特征，得到的feature map宽和高相同且**与输入一致**，但深度不同，最后各卷积核的结果在**深度**方向上融合，**尺寸不变，深度增加**\n",
    "- $3\\times3$和$5\\times5$卷积前先使用$1\\times1$卷积核对输入数据进行降维(只有深度上变化)，可以减少网络参数，并不影响性能\n",
    "- 在Inception_v2中使用$n\\times1$与$1\\times n$代替$n\\times n$的卷积核，但该分解在中度大小的feature map(12~20)效果会很好，在网络前面的层尽量少用\n",
    "- 在网络的前几层，尽量得到较大的特征图，使深度逐渐加深，避免表达瓶颈(数据维度急剧降低，损失有效信息)\n",
    "- **完整的卷积公式**：$w_{out}=floor[(w_{input}-filter+2*padding)\\ /\\ stride] + 1 $，Inception输入维度为$224\\times224\\times3$，第一个卷积核为7，步长为2，padding为3，根据该公式卷积后得到的feature map大小为$floor(111.5)+1=112$"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 定义Inception_v1网络结构\n",
    "# 1.定义Inception模块部分\n",
    "# 卷积函数默认stride=1，padding=0\n",
    "class Inception(nn.Module):\n",
    "    def __init__(self, in_channels, n1x1, n3x3_r, n3x3, n5x5_r, n5x5, pool3x3):\n",
    "        super().__init__()    # 输入深度为 in_channels, 剩下参数为各分支的输出深度，宽和高不变\n",
    "        # 1x1 conv branch\n",
    "        self.b1 = nn.Sequential(\n",
    "            nn.Conv2d(in_channels, n1x1, kernel_size=1, stride=1, padding=0),    # get depth n1x1\n",
    "            nn.BatchNorm2d(n1x1),\n",
    "            nn.ReLU(True)\n",
    "        )\n",
    "        \n",
    "        # 1x1 conv -> 3x3 conv branch\n",
    "        self.b2 = nn.Sequential(\n",
    "            nn.Conv2d(in_channels, n3x3_r, kernel_size=1, stride=1, padding=0),  # get depth n3x3_r\n",
    "            nn.BatchNorm2d(n3x3_r),\n",
    "            nn.ReLU(True),\n",
    "            nn.Conv2d(n3x3_r, n3x3, kernel_size=3, stride=1, padding=1),         # get depth n3x3\n",
    "            nn.BatchNorm2d(n3x3),\n",
    "            nn.ReLU(True)\n",
    "        )\n",
    "        \n",
    "        # 1x1 conv -> 5x5 conv branch，用两个3x3的代替5x5卷积核\n",
    "        self.b3 = nn.Sequential(\n",
    "            nn.Conv2d(in_channels, n5x5_r, kernel_size=1, stride=1, padding=0),   # get depth n5x5_r\n",
    "            nn.BatchNorm2d(n5x5_r),\n",
    "            nn.ReLU(True),\n",
    "            nn.Conv2d(n5x5_r, n5x5, kernel_size=3, stride=1, padding=1),          # get depth n5x5\n",
    "            nn.BatchNorm2d(n5x5),\n",
    "            nn.ReLU(True),\n",
    "            nn.Conv2d(n5x5, n5x5, kernel_size=3, stride=1, padding=1),            # get depth n5x5\n",
    "            nn.BatchNorm2d(n5x5),\n",
    "            nn.ReLU(True),\n",
    "        )\n",
    "        \n",
    "        # 3x3 pool -> 1x1 conv\n",
    "        self.b4 = nn.Sequential(\n",
    "            nn.MaxPool2d(kernel_size=3, stride=1, padding=1),               # get depth in_channels\n",
    "            nn.Conv2d(in_channels, pool3x3, kernel_size=1, stride=1, padding=0),\n",
    "            nn.BatchNorm2d(pool3x3),\n",
    "            nn.ReLU(True)                                                   # get depth pool3x3\n",
    "        )\n",
    "    def forward(self, x):\n",
    "        branch1 = self.b1(x)\n",
    "        branch2 = self.b2(x)\n",
    "        branch3 = self.b3(x)\n",
    "        branch4 = self.b4(x)\n",
    "#         print(branch1.shape, branch2.shape, branch3.shape, branch4.shape,)\n",
    "        return torch.cat([branch1, branch2, branch3, branch4], 1)\n",
    "\n",
    "#定义GoogLeNet的网络结构\n",
    "class GoogLeNet(nn.Module):\n",
    "    def __init__(self, num_classes=10):\n",
    "        super().__init__()                         # 输入 [b,3,224,224]\n",
    "        # 先设置Inception前的卷积层，共 3 层   \n",
    "        self.pre_layers = nn.Sequential(\n",
    "            # 7x7 conv\n",
    "            nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3),     # 向下取整 get [b,64,112,112]\n",
    "            nn.BatchNorm2d(64),\n",
    "            nn.ReLU(True),\n",
    "            nn.MaxPool2d(kernel_size=3, stride=2, padding=1),         # get [b,64,56,56]\n",
    "            \n",
    "            # 1x1 conv -> 3x3 conv\n",
    "            nn.Conv2d(64, 64, kernel_size=1, stride=1, padding=0),    # get [b,64,56,56]\n",
    "            nn.BatchNorm2d(64),\n",
    "            nn.ReLU(True),\n",
    "            nn.Conv2d(64, 192, kernel_size=3, stride=1, padding=1),   # get [b,192,56,56]\n",
    "            nn.BatchNorm2d(192),\n",
    "            nn.ReLU(True),\n",
    "            nn.MaxPool2d(kernel_size=3, stride=2, padding=1)          # get [b,192,28,28]\n",
    "        )\n",
    "        \n",
    "        # Inception模块部分 参数：[in_channels, n1x1, n3x3_r, n3x3, n5x5_r, n5x5, pool3x3]\n",
    "        self.a3 = Inception(192, 64,  96,  128,  16, 32, 32)             # get [b,256,28,28]\n",
    "        self.b3 = Inception(256, 128, 128, 192,  32, 96, 64)             # get [b,480,28,28]   \n",
    "        self.pool3 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)    # get [b,480,14,14]\n",
    "        \n",
    "        self.a4 = Inception(480, 192, 96,  208,  16, 48,  64)             # get [b,512,14,14]\n",
    "        self.b4 = Inception(512, 160, 112, 224,  24, 64,  64)             # get [b,512,14,14]\n",
    "        self.c4 = Inception(512, 128, 128, 256,  24, 64,  64)             # get [b,512,14,14]\n",
    "        self.d4 = Inception(512, 112, 144, 288,  32, 64,  64)             # get [b,528,14,14]\n",
    "        self.e4 = Inception(528, 256, 160, 320,  32, 128, 128)            # get [b,832,14,14]\n",
    "        self.pool4 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)     # get [b,832,7,7]\n",
    "        \n",
    "        self.a5 = Inception(832, 256, 160, 320, 32, 128, 128)             # get [b,832,7,7]\n",
    "        self.b5 = Inception(832, 384, 192, 384, 48, 128, 128)             # get [b,1024,7,7]\n",
    "        self.pool5 = nn.AvgPool2d(kernel_size=7, stride=1, padding=0)     # get [b,1024,1,1]\n",
    "        \n",
    "        self.dropout = nn.Dropout(0.4)                             \n",
    "        self.classifier = nn.Linear(1024, num_classes)                    # get [b,num_classes]\n",
    "    def forward(self, x):   # Inception_v1共22层，这里没有添加a4与d4后的辅助损失，无自定义权重初始化\n",
    "        x = self.pre_layers(x)       # 3层\n",
    "        x = self.a3(x)               # 4层\n",
    "        x = self.b3(x)\n",
    "        x = self.pool3(x)\n",
    "        \n",
    "        x = self.a4(x)               # 10层\n",
    "        x = self.b4(x)\n",
    "        x = self.c4(x)\n",
    "        x = self.d4(x)\n",
    "        x = self.e4(x)\n",
    "        x = self.pool4(x)\n",
    "        \n",
    "        x = self.a5(x)               # 4层\n",
    "        x = self.b5(x)\n",
    "        x = self.pool5(x)\n",
    "        x = self.dropout(x)\n",
    "        x = x.view(x.size(0), -1)\n",
    "        x = self.classifier(x)       # 1层\n",
    "        return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1.Inception模块的输出: torch.Size([1, 256, 224, 224])\n",
      "2.Inception_v1模型的输出: torch.Size([1, 10])\n",
      "Wall time: 989 ms\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "input = torch.randn([1,3,224,224])\n",
    "inception_module = Inception(3, 64, 96, 128, 16, 32, 32)\n",
    "output = inception_module(Variable(input))\n",
    "print('1.Inception模块的输出:', output.shape)\n",
    "inception_v1 = GoogLeNet()\n",
    "output = inception_v1(Variable(input))\n",
    "print('2.Inception_v1模型的输出:', output.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "小结:\n",
    "- Inception模块本身只改变图像的深度，不改变尺寸\n",
    "- 每次在池化后，图像的尺寸会缩小为原来的一半\n",
    "- 网络的最后几层中使用Global Average Pooling，而减少了全连接层的使用，减少参数及过拟合的可能性"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 2.5 ReNet\n",
    "2015年ImageNet冠军，残差网络解决了准确率Degradation问题及梯度消失问题，可以使网络变得更深"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "<center>ResNet网络配置</center>\n",
    "\n",
    "<img src=\"./image/resnet_cfg.jpg\" width=\"85%\" height=\"85%\">\n",
    "\n",
    "\n",
    "- 按照表格中的信息，在bottleneck中1x1的卷积核里，如果使用stride=2就会忽略某些数据，caffe定义的ResNet是这样实现的，但PyTorch中是将第二层3x3卷积核步长设置为2来减小尺寸，两种方法都行吗？？？\n",
    "[PyTorch的实现](https://github.com/kuangliu/pytorch-cifar/blob/master/models/resnet.py) 和\n",
    "[caffe的实现](https://github.com/SnailTyan/caffe-model-zoo/blob/master/ResNet-50/train_val.prototxt)\n",
    "\n",
    "<img src=\"./image/residual_block.jpg\" width=\"40%\" height=\"40%\"> \n",
    "\n",
    "\n",
    "<img src=\"./image/bottleneck.jpg\" width=\"60%\" height=\"60%\">\n",
    "\n",
    "- 使用skip connection连接(或短路连接)的方式构建残差，可以解决degradation问题及梯度消失问题\n",
    "- bottleneck连接的方式可以减少参数\n",
    "- 对于Indentity map部分，如果输入和输出维度一致，直接相加即可，如果不一致有两种方法：(1) 使用zero-padding增加维度，但要做一个pooling保证参数不变；(2) 采用新的映射(projection shortcut)，一般使用1x1的卷积，但会增加计算量和参数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 定义ResNet的网络结构\n",
    "# 定义基本的Block用于18、34层的ResNet网络\n",
    "class BasicBlock(nn.Module):\n",
    "    expansion = 1   # 在输入维度和输出维度相同的块，输入和输出深度相差的倍数\n",
    "    def __init__(self, in_channels, out_channels, stride=1):\n",
    "        super().__init__()             # 输入 [b, in_channels,xx,xx]\n",
    "        # get [b,out_channels,xx,xx] 输入尺寸与stride有关：1 尺寸不变，2 缩为一半\n",
    "        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False)\n",
    "        self.bn1 = nn.BatchNorm2d(out_channels)\n",
    "        # gwt [b,out_channels,xx,xx] 输入尺寸不变\n",
    "        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False)\n",
    "        self.bn2 = nn.BatchNorm2d(out_channels)\n",
    "        \n",
    "        self.residual = nn.Sequential()\n",
    "        if stride != 1 or in_channels != self.expansion*out_channels:\n",
    "            self.residual = nn.Sequential(\n",
    "                # 该句使skip connection的输入输出维度变得一致 self.expansion*out_channels与stride很关键\n",
    "                nn.Conv2d(in_channels, self.expansion*out_channels, kernel_size=1, stride=stride, padding=0, bias=False),\n",
    "                nn.BatchNorm2d(self.expansion*out_channels)\n",
    "            )\n",
    "    def forward(self, x):\n",
    "        out = self.conv1(x)\n",
    "        out = self.bn1(out)\n",
    "        out = F.relu(out)          # 残差块的第一层输出，需要激活\n",
    "        out = self.conv2(out)\n",
    "        out = self.bn2(out)        # 残差块的第二层，没有激活\n",
    "        out += self.residual(x)    # 与残差的identity map即skip connection部分叠加\n",
    "        out = F.relu(out)          # 原输出与skip connection叠加的输出同时做激活\n",
    "        return out\n",
    "# 定义Bottleneck模块\n",
    "class Bottleneck(nn.Module):\n",
    "    expansion = 4                  # 同一组中输入和输出维度的相差倍数\n",
    "    def __init__(self, in_channels, out_channels, stride=1):\n",
    "        super().__init__()\n",
    "        # stride=2 尺寸变为原来一半，stride=1 尺寸保持不变 get [b,out_channels,xx,xx]\n",
    "        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, padding=0, bias=False)\n",
    "        self.bn1 = nn.BatchNorm2d(out_channels)\n",
    "        # 保证尺寸不变，仅改变输出通道 get [b,out_channels,xx,xx]\n",
    "        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False)\n",
    "        self.bn2 = nn.BatchNorm2d(out_channels)\n",
    "        # 保证尺寸不变，深度变为expansion倍 get [b,expansion*out_channels,xx,xx]\n",
    "        self.conv3 = nn.Conv2d(out_channels, self.expansion*out_channels,kernel_size=1, stride=1, padding=0, bias=False)\n",
    "        self.bn3 = nn.BatchNorm2d(self.expansion*out_channels)\n",
    "        \n",
    "        # skip connection部分\n",
    "        self.residual = nn.Sequential()\n",
    "        if stride != 1 or in_channels != self.expansion*out_channels:\n",
    "            self.residual = nn.Sequential(\n",
    "                # 统一深度与尺寸 get [b, self.expansion*out_channels,xx,xx]\n",
    "                nn.Conv2d(in_channels, self.expansion*out_channels, kernel_size=1, stride=stride, padding=0, bias=False),\n",
    "                nn.BatchNorm2d(self.expansion*out_channels)\n",
    "            )\n",
    "    def forward(self, x):\n",
    "        out = self.conv1(x)\n",
    "        out = self.bn1(out)\n",
    "        out = F.relu(out)           # 第一层卷积的输出\n",
    "        out = self.conv2(out)\n",
    "        out = self.bn2(out)\n",
    "        out = F.relu(out)           #第二层卷积的输出\n",
    "        out = self.conv3(out)\n",
    "        out = self.bn3(out)         # 第三层卷积的输出\n",
    "        out += self.residual(x)     # skip connection是直接从输入得到的，所以参数为 x\n",
    "        out = F.relu(out)           # 第三层卷积与residual的叠加结果再激活\n",
    "        return out\n",
    "    \n",
    "# 定义ResNet的结构\n",
    "class ResNet(nn.Module):\n",
    "    # 残差块的类别；每组残差块的个数列表，输出的类别数\n",
    "    def __init__(self, block, num_blocks: list, class_nums=10):\n",
    "        super().__init__()                                                      # 输入 [b,3,224,224]\n",
    "        self.pre_layers = nn.Sequential(\n",
    "            nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False),   # get [b,64,112,112]\n",
    "            nn.BatchNorm2d(64),\n",
    "            nn.ReLU(True),\n",
    "            nn.MaxPool2d(kernel_size=3, stride=2, padding=1)                    # get [b,64,56,56]\n",
    "        )\n",
    "        \n",
    "        # 堆叠残差模块\n",
    "        self.in_channels = 64\n",
    "        self.layer1 = self._make_layer(block, 64,  num_blocks[0], stride=1) # get [b,64*expansion,56,56]\n",
    "        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2) # get [b,128*expansion,28,28]\n",
    "        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2) # get [b,256*expansion,14,14]\n",
    "        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2) # get [b,512*expansion,7,7]\n",
    "        \n",
    "        self.avg_pool = nn.AvgPool2d(kernel_size=7, stride=1, padding=0)    # get [b,512*expansion,1,1]\n",
    "        self.classifier = nn.Linear(512*block.expansion, class_nums)        # get [b,class_nums]\n",
    "        \n",
    "        # 权重初始化\n",
    "        for m in self.modules():\n",
    "            if isinstance(m, nn.Conv2d):\n",
    "                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n",
    "            elif isinstance(m, nn.BatchNorm2d):\n",
    "                nn.init.constant_(m.weight, 1)\n",
    "                nn.init.constant_(m.bias, 0)\n",
    "                \n",
    "    def _make_layer(self, block, out_channels, num_blocks, stride):\n",
    "        layers = []      # 保存网络层为列表\n",
    "        strides = [stride] + [1]*(num_blocks-1)         #一组残差块只有第一个需要改变尺寸，即stride=2\n",
    "        for stride in strides:\n",
    "            layers += [block(self.in_channels, out_channels, stride=stride)]   # 构建残差模块\n",
    "            self.in_channels = block.expansion * out_channels                  # 随着模块堆叠而改变\n",
    "        return nn.Sequential(*layers)                                          # *表示可变参数\n",
    "    \n",
    "    def forward(self, x):\n",
    "        out = self.pre_layers(x)           # 网络首层卷积的结果\n",
    "        out = self.layer1(out)             # 4组残差块的输出\n",
    "        out = self.layer2(out)\n",
    "        out = self.layer3(out)\n",
    "        out = self.layer4(out)\n",
    "        out = self.avg_pool(out)           # 全局池化\n",
    "        out = out.view(out.size(0), -1)    # reshap为一维向量\n",
    "        out = self.classifier(out)         # 进行全连接\n",
    "        return out\n",
    "\n",
    "def ResNet18():\n",
    "    return ResNet(BasicBlock, [2,2,2,2])   # 列表数字代表各组残差块的个数\n",
    "\n",
    "def ResNet34():\n",
    "    return ResNet(BasicBlock, [3,4,6,3])\n",
    "\n",
    "def ResNet50():\n",
    "    return ResNet(Bottleneck, [3,4,6,3])\n",
    "\n",
    "def ResNet101():\n",
    "    return ResNet(Bottleneck, [3,4,23,3])\n",
    "\n",
    "def ResNet152():\n",
    "    return ResNet(Bottleneck, [3,8,36,3])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1.BlasicBlock的输出: torch.Size([1, 64, 112, 112])\n",
      "2.Bottleneck的输出: torch.Size([1, 256, 112, 112])\n",
      "3.ResNet18的输出: torch.Size([1, 10])\n",
      "3.ResNet50的输出: torch.Size([1, 10])\n",
      "Wall time: 1.17 s\n"
     ]
    }
   ],
   "source": [
    "%%time\n",
    "input = Variable(torch.randn([1,3,224,224]))\n",
    "block_module = BasicBlock(3, 64, 2)\n",
    "output = block_module(input)\n",
    "print('1.BlasicBlock的输出:', output.shape)\n",
    "bottleneck_module = Bottleneck(3, 64, 2)\n",
    "output = bottleneck_module(input)\n",
    "print('2.Bottleneck的输出:', output.shape)\n",
    "resnet18 = ResNet18()\n",
    "output = resnet18(input)\n",
    "print('3.ResNet18的输出:', output.shape)\n",
    "resnet50 = ResNet50()\n",
    "output = resnet50(input)\n",
    "print('3.ResNet50的输出:', output.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "小结:\n",
    "- 对于残差块的skip connection的输入输出维度，指尺寸与深度完全一致才表示维度相同\n",
    "- 在残差块中，最后一层卷积的结果与skip connection结果**叠加**后再进行激活\n",
    "- ResNet网络中，图像的宽和高缩减为原来的一半不是使用pooling，而是将卷积核的stride设置为2\n",
    "- 一组残差块中，仅第一个残差块需要将stride设置为2来降维，组内的其他块stride=1"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
