{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [
    "import os\n",
    "import time\n",
    "import json\n",
    "\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "from PIL import Image\n",
    "import matplotlib.pyplot as plt\n",
    "from torchvision.ops.misc import FrozenBatchNorm2d\n",
    "\n",
    "from torchvision import transforms"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "markdown",
   "source": [
    "# 网络框架"
   ],
   "metadata": {}
  },
  {
   "cell_type": "markdown",
   "source": [
    "## ResNet"
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [
    "class Bottleneck(nn.Module):\n",
    "    \"\"\"\n",
    "    ResNet bottleneck residual block (1x1 reduce -> 3x3 -> 1x1 expand).\n",
    "\n",
    "    Args:\n",
    "        in_channel: number of input channels.\n",
    "        out_channel: number of channels of the intermediate conv layers.\n",
    "    Returns:\n",
    "        A tensor with out_channel * expansion channels.\n",
    "    \"\"\"\n",
    "    # Channel expansion factor of the final 1x1 conv relative to out_channel.\n",
    "    expansion = 4\n",
    "\n",
    "    def __init__(self, in_channel, out_channel, stride=1, downsample=None, norm_layer=None):\n",
    "        super(Bottleneck, self).__init__()\n",
    "        if norm_layer is None:\n",
    "            norm_layer = nn.BatchNorm2d\n",
    "\n",
    "        self.conv1 = nn.Conv2d(in_channels=in_channel, out_channels=out_channel,\n",
    "                               kernel_size=1, stride=1, bias=False)  # squeeze channels\n",
    "        self.bn1 = norm_layer(out_channel)\n",
    "        # -----------------------------------------\n",
    "        # The 3x3 conv carries the stride, so any spatial downsampling happens here.\n",
    "        self.conv2 = nn.Conv2d(in_channels=out_channel, out_channels=out_channel,\n",
    "                               kernel_size=3, stride=stride, bias=False, padding=1)\n",
    "        self.bn2 = norm_layer(out_channel)\n",
    "        # -----------------------------------------\n",
    "        self.conv3 = nn.Conv2d(in_channels=out_channel, out_channels=out_channel * self.expansion,\n",
    "                               kernel_size=1, stride=1, bias=False)  # unsqueeze channels\n",
    "        self.bn3 = norm_layer(out_channel * self.expansion)\n",
    "        self.relu = nn.ReLU(inplace=True)\n",
    "        # Optional projection (1x1 conv + norm) applied to the identity branch so it\n",
    "        # matches the main branch's shape; None when shapes already agree.\n",
    "        self.downsample = downsample\n",
    "\n",
    "    def forward(self, x):\n",
    "        # Shortcut branch: project the input when channels/resolution change.\n",
    "        identity = x\n",
    "        if self.downsample is not None:\n",
    "            identity = self.downsample(x)\n",
    "\n",
    "        out = self.conv1(x)\n",
    "        out = self.bn1(out)\n",
    "        out = self.relu(out)\n",
    "\n",
    "        out = self.conv2(out)\n",
    "        out = self.bn2(out)\n",
    "        out = self.relu(out)\n",
    "\n",
    "        # No ReLU before the residual addition; the activation is applied after it.\n",
    "        out = self.conv3(out)\n",
    "        out = self.bn3(out)\n",
    "\n",
    "        out += identity\n",
    "        out = self.relu(out)\n",
    "\n",
    "        return out\n",
    "\n",
    "class ResNet(nn.Module):\n",
    "    \"\"\"\n",
    "    ResNet backbone assembled from a residual block type and per-stage block counts.\n",
    "\n",
    "    Args:\n",
    "        block: residual block class (e.g. Bottleneck); must expose an `expansion` attribute.\n",
    "        blocks_num: number of blocks per stage, e.g. [3, 4, 6, 3] for ResNet-50.\n",
    "        num_classes: output size of the classification head (only used if include_top).\n",
    "        include_top: if True, append global average pooling and a linear classifier.\n",
    "        norm_layer: normalization layer factory; defaults to nn.BatchNorm2d.\n",
    "    \"\"\"\n",
    "    def __init__(self, block, blocks_num, num_classes=1000, include_top=True, norm_layer=None):\n",
    "        super(ResNet, self).__init__()\n",
    "        if norm_layer is None:\n",
    "            norm_layer = nn.BatchNorm2d\n",
    "        self._norm_layer = norm_layer\n",
    "        self.include_top = include_top\n",
    "        # Running input-channel count for the next stage; mutated by _make_layer.\n",
    "        self.in_channel = 64\n",
    "\n",
    "        # Stem: 7x7 stride-2 conv followed by stride-2 max pool (1/4 input resolution).\n",
    "        self.conv1 = nn.Conv2d(3, self.in_channel, kernel_size=7, stride=2,\n",
    "                               padding=3, bias=False)\n",
    "        self.bn1 = norm_layer(self.in_channel)\n",
    "        self.relu = nn.ReLU(inplace=True)\n",
    "        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n",
    "        # Stages 2-4 halve the spatial resolution via stride=2 in their first block.\n",
    "        self.layer1 = self._make_layer(block, 64, blocks_num[0])\n",
    "        self.layer2 = self._make_layer(block, 128, blocks_num[1], stride=2)\n",
    "        self.layer3 = self._make_layer(block, 256, blocks_num[2], stride=2)\n",
    "        self.layer4 = self._make_layer(block, 512, blocks_num[3], stride=2)\n",
    "        if self.include_top:\n",
    "            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))  # output size = (1, 1)\n",
    "            self.fc = nn.Linear(512 * block.expansion, num_classes)\n",
    "        \n",
    "        # Kaiming-normal init for every conv weight (suits the ReLU nonlinearity).\n",
    "        for m in self.modules():\n",
    "            if isinstance(m, nn.Conv2d):\n",
    "                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n",
    "    \n",
    "    def _make_layer(self, block, channel, block_num, stride=1):\n",
    "        \"\"\"\n",
    "        Build one ResNet stage: `block_num` residual blocks in sequence.\n",
    "\n",
    "        `downsample` projects the shortcut (1x1 conv + norm) so its channel count and\n",
    "        spatial size match the block output, making the residual addition possible.\n",
    "        Args:\n",
    "            block: residual block class to instantiate.\n",
    "            channel: intermediate channel count of each Bottleneck in this stage.\n",
    "            block_num: number of blocks in this stage.\n",
    "            stride: stride of the first block (2 for stages that downsample).\n",
    "        \"\"\"\n",
    "        norm_layer = self._norm_layer\n",
    "        downsample = None\n",
    "        # A projection is required whenever the first block changes resolution or channels.\n",
    "        if stride != 1 or self.in_channel != channel * block.expansion:\n",
    "            downsample = nn.Sequential(\n",
    "                nn.Conv2d(self.in_channel, channel * block.expansion, kernel_size=1, stride=stride, bias=False),\n",
    "                norm_layer(channel * block.expansion))\n",
    "\n",
    "        layers = []\n",
    "        # Only the first block of the stage downsamples/projects; the rest keep shape.\n",
    "        layers.append(block(self.in_channel, channel, downsample=downsample,\n",
    "                            stride=stride, norm_layer=norm_layer))\n",
    "        self.in_channel = channel * block.expansion\n",
    "\n",
    "        for _ in range(1, block_num):\n",
    "            layers.append(block(self.in_channel, channel, norm_layer=norm_layer))\n",
    "\n",
    "        return nn.Sequential(*layers)\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = self.conv1(x)\n",
    "        x = self.bn1(x)\n",
    "        x = self.relu(x)\n",
    "        x = self.maxpool(x)\n",
    "\n",
    "        x = self.layer1(x)\n",
    "        x = self.layer2(x)\n",
    "        x = self.layer3(x)\n",
    "        x = self.layer4(x)\n",
    "\n",
    "        # With include_top=False the raw layer4 feature map is returned, which is\n",
    "        # what the FPN backbone builder below relies on.\n",
    "        if self.include_top:\n",
    "            x = self.avgpool(x)         # (bs, 4*512, 1, 1)\n",
    "            x = torch.flatten(x, 1)     # (bs, 4*512)\n",
    "            x = self.fc(x)              # (bs, num_class)\n",
    "\n",
    "        return x\n",
    "    "
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [
    "def overwrite_eps(model, eps):\n",
    "    \"\"\"\n",
    "    This method overwrites the default eps values of all the\n",
    "    FrozenBatchNorm2d layers of the model with the provided value.\n",
    "    This is necessary to address the BC-breaking change introduced\n",
    "    by the bug-fix at pytorch/vision#2933. The overwrite is applied\n",
    "    only when the pretrained weights are loaded to maintain compatibility\n",
    "    with previous versions.\n",
    "\n",
    "    Args:\n",
    "        model (nn.Module): The model on which we perform the overwrite.\n",
    "        eps (float): The new value of eps.\n",
    "    \"\"\"\n",
    "    # modules() iterates recursively, so FrozenBatchNorm2d layers nested inside\n",
    "    # Sequential stages are reached as well.\n",
    "    for module in model.modules():\n",
    "        if isinstance(module, FrozenBatchNorm2d):\n",
    "            module.eps = eps\n",
    "\n",
    "def resnet50_fpn_backbone(pretrain_path=\"\",\n",
    "                          norm_layer=FrozenBatchNorm2d,   # behaves like BatchNorm2d, but its parameters are frozen (never updated)\n",
    "                          trainable_layers=3,\n",
    "                          returned_layers=None,\n",
    "                          extra_blocks=None):\n",
    "    \"\"\"\n",
    "    Build a resnet50_fpn backbone.\n",
    "    Args:\n",
    "        pretrain_path: path to pretrained resnet50 weights; empty string means no loading.\n",
    "        norm_layer: defaults to FrozenBatchNorm2d, a bn layer whose parameters are never\n",
    "                    updated (with a very small batch_size, trainable bn can hurt accuracy,\n",
    "                    so freezing it is preferable). If your GPU memory allows a large\n",
    "                    batch_size, pass the regular nn.BatchNorm2d instead.\n",
    "                    (https://github.com/facebookresearch/maskrcnn-benchmark/issues/267)\n",
    "        trainable_layers: which backbone layers should remain trainable\n",
    "        returned_layers: which layers' outputs should be returned\n",
    "        extra_blocks: extra blocks appended on top of the returned feature maps\n",
    "\n",
    "    Returns:\n",
    "\n",
    "    \"\"\"\n",
    "    # NOTE(review): this function appears truncated — it only constructs the plain\n",
    "    # ResNet-50 body and implicitly returns None; pretrain_path, trainable_layers,\n",
    "    # returned_layers and extra_blocks are never used. TODO: finish the implementation\n",
    "    # (freeze non-trainable layers, load pretrained weights, wrap with an FPN) or\n",
    "    # confirm the remainder lives in a later cell.\n",
    "    resnet_backbone = ResNet(Bottleneck, [3, 4, 6, 3],\n",
    "                             include_top=False,\n",
    "                             norm_layer=norm_layer)\n"
   ],
   "outputs": [],
   "metadata": {}
  }
 ],
 "metadata": {
  "orig_nbformat": 4,
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}