{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import sys\n",
    "import random\n",
    "import time\n",
    "from datetime import datetime\n",
    "\n",
    "import cv2\n",
    "import math\n",
    "if(os.path.exists(\"../input/timm-pytorch-image-models/pytorch-image-models-master\")):\n",
    "    sys.path.append(\"../input/timm-pytorch-image-models/pytorch-image-models-master\")\n",
    "import timm\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "from PIL import Image\n",
    "from scipy.ndimage import rotate, shift\n",
    "\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.utils.checkpoint as cp\n",
    "from collections import OrderedDict\n",
    "from torch.utils.data import DataLoader\n",
    "from torch.utils.data import Dataset as dataset"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# dataloader"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "## 归一化 (0,1)标准化\n",
    "def norm_zero_one(array, span=None):\n",
    "    '''\n",
    "    根据所给数组的最大值、最小值，将数组归一化到0-1\n",
    "    :param array: 数组\n",
    "    :return: array: numpy格式数组\n",
    "    '''\n",
    "    array = np.asarray(array).astype(np.float)\n",
    "    if span is None:\n",
    "        mini = array.min()\n",
    "        maxi = array.max()\n",
    "    else:\n",
    "        mini = span[0]\n",
    "        maxi = span[1]\n",
    "        array[array < mini] = mini\n",
    "        array[array > maxi] = maxi\n",
    "\n",
    "    range = maxi - mini\n",
    "\n",
    "    def norm(x):\n",
    "        return (x - mini) / range\n",
    "\n",
    "    return np.asarray(list(map(norm, array))).astype(np.float)\n",
    "\n",
    "\n",
    "## 加载一张普通格式图片 2D\n",
    "def get_normal_image(path, w, h):\n",
    "    '''\n",
    "    加载一幅普通格式的2D图像，支持格式：.jpg, .jpeg, .tif ...\n",
    "    :param path: 医学图像的路径\n",
    "    :return: array: numpy格式\n",
    "    '''\n",
    "    array = Image.open(path).resize((w, h))\n",
    "    array = np.asarray(array)\n",
    "    return array\n",
    "\n",
    "\n",
    "class Dataset(dataset):\n",
    "    def __init__(self, **config):\n",
    "        super(Dataset, self).__init__()\n",
    "\n",
    "        ## 图像文件夹\n",
    "        self.img_dire = config['data_path']\n",
    "        ## label路径\n",
    "        self.csv_path = config[\"csv_path\"]\n",
    "        ## 图像高、宽\n",
    "        self.h, self.w = config['h'], config['w']\n",
    "        ## 增强倍数 [default:1 不增强]\n",
    "        self.aug_scale = config[\"aug_scale\"]\n",
    "\n",
    "        print('-----------------------------------------------')\n",
    "        print('----------- Loading Training Images -----------')\n",
    "        print('-----------------------------------------------')\n",
    "        print(\"Image dire:    {}\".format(self.img_dire))\n",
    "        print(\"Label path:    {}\".format(self.csv_path))\n",
    "        print(\"Image w:{}   h:{}\".format(self.w, self.h))\n",
    "        print(\"Augment scale: {}\".format(self.aug_scale))\n",
    "        time.sleep(0.5)\n",
    "\n",
    "        ## 只读取csv文件，图片文件索引时读取\n",
    "        self.csv = pd.read_csv(self.csv_path)\n",
    "        self.imsize = len(self.csv) * self.aug_scale\n",
    "        print(\"Load finished! num:{}\".format(self.imsize))\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        ## 索引\n",
    "        index = idx // self.aug_scale\n",
    "\n",
    "        ## 读取图片\n",
    "        img_id = self.csv.iloc[index, 0]\n",
    "        img_path = os.path.join(self.img_dire, img_id)\n",
    "        img = get_normal_image(img_path, self.w, self.h).transpose([2, 0, 1])\n",
    "        img = norm_zero_one(img, span=[0, 255])  ## 归一化\n",
    "\n",
    "        ## 因为使用Dataloader封装，不能是字符串，所以这里将XXXX.jpg前的索引提出\n",
    "        img_id = int(img_id[:-4])\n",
    "\n",
    "        ## 读取标签\n",
    "        lab = self.csv.iloc[index, 1]\n",
    "        lab = torch.tensor(lab, dtype=torch.long)\n",
    "\n",
    "        ## 进行图像增强\n",
    "        if index % self.aug_scale != 0:\n",
    "            random.seed(datetime.now())\n",
    "            angle = random.uniform(-30, 30)\n",
    "            ## 因为前面对img的维度进行转换，所以这里旋转的axes换成了(2,1)\n",
    "            img = rotate(img, angle, axes=(2, 1), reshape=False)\n",
    "\n",
    "            shifts = [30, 30]\n",
    "            x_shift = random.uniform(-shifts[0], shifts[0])\n",
    "            y_shift = random.uniform(-shifts[1], shifts[1])\n",
    "            img = shift(img, shift=[0, x_shift, y_shift])\n",
    "            img = norm_zero_one(img, span=[0.0, 1.0])\n",
    "            img = np.asarray(img).astype(np.float)\n",
    "            img = torch.tensor(img, dtype=torch.float)\n",
    "            return img_id, img, lab\n",
    "\n",
    "        img = np.asarray(img).astype(np.float)\n",
    "        img = torch.tensor(img, dtype=torch.float)\n",
    "        return img_id, img, lab\n",
    "\n",
    "    def __len__(self):\n",
    "        return self.imsize\n",
    "\n",
    "\n",
    "def get_dataloader(**config):\n",
    "    return DataLoader(dataset=Dataset(**config),\n",
    "                      batch_size=config[\"batch_size\"],\n",
    "                      shuffle=config[\"shuffle\"],\n",
    "                      drop_last=config[\"drop_last\"],\n",
    "                      num_workers=config[\"num_workers\"],\n",
    "                      pin_memory=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# model"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## resnext"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "class resnext50_32x4d(nn.Module):\n",
    "    def __init__(self, model_name='resnext50_32x4d',pretrained=False, **kwargs):\n",
    "        super(resnext50_32x4d, self).__init__()\n",
    "        self.kwargs = kwargs\n",
    "        self.model_name = model_name\n",
    "        self.pretrained = pretrained\n",
    "        self.model = timm.create_model(self.model_name, pretrained=self.pretrained)\n",
    "        n_features = self.model.fc.in_features\n",
    "        self.model.fc = nn.Linear(n_features, self.kwargs[\"num_classes\"])\n",
    "        \n",
    "    def forward(self,x):\n",
    "        y = self.model(x)\n",
    "        return y"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## densenet"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "__all__ = ['DenseNet', 'densenet121', 'densenet169', 'densenet201', 'densenet161']\n",
    "\n",
     "class _DenseLayer(nn.Sequential):\n",
     "    def __init__(self, num_input_features, growth_rate, bn_size, drop_rate, memory_efficient=False):  # num_input_features: input channel count\n",
     "        super(_DenseLayer, self).__init__()  # growth_rate (e.g. 32): channels added per layer; bn_size (e.g. 4): bottleneck width factor\n",
     "        # e.g. input (56 x 56 x 64)\n",
     "        self.add_module('norm1', nn.BatchNorm2d(num_input_features)),\n",
     "        self.add_module('relu1', nn.ReLU(inplace=True)),\n",
     "        self.add_module('conv1', nn.Conv2d(num_input_features, bn_size *\n",
     "                                           growth_rate, kernel_size=1, stride=1,\n",
     "                                           bias=False)),\n",
     "        self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate)),\n",
     "        self.add_module('relu2', nn.ReLU(inplace=True)),\n",
     "        self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate,\n",
     "                                           kernel_size=3, stride=1, padding=1,\n",
     "                                           bias=False)),\n",
     "        # output (56 x 56 x growth_rate)\n",
     "        self.drop_rate = drop_rate\n",
     "        self.memory_efficient = memory_efficient\n",
     "\n",
     "    def forward(self, *prev_features):\n",
     "        # bn1 + relu1 + conv1 applied to the concatenation of all previous features\n",
     "        bn_function = _bn_function_factory(self.norm1, self.relu1, self.conv1)\n",
     "        if self.memory_efficient and any(prev_feature.requires_grad for prev_feature in prev_features):\n",
     "            # recompute the bottleneck during backward to save memory (checkpointing)\n",
     "            bottleneck_output = cp.checkpoint(bn_function, *prev_features)\n",
     "        else:\n",
     "            bottleneck_output = bn_function(*prev_features)\n",
     "        # bn2 + relu2 + conv2 on the bottleneck output\n",
     "        new_features = self.conv2(self.relu2(self.norm2(bottleneck_output)))\n",
     "        if self.drop_rate > 0:\n",
     "            new_features = F.dropout(new_features, p=self.drop_rate,\n",
     "                                     training=self.training)\n",
     "        return new_features\n",
    "\n",
    "def _bn_function_factory(norm, relu, conv):\n",
    "    def bn_function(*inputs):\n",
    "        # type(List[Tensor]) -> Tensor\n",
    "        concated_features = torch.cat(inputs, 1)#按通道合并\n",
    "        # bn1 + relu1 + conv1\n",
    "        bottleneck_output = conv(relu(norm(concated_features)))\n",
    "        return bottleneck_output\n",
    "\n",
    "    return bn_function\n",
    "\n",
    "\n",
    "\n",
     "class _DenseBlock(nn.Module):\n",
     "    \"\"\"A stack of _DenseLayers; every layer receives all earlier feature maps.\"\"\"\n",
     "\n",
     "    def __init__(self, num_layers, num_input_features, bn_size, growth_rate, drop_rate, memory_efficient=False):\n",
     "        super(_DenseBlock, self).__init__()  # num_layers: how many dense layers to stack\n",
     "        for i in range(num_layers):\n",
     "            layer = _DenseLayer(\n",
     "                num_input_features + i * growth_rate,   # each layer adds growth_rate input channels\n",
     "                growth_rate=growth_rate,\n",
     "                bn_size=bn_size,\n",
     "                drop_rate=drop_rate,\n",
     "                memory_efficient=memory_efficient,\n",
     "            )\n",
     "            self.add_module('denselayer%d' % (i + 1), layer)    # register the layer as a child module\n",
     "\n",
     "    def forward(self, init_features):\n",
     "        features = [init_features]  # start from the block's input features\n",
     "        for name, layer in self.named_children():   # visit the registered dense layers in order\n",
     "            new_features = layer(*features) # each layer consumes all features so far\n",
     "            features.append(new_features)   # keep the new features for later layers\n",
     "        return torch.cat(features, 1)   # concatenate everything along the channel axis\n",
    "\n",
    "\n",
    "class _Transition(nn.Sequential):\n",
    "    def __init__(self, num_input_features, num_output_features):\n",
    "        super(_Transition, self).__init__()\n",
    "        self.add_module('norm', nn.BatchNorm2d(num_input_features))\n",
    "        self.add_module('relu', nn.ReLU(inplace=True))\n",
    "        self.add_module('conv', nn.Conv2d(num_input_features, num_output_features,\n",
    "                                          kernel_size=1, stride=1, bias=False))\n",
    "        self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))\n",
    "\n",
    "\n",
     "class DenseNet(nn.Module):\n",
     "    r\"\"\"Densenet-BC model class, based on\n",
     "    `\"Densely Connected Convolutional Networks\" <https://arxiv.org/pdf/1608.06993.pdf>`_\n",
     "\n",
     "    Args:\n",
     "        growth_rate (int) - how many filters to add each layer (`k` in paper)\n",
     "        block_config (list of 4 ints) - how many layers in each pooling block\n",
     "        num_init_features (int) - the number of filters to learn in the first convolution layer\n",
     "        bn_size (int) - multiplicative factor for number of bottle neck layers\n",
     "          (i.e. bn_size * k features in the bottleneck layer)\n",
     "        drop_rate (float) - dropout rate after each dense layer\n",
     "        num_classes (int) - number of classification classes\n",
     "        memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,\n",
     "          but slower. Default: *False*.\n",
     "    \"\"\"\n",
     "\n",
     "    def __init__(self, growth_rate=32, block_config=(6, 12, 24, 16),\n",
     "                 num_init_features=64, bn_size=4, drop_rate=0, num_classes=5, memory_efficient=False):\n",
     "\n",
     "        super(DenseNet, self).__init__()\n",
     "\n",
     "        # First convolution (stem): 7x7 conv -> BN -> ReLU -> 3x3 max-pool\n",
     "        self.features = nn.Sequential(OrderedDict([\n",
     "            ('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2,\n",
     "                                padding=3, bias=False)),\n",
     "            ('norm0', nn.BatchNorm2d(num_init_features)),\n",
     "            ('relu0', nn.ReLU(inplace=True)),\n",
     "            ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),\n",
     "        ]))\n",
     "\n",
     "        # Each denseblock\n",
     "        num_features = num_init_features\n",
     "        for i, num_layers in enumerate(block_config):\n",
     "            block = _DenseBlock(\n",
     "                num_layers=num_layers,  # how many dense layers in this block\n",
     "                num_input_features=num_features,    # channels entering the block\n",
     "                bn_size=bn_size,\n",
     "                growth_rate=growth_rate,\n",
     "                drop_rate=drop_rate,    # dropout probability (0 disables it)\n",
     "                memory_efficient=memory_efficient\n",
     "            )\n",
     "            self.features.add_module('denseblock%d' % (i + 1), block)   # register the dense block\n",
     "            num_features = num_features + num_layers * growth_rate  # e.g. 64 + 6*32 = 256 after block 1\n",
     "            if i != len(block_config) - 1:  # a transition follows every dense block except the last\n",
     "                trans = _Transition(num_input_features=num_features,\n",
     "                                    num_output_features=num_features // 2)  # halve the channel count\n",
     "                self.features.add_module('transition%d' % (i + 1), trans)\n",
     "                num_features = num_features // 2    # track the halved (integer) channel count\n",
     "\n",
     "        # Final batch norm\n",
     "        self.features.add_module('norm5', nn.BatchNorm2d(num_features))\n",
     "\n",
     "        # Linear layer (classification head)\n",
     "        self.classifier = nn.Linear(num_features, num_classes)\n",
     "\n",
     "        # Official init from torch repo.\n",
     "        for m in self.modules():\n",
     "            if isinstance(m, nn.Conv2d):\n",
     "                nn.init.kaiming_normal_(m.weight)\n",
     "            elif isinstance(m, nn.BatchNorm2d):\n",
     "                nn.init.constant_(m.weight, 1)\n",
     "                nn.init.constant_(m.bias, 0)\n",
     "            elif isinstance(m, nn.Linear):\n",
     "                nn.init.constant_(m.bias, 0)\n",
     "\n",
     "    def forward(self, x):\n",
     "        features = self.features(x) # backbone feature extractor\n",
     "        out = F.relu(features, inplace=True)\n",
     "        out = F.adaptive_avg_pool2d(out, (1, 1))    # global average pool to 1x1\n",
     "        out = torch.flatten(out, 1)\n",
     "        out = self.classifier(out)  # classification head\n",
     "        return out\n",
    "\n",
    "def _densenet(arch, growth_rate, block_config, num_init_features, pretrained, progress,\n",
    "              **kwargs):\n",
    "    model = DenseNet(growth_rate, block_config, num_init_features, **kwargs)\n",
    "    if pretrained:\n",
    "        _load_state_dict(model, model_urls[arch], progress)\n",
    "    return model\n",
    "\n",
    "\n",
    "def densenet121(pretrained=False, progress=True, **kwargs):\n",
    "    r\"\"\"Densenet-121 model from\n",
    "    `\"Densely Connected Convolutional Networks\" <https://arxiv.org/pdf/1608.06993.pdf>`_\n",
    "\n",
    "    Args:\n",
    "        pretrained (bool): If True, returns a model pre-trained on ImageNet\n",
    "        progress (bool): If True, displays a progress bar of the download to stderr\n",
    "        memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,\n",
    "          but slower. Default: *False*. See `\"paper\" <https://arxiv.org/pdf/1707.06990.pdf>`_\n",
    "    \"\"\"\n",
    "    return _densenet('densenet121', 32, (6, 12, 24, 16), 64, pretrained, progress)\n",
    "\n",
    "\n",
    "def densenet161(pretrained=False, progress=True, **kwargs):\n",
    "    r\"\"\"Densenet-161 model from\n",
    "    `\"Densely Connected Convolutional Networks\" <https://arxiv.org/pdf/1608.06993.pdf>`_\n",
    "\n",
    "    Args:\n",
    "        pretrained (bool): If True, returns a model pre-trained on ImageNet\n",
    "        progress (bool): If True, displays a progress bar of the download to stderr\n",
    "        memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,\n",
    "          but slower. Default: *False*. See `\"paper\" <https://arxiv.org/pdf/1707.06990.pdf>`_\n",
    "    \"\"\"\n",
    "    return _densenet('densenet161', 48, (6, 12, 36, 24), 96, pretrained, progress)\n",
    "\n",
    "\n",
    "def densenet169(pretrained=False, progress=True, **kwargs):\n",
    "    r\"\"\"Densenet-169 model from\n",
    "    `\"Densely Connected Convolutional Networks\" <https://arxiv.org/pdf/1608.06993.pdf>`_\n",
    "\n",
    "    Args:\n",
    "        pretrained (bool): If True, returns a model pre-trained on ImageNet\n",
    "        progress (bool): If True, displays a progress bar of the download to stderr\n",
    "        memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,\n",
    "          but slower. Default: *False*. See `\"paper\" <https://arxiv.org/pdf/1707.06990.pdf>`_\n",
    "    \"\"\"\n",
    "    return _densenet('densenet169', 32, (6, 12, 32, 32), 64, pretrained, progress)\n",
    "\n",
    "\n",
    "def densenet201(pretrained=False, progress=True, **kwargs):\n",
    "    r\"\"\"Densenet-201 model from\n",
    "    `\"Densely Connected Convolutional Networks\" <https://arxiv.org/pdf/1608.06993.pdf>`_\n",
    "\n",
    "    Args:\n",
    "        pretrained (bool): If True, returns a model pre-trained on ImageNet\n",
    "        progress (bool): If True, displays a progress bar of the download to stderr\n",
    "        memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,\n",
    "          but slower. Default: *False*. See `\"paper\" <https://arxiv.org/pdf/1707.06990.pdf>`_\n",
    "    \"\"\"\n",
    "    return _densenet('densenet201', 32, (6, 12, 48, 32), 64, pretrained, progress)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## senet"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "__all__ = ['se_resnet_18', 'se_resnet_34', 'se_resnet_50', 'se_resnet_101', 'se_resnet_152']\n",
    "\n",
     "class ResNet(nn.Module):\n",
     "    \"\"\"Generic ResNet backbone: `block` (SEBasicBlock / SEBottleneck) is\n",
     "    stacked according to `layers`, the per-stage block counts.\"\"\"\n",
     "\n",
     "    def __init__(self, block, layers, num_classes=1000):\n",
     "        self.inplanes = 64  # running input-channel count, updated by _make_layer\n",
     "        super(ResNet, self).__init__()\n",
     "        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n",
     "                               bias=False)\n",
     "        self.bn1 = nn.BatchNorm2d(64)\n",
     "        self.relu = nn.ReLU(inplace=True)\n",
     "        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n",
     "        self.layer1 = self._make_layer(block, 64, layers[0])\n",
     "        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)\n",
     "        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n",
     "        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n",
     "        ## NOTE: the fixed 7x7 pool ties the model to one input size; the\n",
     "        ## se_resnet* builders replace it with AdaptiveAvgPool2d(1)\n",
     "        self.avgpool = nn.AvgPool2d(7, stride=1)\n",
     "        self.fc = nn.Linear(512 * block.expansion, num_classes)\n",
     "\n",
     "        ## He-style init for convs; unit weight / zero bias for batch norms\n",
     "        for m in self.modules():\n",
     "            if isinstance(m, nn.Conv2d):\n",
     "                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n",
     "                m.weight.data.normal_(0, math.sqrt(2. / n))\n",
     "            elif isinstance(m, nn.BatchNorm2d):\n",
     "                m.weight.data.fill_(1)\n",
     "                m.bias.data.zero_()\n",
     "\n",
     "    def _make_layer(self, block, planes, blocks, stride=1):\n",
     "        \"\"\"Build one stage: `blocks` copies of `block`; the first may downsample.\"\"\"\n",
     "        downsample = None\n",
     "        ## a 1x1 projection is needed when spatial size or channel count changes\n",
     "        if stride != 1 or self.inplanes != planes * block.expansion:\n",
     "            downsample = nn.Sequential(\n",
     "                nn.Conv2d(self.inplanes, planes * block.expansion,\n",
     "                          kernel_size=1, stride=stride, bias=False),\n",
     "                nn.BatchNorm2d(planes * block.expansion),\n",
     "            )\n",
     "        layers = []\n",
     "        layers.append(block(self.inplanes, planes, stride, downsample))\n",
     "        self.inplanes = planes * block.expansion\n",
     "        for i in range(1, blocks):\n",
     "            layers.append(block(self.inplanes, planes))\n",
     "        return nn.Sequential(*layers)\n",
     "\n",
     "    def forward(self, x):\n",
     "        ## stem\n",
     "        x = self.conv1(x)\n",
     "        x = self.bn1(x)\n",
     "        x = self.relu(x)\n",
     "        x = self.maxpool(x)\n",
     "        ## four residual stages\n",
     "        x = self.layer1(x)\n",
     "        x = self.layer2(x)\n",
     "        x = self.layer3(x)\n",
     "        x = self.layer4(x)\n",
     "\n",
     "        ## pooled features -> flatten -> classifier\n",
     "        x = self.avgpool(x)\n",
     "        x = x.view(x.size(0), -1)\n",
     "        x = self.fc(x)\n",
     "\n",
     "        return x\n",
    "\n",
    "\n",
    "def conv3x3(in_planes, out_planes, stride=1):\n",
    "    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)\n",
    "\n",
    "\n",
    "class SELayer(nn.Module):\n",
    "    def __init__(self, channel, reduction=16):\n",
    "        super(SELayer, self).__init__()\n",
    "        self.avg_pool = nn.AdaptiveAvgPool2d(1)\n",
    "        self.fc = nn.Sequential(\n",
    "            nn.Linear(channel, channel // reduction, bias=False),\n",
    "            nn.ReLU(inplace=True),\n",
    "            nn.Linear(channel // reduction, channel, bias=False),\n",
    "            nn.Sigmoid()\n",
    "        )\n",
    "\n",
    "    def forward(self, x):\n",
    "        b, c, _, _ = x.size()\n",
    "        y = self.avg_pool(x).view(b, c)\n",
    "        y = self.fc(y).view(b, c, 1, 1)\n",
    "        return x * y.expand_as(x)\n",
    "\n",
    "\n",
     "class SEBasicBlock(nn.Module):\n",
     "    \"\"\"ResNet basic block (two 3x3 convs) with an SE re-weighting step\n",
     "    applied before the residual addition.\"\"\"\n",
     "\n",
     "    expansion = 1  # basic blocks keep the channel count unchanged\n",
     "\n",
     "    def __init__(self, inplanes, planes, stride=1, downsample=None, reduction=16):\n",
     "        super(SEBasicBlock, self).__init__()\n",
     "        self.conv1 = conv3x3(inplanes, planes, stride)\n",
     "        self.bn1 = nn.BatchNorm2d(planes)\n",
     "        self.relu = nn.ReLU(inplace=True)\n",
     "        self.conv2 = conv3x3(planes, planes, 1)\n",
     "        self.bn2 = nn.BatchNorm2d(planes)\n",
     "        self.se = SELayer(planes, reduction)\n",
     "        self.downsample = downsample  # optional 1x1 projection for the shortcut\n",
     "        self.stride = stride\n",
     "\n",
     "    def forward(self, x):\n",
     "        residual = x\n",
     "        out = self.conv1(x)\n",
     "        out = self.bn1(out)\n",
     "        out = self.relu(out)\n",
     "\n",
     "        out = self.conv2(out)\n",
     "        out = self.bn2(out)\n",
     "        out = self.se(out)  # channel re-weighting before the residual add\n",
     "        if self.downsample is not None:\n",
     "            residual = self.downsample(x)  # match the shortcut shape to `out`\n",
     "        out += residual\n",
     "        out = self.relu(out)\n",
     "        return out\n",
    "\n",
    "\n",
     "class SEBottleneck(nn.Module):\n",
     "    \"\"\"ResNet bottleneck block (1x1 -> 3x3 -> 1x1 convs) with an SE\n",
     "    re-weighting step applied before the residual addition.\"\"\"\n",
     "\n",
     "    expansion = 4  # bottleneck blocks output planes * 4 channels\n",
     "\n",
     "    def __init__(self, inplanes, planes, stride=1, downsample=None, reduction=16):\n",
     "        super(SEBottleneck, self).__init__()\n",
     "        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n",
     "        self.bn1 = nn.BatchNorm2d(planes)\n",
     "        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n",
     "                               padding=1, bias=False)\n",
     "        self.bn2 = nn.BatchNorm2d(planes)\n",
     "        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n",
     "        self.bn3 = nn.BatchNorm2d(planes * 4)\n",
     "        self.relu = nn.ReLU(inplace=True)\n",
     "        self.se = SELayer(planes * 4, reduction)\n",
     "        self.downsample = downsample  # optional 1x1 projection for the shortcut\n",
     "        self.stride = stride\n",
     "\n",
     "    def forward(self, x):\n",
     "        residual = x\n",
     "        out = self.conv1(x)\n",
     "        out = self.bn1(out)\n",
     "        out = self.relu(out)\n",
     "\n",
     "        out = self.conv2(out)\n",
     "        out = self.bn2(out)\n",
     "        out = self.relu(out)\n",
     "\n",
     "        out = self.conv3(out)\n",
     "        out = self.bn3(out)\n",
     "        out = self.se(out)  # channel re-weighting before the residual add\n",
     "\n",
     "        if self.downsample is not None:\n",
     "            residual = self.downsample(x)  # match the shortcut shape to `out`\n",
     "        out += residual\n",
     "        out = self.relu(out)\n",
     "        return out\n",
    "\n",
    "\n",
    "def se_resnet18(num_classes=5):\n",
    "    \"\"\"Constructs a ResNet-18 model.\n",
    "    Args:\n",
    "        pretrained (bool): If True, returns a model pre-trained\n",
    "    \"\"\"\n",
    "    model = ResNet(SEBasicBlock, [2, 2, 2, 2], num_classes=num_classes)\n",
    "    model.avgpool = nn.AdaptiveAvgPool2d(1)\n",
    "    return model\n",
    "\n",
    "\n",
    "def se_resnet34(num_classes=5):\n",
    "    \"\"\"Constructs a ResNet-34 model.\n",
    "    Args:\n",
    "        pretrained (bool): If True, returns a model \n",
    "    \"\"\"\n",
    "    model = ResNet(SEBasicBlock, [3, 4, 6, 3], num_classes=num_classes)\n",
    "    model.avgpool = nn.AdaptiveAvgPool2d(1)\n",
    "    return model\n",
    "\n",
    "\n",
    "def se_resnet50(num_classes=5, pretrained=False):\n",
    "    \"\"\"Constructs a ResNet-50 model.\n",
    "    Args:\n",
    "        pretrained (bool): If True, returns a model \n",
    "    \"\"\"\n",
    "    model = ResNet(SEBottleneck, [3, 4, 6, 3], num_classes=num_classes)\n",
    "    model.avgpool = nn.AdaptiveAvgPool2d(1)\n",
    "    return model\n",
    "\n",
    "\n",
    "def se_resnet101(num_classes=5):\n",
    "    \"\"\"Constructs a ResNet-101 model.\n",
    "    Args:\n",
    "        pretrained (bool): If True, returns a model\n",
    "    \"\"\"\n",
    "    model = ResNet(SEBottleneck, [3, 4, 23, 3], num_classes=num_classes)\n",
    "    model.avgpool = nn.AdaptiveAvgPool2d(1)\n",
    "    return model\n",
    "\n",
    "\n",
    "def se_resnet152(**kwargs):\n",
    "    \"\"\"Constructs a ResNet-152 model.\n",
    "    Args:\n",
    "        pretrained (bool): If True, returns a model pre-trained\n",
    "    \"\"\"\n",
    "    model = ResNet(SEBottleneck, [3, 8, 36, 3], num_classes = kwargs[\"num_classes\"])\n",
    "    model.avgpool = nn.AdaptiveAvgPool2d(1)\n",
    "    return model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "catalog = {\n",
    "    \"resnext\": resnext50_32x4d,\n",
    "    \"densenet\": densenet121,\n",
    "    \"senet\": se_resnet152,\n",
    "}\n",
    "\n",
    "def get_model(**config):\n",
    "    return catalog[config[\"model\"]](**config)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# trainer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Trainer:\n",
    "    def __init__(self, **kwargs):\n",
    "        self.kwargs = kwargs\n",
    "        self.epoch = kwargs[\"epoch\"]\n",
    "        self.lr = kwargs[\"lr\"]\n",
    "        \n",
    "        self.dataloader = get_dataloader(**kwargs)\n",
    "        self.model = get_model(**kwargs)\n",
    "        self.opt = self.get_optimizer(kwargs[\"opt\"])\n",
    "        self.loss = self.get_loss_func(kwargs[\"loss\"])\n",
    "        \n",
    "        if(not kwargs[\"check_point_path\"] == \"\"):\n",
    "            self.model.load_state_dict(torch.load(kwargs[\"check_point_path\"]))\n",
    "        \n",
    "    def run(self):\n",
    "        if(torch.cuda.device_count() > 1):\n",
    "            self.model = nn.DataParallel(self.model)\n",
    "        self.model = self.model.cuda()\n",
    "            \n",
    "        for epoch in range(self.epoch):\n",
    "            for i, data in enumerate(self.dataloader):\n",
    "                img = data[1]\n",
    "                label = data[2]\n",
    "                \n",
    "                output = self.model(img.cuda())\n",
    "                self.opt.zero_grad()\n",
    "                loss = self.loss(output, label.cuda())\n",
    "                loss.backward()\n",
    "                self.opt.step()\n",
    "                \n",
    "                if(i % 100 == 0):\n",
    "                    print(i, loss)\n",
    "            \n",
    "            # 默认每5个epoch保存一次\n",
    "            if(epoch >= 4 and (epoch + 1) % 5 == 0):\n",
    "                if(not os.path.exists(\"./output\")):\n",
    "                    os.mkdir(\"./output\")\n",
    "                self.save_model(os.path.join(\"./output\", self.kwargs[\"model\"] + \"_\" + str(epoch)))\n",
    "                \n",
    "        # 清除显存\n",
    "        torch.cuda.empty_cache()\n",
    "                \n",
    "    def get_loss_func(self, name):\n",
    "        lossfc_dict = {'CrossEntropy': nn.CrossEntropyLoss()}\n",
    "        return lossfc_dict.get(name)\n",
    "\n",
    "    def get_optimizer(self, name):\n",
    "        optimizer_dict = {\n",
    "            'SGD': torch.optim.SGD(self.model.parameters(), lr=self.lr),\n",
    "            'Adam': torch.optim.Adam(self.model.parameters(), lr=self.lr),\n",
    "            'AdamW': torch.optim.AdamW(self.model.parameters(), lr=self.lr),\n",
    "        }\n",
    "        return optimizer_dict.get(name)\n",
    "    \n",
    "    def save_model(self, path):\n",
    "        if(torch.cuda.device_count() > 1):\n",
    "            torch.save(self.model.module.state_dict(), path)\n",
    "        else:\n",
    "            torch.save(self.model.state_dict(), path)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# test"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Test:\n",
    "    def __init__(self, **kwargs):\n",
    "        self.kwargs = kwargs\n",
    "        \n",
    "        self.dataloader = get_dataloader(**kwargs)\n",
    "        self.model = get_model(**kwargs)\n",
    "        self.model.load_state_dict(torch.load(kwargs[\"model_path\"]))\n",
    "        self.out_path = kwargs[\"out_path\"]\n",
    "        \n",
    "    def predict(self,):\n",
    "        self.model = self.model.cuda()\n",
    "        \n",
    "        results = []\n",
    "        for i, data in enumerate(self.dataloader):\n",
    "            image = data[1].cuda()\n",
    "            output = self.model(image)\n",
    "            results += torch.max(output.data, 1)[1].cpu().detach().numpy().tolist()\n",
    "        self.results = results\n",
    "        \n",
    "        # 清除显存\n",
    "        torch.cuda.empty_cache()\n",
    "        \n",
    "    def out(self,):\n",
    "        out = pd.read_csv(self.kwargs[\"csv_path\"])\n",
    "        out['label'] = self.results\n",
    "        out.to_csv(self.out_path, index = False)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# main"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "-----------------------------------------------\n",
      "----------- Loading Training Images -----------\n",
      "-----------------------------------------------\n",
      "Image dire:    ./input/train_images\n",
      "Label path:    ./input/train.csv\n",
      "Image w:256   h:256\n",
      "Augment scale: 2\n",
      "Load finished! num:42794\n",
      "0 tensor(1.5902, device='cuda:0', grad_fn=<NllLossBackward>)\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-9-c4eef9418c94>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m     30\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     31\u001b[0m \u001b[0mt1\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mTrainer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m**\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 32\u001b[0;31m \u001b[0mt1\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[0;32m<ipython-input-7-b0bbc14a046b>\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m     27\u001b[0m                 \u001b[0mloss\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mloss\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moutput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlabel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcuda\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     28\u001b[0m                 \u001b[0mloss\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 29\u001b[0;31m                 \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mopt\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstep\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     30\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     31\u001b[0m                 \u001b[0;32mif\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mi\u001b[0m \u001b[0;34m%\u001b[0m \u001b[0;36m100\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/anaconda3/lib/python3.7/site-packages/torch/optim/adamw.py\u001b[0m in \u001b[0;36mstep\u001b[0;34m(self, closure)\u001b[0m\n\u001b[1;32m    106\u001b[0m                     \u001b[0mdenom\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mmax_exp_avg_sq\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msqrt\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m/\u001b[0m \u001b[0mmath\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msqrt\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbias_correction2\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0madd_\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mgroup\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'eps'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    107\u001b[0m                 \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 108\u001b[0;31m                     \u001b[0mdenom\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mexp_avg_sq\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msqrt\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m/\u001b[0m \u001b[0mmath\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msqrt\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbias_correction2\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0madd_\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mgroup\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'eps'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    109\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    110\u001b[0m                 \u001b[0mstep_size\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mgroup\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'lr'\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m/\u001b[0m \u001b[0mbias_correction1\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "config = {\n",
    "    # all\n",
    "    \"batch_size\": 240,\n",
    "    \"h\": 256,\n",
    "    \"w\": 256,\n",
    "    \n",
    "    # dataloader\n",
    "    \"data_path\": \"./input/train_images\",    \n",
    "    \"csv_path\": \"./input/train.csv\",\n",
    "    \"aug_scale\": 2,    # image augmentation multiplier\n",
    "    \"shuffle\": True,    # whether to shuffle the data\n",
    "    \"drop_last\": False,    # whether to drop the last batch when it has fewer than batch_size samples\n",
    "    \"num_workers\": 20,    # number of dataloader worker processes\n",
    "    \n",
    "    # model\n",
    "    \"model\": \"densenet\",\n",
    "    \"num_classes\": 5,\n",
    "    \"check_point_path\": \"\",    # checkpoint path to resume training from (empty = train from scratch)\n",
    "    \n",
    "    # trainer\n",
    "    \"epoch\": 50,\n",
    "    \"opt\": \"AdamW\",\n",
    "    \"loss\": \"CrossEntropy\",\n",
    "    \"lr\": 0.01,\n",
    "    \n",
    "    # test\n",
    "    \"model_path\": \"./output/\",    # model weights path loaded at test time\n",
    "    \"out_path\": \"./submission.csv\",    # submission csv produced by Test.out()\n",
    "}\n",
    "\n",
    "t1 = Trainer(**config)\n",
    "t1.run()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## test demo"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): inference demo deliberately disabled by wrapping it in a\n",
    "# string literal; remove the triple quotes to run it after training.\n",
    "'''\n",
    "config = {\n",
    "    # all\n",
    "    \"batch_size\": 32,\n",
    "    \"h\": 256,\n",
    "    \"w\": 256,\n",
    "    \n",
    "    # dataloader\n",
    "    \"data_path\": \"../input/cassava-leaf-disease-classification/test_images\",\n",
    "    \"csv_path\": \"../input/cassava-leaf-disease-classification/sample_submission.csv\",\n",
    "    \"aug_scale\": 1,\n",
    "    \"shuffle\": False,\n",
    "    \"drop_last\": False,\n",
    "    \"num_workers\": 1,\n",
    "    \n",
    "    # model\n",
    "    \"model\": \"densenet\",\n",
    "    \"num_classes\": 5,\n",
    "    \n",
    "    # trainer\n",
    "    \"epoch\": 50,\n",
    "    \"opt\": \"AdamW\",\n",
    "    \"loss\": \"CrossEntropy\",\n",
    "    \"lr\": 0.01,\n",
    "    \n",
    "    # test\n",
    "    \"model_path\": \"../input/desnet-v0/densenet_39\",\n",
    "    \"out_path\": \"submission.csv\",\n",
    "}\n",
    "\n",
    "t1 = Test(**config)\n",
    "t1.predict()\n",
    "t1.out()\n",
    "'''"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
