{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torchvision\n",
    "import torch.optim as optim\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "from torch.utils.data import DataLoader\n",
    "from train import *\n",
    "from data_utils import *\n",
    "from math import ceil\n",
    "import numpy as np"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "ResNet(\n",
       "  (conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)\n",
       "  (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "  (relu): ReLU(inplace=True)\n",
       "  (maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)\n",
       "  (layer1): Sequential(\n",
       "    (0): Bottleneck(\n",
       "      (conv1): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "      (downsample): Sequential(\n",
       "        (0): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "    )\n",
       "    (1): Bottleneck(\n",
       "      (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "    )\n",
       "    (2): Bottleneck(\n",
       "      (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "    )\n",
       "  )\n",
       "  (layer2): Sequential(\n",
       "    (0): Bottleneck(\n",
       "      (conv1): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "      (downsample): Sequential(\n",
       "        (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)\n",
       "        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "    )\n",
       "    (1): Bottleneck(\n",
       "      (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "    )\n",
       "    (2): Bottleneck(\n",
       "      (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "    )\n",
       "    (3): Bottleneck(\n",
       "      (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "    )\n",
       "  )\n",
       "  (layer3): Sequential(\n",
       "    (0): Bottleneck(\n",
       "      (conv1): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "      (downsample): Sequential(\n",
       "        (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(2, 2), bias=False)\n",
       "        (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "    )\n",
       "    (1): Bottleneck(\n",
       "      (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "    )\n",
       "    (2): Bottleneck(\n",
       "      (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "    )\n",
       "    (3): Bottleneck(\n",
       "      (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "    )\n",
       "    (4): Bottleneck(\n",
       "      (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "    )\n",
       "    (5): Bottleneck(\n",
       "      (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "    )\n",
       "  )\n",
       "  (layer4): Sequential(\n",
       "    (0): Bottleneck(\n",
       "      (conv1): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "      (downsample): Sequential(\n",
       "        (0): Conv2d(1024, 2048, kernel_size=(1, 1), stride=(2, 2), bias=False)\n",
       "        (1): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "    )\n",
       "    (1): Bottleneck(\n",
       "      (conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "    )\n",
       "    (2): Bottleneck(\n",
       "      (conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "    )\n",
       "  )\n",
       "  (avgpool): AdaptiveAvgPool2d(output_size=(1, 1))\n",
       "  (fc): Linear(in_features=2048, out_features=1000, bias=True)\n",
       ")"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.backends.cudnn.benchmark = True\n",
    "resnet50 = torchvision.models.resnet50(pretrained=True)\n",
    "resnet50.cuda()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "resnet50.fc.out_features = 257"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def weights_init(m):\n",
    "    \"\"\"\n",
    "    用于初始化模型的权值和偏置\n",
    "    \"\"\"\n",
    "    classname = m.__class__.__name__\n",
    "    if classname.find('Conv') != -1:\n",
    "        m.weight.data.normal_(0.0, 0.02)\n",
    "    elif classname.find('BatchNorm') != -1:\n",
    "        m.weight.data.normal_(1.0, 0.02)\n",
    "        m.bias.data.fill_(0)\n",
    "    elif classname.find('Linear') != -1:\n",
    "        m.weight.data.normal_(0.0, 0.02)\n",
    "        if m.bias is not None:\n",
    "            m.bias.data.fill_(0.0)\n",
    "\n",
    "# resnet50.apply(weights_init)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "classifier_weights = [resnet50.fc.weight]\n",
    "classifier_biases = [resnet50.fc.bias]\n",
    "features_weights = [\n",
    "    p for n, p in resnet50.named_parameters()\n",
    "    if 'conv' in n\n",
    "]\n",
    "features_weights += [\n",
    "    p for n, p in resnet50.named_parameters()\n",
    "    if 'downsample.0' in n and 'weight' in n\n",
    "]\n",
    "features_bn_weights = [\n",
    "    p for n, p in resnet50.named_parameters()\n",
    "    if 'weight' in n and ('bn' in n or 'downsample.1' in n)\n",
    "]\n",
    "features_bn_biases = [\n",
    "    p for n, p in resnet50.named_parameters()\n",
    "    if 'bias' in n and 'bn' in n\n",
    "]\n",
    "\n",
    "# you can set different learning rates\n",
    "classifier_lr = 1e-2\n",
    "features_lr = 1e-2\n",
    "# but they are not actually used (because lr_scheduler is used)\n",
    "\n",
    "params = [\n",
    "    {'params': classifier_weights, 'lr': classifier_lr, 'weight_decay': 1e-3},\n",
    "    {'params': classifier_biases, 'lr': classifier_lr},\n",
    "    {'params': features_weights, 'lr': features_lr, 'weight_decay': 1e-3},\n",
    "    {'params': features_bn_weights, 'lr': features_lr},\n",
    "    {'params': features_bn_biases, 'lr': features_lr}\n",
    "]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# param = []\n",
    "# for p in resnet50.parameters():\n",
    "#     if p.requires_grad == True:\n",
    "#         param.append(p)\n",
    "\n",
    "optimizer = optim.SGD(params, momentum=0.9, nesterov=True)\n",
    "\n",
    "criterion = nn.CrossEntropyLoss()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "24387\n"
     ]
    }
   ],
   "source": [
    "batch_size = 16\n",
    "train_folder, val_folder = get_folders()\n",
    "\n",
    "train_iterator = DataLoader(\n",
    "    train_folder, batch_size=batch_size, num_workers=0,\n",
    "    shuffle=True, pin_memory=False\n",
    ")\n",
    "\n",
    "val_iterator = DataLoader(\n",
    "    val_folder, batch_size=16, num_workers=0,\n",
    "    shuffle=False, pin_memory=False\n",
    ")\n",
    "\n",
    "# number of training samples\n",
    "train_size = len(train_folder.imgs)\n",
    "print(train_size)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1525\n"
     ]
    }
   ],
   "source": [
     "n_epochs = 50\n",
     "# optimization steps per epoch (ceil: the last batch may be smaller)\n",
     "n_batches = ceil(train_size/batch_size)\n",
     "print(n_batches)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# cyclical cosine annealing\n",
    "# it changes the learning rate on every optimization step\n",
    "# 1e-6 is the minimal learning rate\n",
    "M = 1 \n",
    "# total number of optimization steps\n",
    "T = n_batches*n_epochs \n",
    "# initial learning rates\n",
    "initial1 = 1e-2\n",
    "initial2 = 1e-3\n",
    "\n",
    "\n",
    "def lr_scheduler(optimizer, step):\n",
    "    \n",
    "    global initial1\n",
    "    global initial2\n",
    "    decay = np.cos(np.pi*((step - 1) % (T // M))/(T // M)) + 1.0\n",
    "    \n",
    "    # params of the last fc layer\n",
    "    for param_group in optimizer.param_groups[:2]:\n",
    "        param_group['lr'] = ((initial1 - 1e-6)*decay/2.0) + 1e-6\n",
    "    \n",
    "    # params of the last two resnet blocks\n",
    "    for param_group in optimizer.param_groups[2:]:\n",
    "        param_group['lr'] = ((initial2 - 1e-6)*decay/2.0) + 1e-6\n",
    "    \n",
    "    if (step - 1) % (T // M) == 0 and step != 1:\n",
    "        print('lr is reset')\n",
    "        \n",
    "    return optimizer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "validation_step: 1.00  avg_train_loss: 2.836  avg_val_loss: 1.966  avg_train_acc: 0.429  avg_val_acc: 0.578  time_per_val_step: 401.110\n",
      "saving model for epoch 0.\n",
      "validation_step: 2.00  avg_train_loss: 1.446  avg_val_loss: 1.554  avg_train_acc: 0.653  avg_val_acc: 0.642  time_per_val_step: 314.582\n",
      "validation_step: 3.00  avg_train_loss: 1.172  avg_val_loss: 1.437  avg_train_acc: 0.712  avg_val_acc: 0.682  time_per_val_step: 341.626\n",
      "validation_step: 4.00  avg_train_loss: 1.007  avg_val_loss: 1.629  avg_train_acc: 0.749  avg_val_acc: 0.659  time_per_val_step: 343.527\n",
      "validation_step: 5.00  avg_train_loss: 0.896  avg_val_loss: 1.369  avg_train_acc: 0.774  avg_val_acc: 0.697  time_per_val_step: 323.046\n",
      "validation_step: 6.00  avg_train_loss: 0.795  avg_val_loss: 1.333  avg_train_acc: 0.796  avg_val_acc: 0.701  time_per_val_step: 345.258\n",
      "saving model for epoch 5.\n",
      "validation_step: 7.00  avg_train_loss: 0.723  avg_val_loss: 1.256  avg_train_acc: 0.817  avg_val_acc: 0.722  time_per_val_step: 342.182\n",
      "validation_step: 8.00  avg_train_loss: 0.663  avg_val_loss: 1.424  avg_train_acc: 0.826  avg_val_acc: 0.695  time_per_val_step: 346.426\n",
      "validation_step: 9.00  avg_train_loss: 0.626  avg_val_loss: 1.328  avg_train_acc: 0.837  avg_val_acc: 0.715  time_per_val_step: 346.349\n",
      "validation_step: 10.00  avg_train_loss: 0.573  avg_val_loss: 1.237  avg_train_acc: 0.852  avg_val_acc: 0.738  time_per_val_step: 346.403\n",
      "validation_step: 11.00  avg_train_loss: 0.528  avg_val_loss: 1.252  avg_train_acc: 0.865  avg_val_acc: 0.734  time_per_val_step: 344.085\n",
      "saving model for epoch 10.\n",
      "validation_step: 12.00  avg_train_loss: 0.493  avg_val_loss: 1.304  avg_train_acc: 0.873  avg_val_acc: 0.722  time_per_val_step: 343.864\n",
      "validation_step: 13.00  avg_train_loss: 0.460  avg_val_loss: 1.179  avg_train_acc: 0.879  avg_val_acc: 0.733  time_per_val_step: 345.810\n",
      "validation_step: 14.00  avg_train_loss: 0.418  avg_val_loss: 1.223  avg_train_acc: 0.892  avg_val_acc: 0.735  time_per_val_step: 345.455\n",
      "validation_step: 15.00  avg_train_loss: 0.368  avg_val_loss: 1.322  avg_train_acc: 0.905  avg_val_acc: 0.729  time_per_val_step: 347.130\n",
      "validation_step: 16.00  avg_train_loss: 0.366  avg_val_loss: 1.230  avg_train_acc: 0.906  avg_val_acc: 0.744  time_per_val_step: 346.996\n",
      "saving model for epoch 15.\n",
      "validation_step: 17.00  avg_train_loss: 0.316  avg_val_loss: 1.165  avg_train_acc: 0.922  avg_val_acc: 0.754  time_per_val_step: 347.533\n",
      "validation_step: 18.00  avg_train_loss: 0.301  avg_val_loss: 1.223  avg_train_acc: 0.925  avg_val_acc: 0.746  time_per_val_step: 345.537\n",
      "validation_step: 19.00  avg_train_loss: 0.283  avg_val_loss: 1.105  avg_train_acc: 0.930  avg_val_acc: 0.762  time_per_val_step: 347.429\n",
      "validation_step: 20.00  avg_train_loss: 0.268  avg_val_loss: 1.168  avg_train_acc: 0.937  avg_val_acc: 0.750  time_per_val_step: 347.136\n",
      "validation_step: 21.00  avg_train_loss: 0.228  avg_val_loss: 1.126  avg_train_acc: 0.945  avg_val_acc: 0.756  time_per_val_step: 347.272\n",
      "saving model for epoch 20.\n",
      "validation_step: 22.00  avg_train_loss: 0.214  avg_val_loss: 1.003  avg_train_acc: 0.952  avg_val_acc: 0.783  time_per_val_step: 347.179\n",
      "validation_step: 23.00  avg_train_loss: 0.195  avg_val_loss: 1.025  avg_train_acc: 0.956  avg_val_acc: 0.771  time_per_val_step: 346.534\n",
      "validation_step: 24.00  avg_train_loss: 0.178  avg_val_loss: 1.101  avg_train_acc: 0.961  avg_val_acc: 0.768  time_per_val_step: 346.824\n",
      "validation_step: 25.00  avg_train_loss: 0.157  avg_val_loss: 1.023  avg_train_acc: 0.966  avg_val_acc: 0.775  time_per_val_step: 313.211\n",
      "validation_step: 26.00  avg_train_loss: 0.151  avg_val_loss: 1.081  avg_train_acc: 0.968  avg_val_acc: 0.772  time_per_val_step: 297.339\n",
      "saving model for epoch 25.\n",
      "validation_step: 27.00  avg_train_loss: 0.135  avg_val_loss: 1.067  avg_train_acc: 0.973  avg_val_acc: 0.779  time_per_val_step: 298.319\n",
      "validation_step: 28.00  avg_train_loss: 0.134  avg_val_loss: 0.982  avg_train_acc: 0.973  avg_val_acc: 0.782  time_per_val_step: 299.538\n",
      "validation_step: 29.00  avg_train_loss: 0.121  avg_val_loss: 1.036  avg_train_acc: 0.978  avg_val_acc: 0.776  time_per_val_step: 298.505\n",
      "validation_step: 30.00  avg_train_loss: 0.118  avg_val_loss: 1.049  avg_train_acc: 0.978  avg_val_acc: 0.775  time_per_val_step: 298.643\n",
      "validation_step: 31.00  avg_train_loss: 0.108  avg_val_loss: 0.947  avg_train_acc: 0.981  avg_val_acc: 0.784  time_per_val_step: 298.856\n",
      "saving model for epoch 30.\n",
      "validation_step: 32.00  avg_train_loss: 0.098  avg_val_loss: 1.066  avg_train_acc: 0.984  avg_val_acc: 0.785  time_per_val_step: 295.630\n",
      "validation_step: 33.00  avg_train_loss: 0.094  avg_val_loss: 1.049  avg_train_acc: 0.984  avg_val_acc: 0.781  time_per_val_step: 289.617\n",
      "validation_step: 34.00  avg_train_loss: 0.090  avg_val_loss: 0.927  avg_train_acc: 0.986  avg_val_acc: 0.793  time_per_val_step: 290.728\n",
      "validation_step: 35.00  avg_train_loss: 0.085  avg_val_loss: 0.953  avg_train_acc: 0.988  avg_val_acc: 0.786  time_per_val_step: 290.186\n",
      "validation_step: 36.00  avg_train_loss: 0.082  avg_val_loss: 0.961  avg_train_acc: 0.988  avg_val_acc: 0.788  time_per_val_step: 289.975\n",
      "saving model for epoch 35.\n",
      "validation_step: 37.00  avg_train_loss: 0.079  avg_val_loss: 0.971  avg_train_acc: 0.988  avg_val_acc: 0.774  time_per_val_step: 290.273\n",
      "validation_step: 38.00  avg_train_loss: 0.076  avg_val_loss: 0.998  avg_train_acc: 0.989  avg_val_acc: 0.791  time_per_val_step: 289.641\n",
      "validation_step: 39.00  avg_train_loss: 0.078  avg_val_loss: 1.013  avg_train_acc: 0.988  avg_val_acc: 0.785  time_per_val_step: 289.738\n",
      "validation_step: 40.00  avg_train_loss: 0.070  avg_val_loss: 1.030  avg_train_acc: 0.991  avg_val_acc: 0.792  time_per_val_step: 293.392\n",
      "validation_step: 41.00  avg_train_loss: 0.073  avg_val_loss: 1.031  avg_train_acc: 0.990  avg_val_acc: 0.777  time_per_val_step: 300.242\n",
      "saving model for epoch 40.\n",
      "validation_step: 42.00  avg_train_loss: 0.068  avg_val_loss: 0.936  avg_train_acc: 0.991  avg_val_acc: 0.797  time_per_val_step: 297.577\n",
      "validation_step: 43.00  avg_train_loss: 0.066  avg_val_loss: 0.948  avg_train_acc: 0.991  avg_val_acc: 0.788  time_per_val_step: 297.790\n",
      "validation_step: 44.00  avg_train_loss: 0.067  avg_val_loss: 0.949  avg_train_acc: 0.992  avg_val_acc: 0.799  time_per_val_step: 298.374\n",
      "validation_step: 45.00  avg_train_loss: 0.065  avg_val_loss: 0.954  avg_train_acc: 0.992  avg_val_acc: 0.786  time_per_val_step: 296.942\n",
      "validation_step: 46.00  avg_train_loss: 0.063  avg_val_loss: 0.899  avg_train_acc: 0.992  avg_val_acc: 0.799  time_per_val_step: 288.577\n",
      "saving model for epoch 45.\n",
      "validation_step: 47.00  avg_train_loss: 0.062  avg_val_loss: 0.967  avg_train_acc: 0.992  avg_val_acc: 0.783  time_per_val_step: 290.018\n",
      "validation_step: 48.00  avg_train_loss: 0.065  avg_val_loss: 0.962  avg_train_acc: 0.992  avg_val_acc: 0.792  time_per_val_step: 290.622\n",
      "validation_step: 49.00  avg_train_loss: 0.062  avg_val_loss: 0.960  avg_train_acc: 0.993  avg_val_acc: 0.796  time_per_val_step: 289.134\n",
      "validation_step: 50.00  avg_train_loss: 0.063  avg_val_loss: 0.989  avg_train_acc: 0.992  avg_val_acc: 0.792  time_per_val_step: 289.407\n"
     ]
    }
   ],
   "source": [
    "all_losses = train(\n",
    "    resnet50, criterion, optimizer, \n",
    "    train_iterator, n_epochs, n_batches, \n",
    "    val_iterator, validation_step=1525, n_validation_batches=80, \n",
    "    saving_epoch=5, lr_scheduler=lr_scheduler\n",
    ")\n"
   ]
  }
 ],
 "metadata": {
  "interpreter": {
   "hash": "494899efd6527d56ea7f55c588d0081523a17dc3a9ff1107f3394ad815ff2527"
  },
  "kernelspec": {
   "display_name": "Python 3.7.7 64-bit",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.7"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
