{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torchvision\n",
    "import torch.optim as optim\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "from torch.utils.data import DataLoader\n",
    "from train_vgg import *\n",
    "from data_utils import *\n",
    "from math import ceil\n",
    "import numpy as np"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Let cuDNN benchmark conv algorithms; fastest when input sizes are fixed.\n",
     "torch.backends.cudnn.benchmark = True\n",
     "# Load VGG16 with ImageNet-pretrained weights.\n",
     "# NOTE(review): `pretrained=True` is the pre-0.13 torchvision API; newer\n",
     "# releases use `weights=torchvision.models.VGG16_Weights.IMAGENET1K_V1`.\n",
     "vgg16 = torchvision.models.vgg16(pretrained=True)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "VGG(\n",
       "  (features): Sequential(\n",
       "    (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "    (1): ReLU(inplace=True)\n",
       "    (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "    (3): ReLU(inplace=True)\n",
       "    (4): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
       "    (5): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "    (6): ReLU(inplace=True)\n",
       "    (7): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "    (8): ReLU(inplace=True)\n",
       "    (9): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
       "    (10): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "    (11): ReLU(inplace=True)\n",
       "    (12): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "    (13): ReLU(inplace=True)\n",
       "    (14): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "    (15): ReLU(inplace=True)\n",
       "    (16): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
       "    (17): Conv2d(256, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "    (18): ReLU(inplace=True)\n",
       "    (19): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "    (20): ReLU(inplace=True)\n",
       "    (21): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "    (22): ReLU(inplace=True)\n",
       "    (23): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
       "    (24): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "    (25): ReLU(inplace=True)\n",
       "    (26): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "    (27): ReLU(inplace=True)\n",
       "    (28): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
       "    (29): ReLU(inplace=True)\n",
       "    (30): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)\n",
       "  )\n",
       "  (avgpool): AdaptiveAvgPool2d(output_size=(7, 7))\n",
       "  (classifier): Sequential(\n",
       "    (0): Linear(in_features=25088, out_features=4096, bias=True)\n",
       "    (1): ReLU(inplace=True)\n",
       "    (2): Dropout(p=0.5, inplace=False)\n",
       "    (3): Linear(in_features=4096, out_features=4096, bias=True)\n",
       "    (4): ReLU(inplace=True)\n",
       "    (5): Dropout(p=0.5, inplace=False)\n",
       "    (6): Linear(in_features=4096, out_features=257, bias=True)\n",
       "  )\n",
       ")"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "vgg16.classifier._modules['6'] = nn.Linear(in_features=4096, out_features=257, bias=True)\n",
    "vgg16.cuda()\n",
    "# vgg16"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def weights_init(m):\n",
    "    \"\"\"\n",
    "    用于初始化模型的权值和偏置\n",
    "    \"\"\"\n",
    "    classname = m.__class__.__name__\n",
    "    if classname.find('Conv') != -1:\n",
    "        m.weight.data.normal_(0.0, 0.02)\n",
    "    elif classname.find('BatchNorm') != -1:\n",
    "        m.weight.data.normal_(1.0, 0.02)\n",
    "        m.bias.data.fill_(0)\n",
    "    elif classname.find('Linear') != -1:\n",
    "        m.weight.data.normal_(0.0, 0.02)\n",
    "        if m.bias is not None:\n",
    "            m.bias.data.fill_(0.0)\n",
    "\n",
    "# vgg16.apply(weights_init)\n",
    "\n",
    "# param_tmp = vgg16.named_parameters()\n",
    "# for param in param_tmp:\n",
    "#     print(param[0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "classifier_weights = [p for n, p in vgg16.named_parameters()\n",
    "                        if 'classifier' in n and 'weight' in n]\n",
    "classifier_biases = [p for n, p in vgg16.named_parameters()\n",
    "                        if 'classifier' in n and 'bias' in n]\n",
    "features_weights = [\n",
    "    p for n, p in vgg16.named_parameters()\n",
    "    if 'features' in n and 'weight' in n\n",
    "]\n",
    "\n",
    "features_bias = [p for n, p in vgg16.named_parameters()\n",
    "                    if 'features' in n and 'bias' in n]\n",
    "# features_bn_weights = [\n",
    "#     p for n, p in vgg16.named_parameters()\n",
    "#     if 'weight' in n and ('bn' in n or 'downsample.1' in n)\n",
    "# ]\n",
    "# features_bn_biases = [\n",
    "#     p for n, p in vgg16.named_parameters()\n",
    "#     if 'bias' in n and 'bn' in n\n",
    "# ]\n",
    "\n",
    "# you can set different learning rates\n",
    "classifier_lr = 1e-2\n",
    "features_lr = 1e-2\n",
    "# but they are not actually used (because lr_scheduler is used)\n",
    "\n",
    "params = [\n",
    "    {'params': classifier_weights, 'lr': classifier_lr, 'weight_decay': 1e-3},\n",
    "    {'params': classifier_biases, 'lr': classifier_lr},\n",
    "    {'params': features_weights, 'lr': features_lr, 'weight_decay': 1e-3},\n",
    "    {'params': features_bias, 'lr': features_lr}\n",
    "    # {'params': features_bn_weights, 'lr': features_lr},\n",
    "    # {'params': features_bn_biases, 'lr': features_lr}\n",
    "]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# param = []\n",
    "# for p in vgg16.parameters():\n",
    "#     if p.requires_grad == True:\n",
    "#         param.append(p)\n",
    "\n",
    "optimizer = optim.SGD(params, momentum=0.9, nesterov=True)\n",
    "\n",
    "criterion = nn.CrossEntropyLoss()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "24387\n"
     ]
    }
   ],
   "source": [
    "batch_size = 16\n",
    "train_folder, val_folder = get_folders()\n",
    "\n",
    "train_iterator = DataLoader(\n",
    "    train_folder, batch_size=batch_size, num_workers=0,\n",
    "    shuffle=True, pin_memory=False\n",
    ")\n",
    "\n",
    "val_iterator = DataLoader(\n",
    "    val_folder, batch_size=16, num_workers=0,\n",
    "    shuffle=False, pin_memory=False\n",
    ")\n",
    "\n",
    "# number of training samples\n",
    "train_size = len(train_folder.imgs)\n",
    "print(train_size)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1525\n"
     ]
    }
   ],
   "source": [
    "n_epochs = 50\n",
    "n_batches = ceil(train_size/batch_size)\n",
    "print(n_batches)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# cyclical cosine annealing\n",
    "# it changes the learning rate on every optimization step\n",
    "# 1e-6 is the minimal learning rate\n",
    "M = 1 \n",
    "# total number of optimization steps\n",
    "T = n_batches*n_epochs \n",
    "# initial learning rates\n",
    "initial1 = 1e-2\n",
    "initial2 = 1e-3\n",
    "\n",
    "\n",
    "def lr_scheduler(optimizer, step):\n",
    "    \n",
    "    global initial1\n",
    "    global initial2\n",
    "    decay = np.cos(np.pi*((step - 1) % (T // M))/(T // M)) + 1.0\n",
    "    \n",
    "    # params of the last fc layer\n",
    "    for param_group in optimizer.param_groups[:2]:\n",
    "        param_group['lr'] = ((initial1 - 1e-6)*decay/2.0) + 1e-6\n",
    "    \n",
    "    # params of the last two vgg16 blocks\n",
    "    for param_group in optimizer.param_groups[2:]:\n",
    "        param_group['lr'] = ((initial2 - 1e-6)*decay/2.0) + 1e-6\n",
    "    \n",
    "    if (step - 1) % (T // M) == 0 and step != 1:\n",
    "        print('lr is reset')\n",
    "        \n",
    "    return optimizer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "validation_step: 1.00  avg_train_loss: 2.678  avg_val_loss: 2.331  avg_train_acc: 0.419  avg_val_acc: 0.471  time_per_val_step: 536.221\n",
      "saving model for epoch 0.\n",
      "validation_step: 2.00  avg_train_loss: 1.954  avg_val_loss: 2.014  avg_train_acc: 0.545  avg_val_acc: 0.545  time_per_val_step: 532.269\n",
      "validation_step: 3.00  avg_train_loss: 1.711  avg_val_loss: 1.859  avg_train_acc: 0.595  avg_val_acc: 0.576  time_per_val_step: 532.192\n",
      "validation_step: 4.00  avg_train_loss: 1.559  avg_val_loss: 1.876  avg_train_acc: 0.621  avg_val_acc: 0.572  time_per_val_step: 398.225\n",
      "validation_step: 5.00  avg_train_loss: 1.461  avg_val_loss: 1.807  avg_train_acc: 0.650  avg_val_acc: 0.578  time_per_val_step: 395.832\n",
      "validation_step: 6.00  avg_train_loss: 1.405  avg_val_loss: 1.844  avg_train_acc: 0.660  avg_val_acc: 0.589  time_per_val_step: 498.764\n",
      "saving model for epoch 5.\n",
      "validation_step: 7.00  avg_train_loss: 1.318  avg_val_loss: 1.840  avg_train_acc: 0.680  avg_val_acc: 0.579  time_per_val_step: 507.292\n",
      "validation_step: 8.00  avg_train_loss: 1.272  avg_val_loss: 1.829  avg_train_acc: 0.692  avg_val_acc: 0.592  time_per_val_step: 506.792\n",
      "validation_step: 9.00  avg_train_loss: 1.228  avg_val_loss: 1.808  avg_train_acc: 0.695  avg_val_acc: 0.582  time_per_val_step: 507.965\n",
      "validation_step: 10.00  avg_train_loss: 1.198  avg_val_loss: 1.976  avg_train_acc: 0.708  avg_val_acc: 0.560  time_per_val_step: 507.142\n",
      "validation_step: 11.00  avg_train_loss: 1.145  avg_val_loss: 1.717  avg_train_acc: 0.715  avg_val_acc: 0.607  time_per_val_step: 507.051\n",
      "saving model for epoch 10.\n",
      "validation_step: 12.00  avg_train_loss: 1.092  avg_val_loss: 1.704  avg_train_acc: 0.729  avg_val_acc: 0.608  time_per_val_step: 437.672\n",
      "validation_step: 13.00  avg_train_loss: 1.062  avg_val_loss: 2.019  avg_train_acc: 0.738  avg_val_acc: 0.561  time_per_val_step: 396.225\n",
      "validation_step: 14.00  avg_train_loss: 1.002  avg_val_loss: 1.860  avg_train_acc: 0.752  avg_val_acc: 0.587  time_per_val_step: 395.973\n",
      "validation_step: 15.00  avg_train_loss: 0.955  avg_val_loss: 1.835  avg_train_acc: 0.760  avg_val_acc: 0.603  time_per_val_step: 466.251\n",
      "validation_step: 16.00  avg_train_loss: 0.912  avg_val_loss: 1.751  avg_train_acc: 0.772  avg_val_acc: 0.607  time_per_val_step: 564.941\n",
      "saving model for epoch 15.\n",
      "validation_step: 17.00  avg_train_loss: 0.892  avg_val_loss: 1.894  avg_train_acc: 0.775  avg_val_acc: 0.599  time_per_val_step: 565.371\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-10-5b58636a7caa>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m      3\u001b[0m     \u001b[0mtrain_iterator\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mn_epochs\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mn_batches\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      4\u001b[0m     \u001b[0mval_iterator\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mvalidation_step\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m1525\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mn_validation_batches\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m80\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 5\u001b[1;33m     \u001b[0msaving_epoch\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m5\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mlr_scheduler\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mlr_scheduler\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m      6\u001b[0m )\n",
      "\u001b[1;32md:\\A-DATA_STORAGE\\大三上学期\\CV\\实验5\\train_vgg.py\u001b[0m in \u001b[0;36mtrain\u001b[1;34m(model, criterion, optimizer, train_iterator, n_epochs, n_batches, val_iterator, validation_step, n_validation_batches, saving_epoch, lr_scheduler)\u001b[0m\n\u001b[0;32m    113\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    114\u001b[0m             batch_loss, batch_accuracy = optimization_step(\n\u001b[1;32m--> 115\u001b[1;33m                 \u001b[0mmodel\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcriterion\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0moptimizer\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mx_batch\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0my_batch\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    116\u001b[0m             )\n\u001b[0;32m    117\u001b[0m             \u001b[0mrunning_loss\u001b[0m \u001b[1;33m+=\u001b[0m \u001b[0mbatch_loss\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32md:\\A-DATA_STORAGE\\大三上学期\\CV\\实验5\\train_vgg.py\u001b[0m in \u001b[0;36moptimization_step\u001b[1;34m(model, criterion, optimizer, x_batch, y_batch)\u001b[0m\n\u001b[0;32m     55\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     56\u001b[0m     \u001b[0mx_batch\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mx_batch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrequires_grad_\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;32mFalse\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 57\u001b[1;33m     \u001b[0mx_batch\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mx_batch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcuda\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     58\u001b[0m     \u001b[0my_batch\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0my_batch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrequires_grad_\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;32mFalse\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     59\u001b[0m     \u001b[0my_batch\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0my_batch\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcuda\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mnon_blocking\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mTrue\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "all_losses = train(\n",
    "    vgg16, criterion, optimizer, \n",
    "    train_iterator, n_epochs, n_batches, \n",
    "    val_iterator, validation_step=1525, n_validation_batches=80, \n",
    "    saving_epoch=5, lr_scheduler=lr_scheduler\n",
    ")\n"
   ]
  }
 ],
 "metadata": {
  "interpreter": {
   "hash": "494899efd6527d56ea7f55c588d0081523a17dc3a9ff1107f3394ad815ff2527"
  },
  "kernelspec": {
   "display_name": "Python 3.7.7 64-bit",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.7"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
