{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "6e60a252-6a49-486e-a870-ccdf463d9d7e",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import time\n",
    "import torch.nn as nn\n",
    "import numpy as np\n",
    "import torch.optim as optim\n",
    "import torchvision\n",
    "import torchvision.transforms as transforms\n",
    "from torch.utils.data import DataLoader"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "f2ef237d-d601-401b-949a-47e382005b0e",
   "metadata": {},
   "outputs": [],
   "source": [
    "from torch.optim.lr_scheduler import _LRScheduler"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6a40b943-1dd9-4a04-bca4-c9a11a36aa70",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e305bccf-2b26-4a21-ad8e-e31ff71e6c3e",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "a2dfc8ed-362c-4435-9767-05ad628f5037",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Draw a random 4x4 matrix and build a boolean mask with exactly two\n",
    "# True entries per column (random selection via argsort of iid uniforms).\n",
    "# NOTE(review): no RNG seed is set, so the mask differs on each run.\n",
    "keep_matrix = np.random.uniform(0,1,size=(4, 4))\n",
    "order = keep_matrix.argsort(0)\n",
    "# entries of `order` below 2 -> exactly two True per column\n",
    "dist_keep = order < int(2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "3bc69fff-dcc4-4ded-be6b-edfd0d68455e",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[ True, False, False,  True],\n",
       "       [ True, False, False, False],\n",
       "       [False,  True,  True, False],\n",
       "       [False,  True,  True,  True]])"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Inspect the random keep mask computed above\n",
    "dist_keep"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6945b9f2-b916-43f1-9c89-21c3bd0874f5",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "d8472edd-b4c6-4f3d-898f-22d4a3ca0568",
   "metadata": {},
   "outputs": [],
   "source": [
    "def train(epoch):\n",
    "    \"\"\"Run one training epoch over cifar100_training_loader.\n",
    "\n",
    "    Uses notebook-level globals: net, optimizer, loss_function,\n",
    "    cifar100_training_loader, warm, warmup_scheduler.\n",
    "\n",
    "    Args:\n",
    "        epoch: 1-based epoch index; gates per-batch warmup stepping.\n",
    "    \"\"\"\n",
    "\n",
    "    net.train()\n",
    "    for batch_index, (images, labels) in enumerate(cifar100_training_loader):\n",
    "\n",
    "        \n",
    "        # move the batch to the GPU (net lives on cuda)\n",
    "        labels = labels.cuda()\n",
    "        images = images.cuda()\n",
    "\n",
    "        # standard step: clear grads, forward, loss, backward, update\n",
    "        optimizer.zero_grad()\n",
    "        outputs = net(images)\n",
    "        loss = loss_function(outputs, labels)\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "\n",
    "        # global iteration counter; only used by the (currently\n",
    "        # commented-out) logging below\n",
    "        n_iter = (epoch - 1) * len(cifar100_training_loader) + batch_index + 1\n",
    "\n",
    "        # print('Training Epoch: {epoch} [{trained_samples}/{total_samples}]\\tLoss: {:0.4f}\\tLR: {:0.6f}'.format(\n",
    "        #     loss.item(),\n",
    "        #     optimizer.param_groups[0]['lr'],\n",
    "        #     epoch=epoch,\n",
    "        #     trained_samples=batch_index * batch_size + len(images),\n",
    "        #     total_samples=len(cifar100_training_loader.dataset)\n",
    "        # ))\n",
    "\n",
    "        # during warmup epochs the warmup LR scheduler steps once per batch\n",
    "        if epoch <= warm:\n",
    "            warmup_scheduler.step()\n",
    "            \n",
    "\n",
    "@torch.no_grad()\n",
    "def eval_training(epoch=0, tb=True):\n",
    "    \"\"\"Evaluate net on cifar100_test_loader and print a summary.\n",
    "\n",
    "    Args:\n",
    "        epoch: epoch number to report in the printed summary.\n",
    "        tb: unused here; presumably a tensorboard flag kept for\n",
    "            interface parity — confirm against original script.\n",
    "\n",
    "    Returns:\n",
    "        Test accuracy as a 0-dim float tensor (correct / dataset size).\n",
    "    \"\"\"\n",
    "\n",
    "    start = time.time()\n",
    "    net.eval()\n",
    "\n",
    "    test_loss = 0.0 # cost function error\n",
    "    correct = 0.0\n",
    "\n",
    "    for (images, labels) in cifar100_test_loader:\n",
    "\n",
    "        \n",
    "        images = images.cuda()\n",
    "        labels = labels.cuda()\n",
    "\n",
    "        outputs = net(images)\n",
    "        loss = loss_function(outputs, labels)\n",
    "\n",
    "        # accumulate summed batch loss and correct-prediction count\n",
    "        test_loss += loss.item()\n",
    "        _, preds = outputs.max(1)\n",
    "        correct += preds.eq(labels).sum()\n",
    "\n",
    "    finish = time.time()\n",
    "\n",
    "    # print('GPU INFO.....')\n",
    "    # print(torch.cuda.memory_summary(), end='')\n",
    "    print('Evaluating Network.....')\n",
    "    print('Test set: Epoch: {}, Average loss: {:.4f}, Accuracy: {:.4f}, Time consumed:{:.2f}s'.format(\n",
    "        epoch,\n",
    "        test_loss / len(cifar100_test_loader.dataset),\n",
    "        correct.float() / len(cifar100_test_loader.dataset),\n",
    "        finish - start\n",
    "    ))\n",
    "    print()\n",
    "\n",
    "    return correct.float() / len(cifar100_test_loader.dataset)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "d05b219d-493e-4f8d-9501-551747a7127a",
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_training_dataloader(mean, std, batch_size=16, num_workers=2, shuffle=True):\n",
    "    \"\"\" return training dataloader\n",
    "    Args:\n",
    "        mean: mean of cifar100 training dataset\n",
    "        std: std of cifar100 training dataset\n",
    "        batch_size: dataloader batchsize\n",
    "        num_workers: dataloader num_works\n",
    "        shuffle: whether to shuffle\n",
    "    Returns: train_data_loader:torch dataloader object\n",
    "    \"\"\"\n",
    "\n",
    "    # augmentation pipeline: crop/flip/rotate, then normalize\n",
    "    transform_train = transforms.Compose([\n",
    "        #transforms.ToPILImage(),\n",
    "        transforms.RandomCrop(32, padding=4),\n",
    "        transforms.RandomHorizontalFlip(),\n",
    "        transforms.RandomRotation(15),\n",
    "        transforms.ToTensor(),\n",
    "        transforms.Normalize(mean, std)\n",
    "    ])\n",
    "    #cifar100_training = CIFAR100Train(path, transform=transform_train)\n",
    "    cifar100_training = torchvision.datasets.CIFAR100(root='../datasets/cifar100', train=True, download=True, transform=transform_train)\n",
    "    cifar100_training_loader = DataLoader(\n",
    "        cifar100_training, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)\n",
    "\n",
    "    return cifar100_training_loader\n",
    "\n",
    "def get_test_dataloader(mean, std, batch_size=16, num_workers=2, shuffle=True):\n",
    "    \"\"\" return test dataloader\n",
    "    Args:\n",
    "        mean: mean of cifar100 test dataset\n",
    "        std: std of cifar100 test dataset\n",
    "        batch_size: dataloader batchsize\n",
    "        num_workers: dataloader num_works\n",
    "        shuffle: whether to shuffle\n",
    "    Returns: cifar100_test_loader:torch dataloader object\n",
    "    \"\"\"\n",
    "\n",
    "    # no augmentation at test time: only tensor conversion + normalization\n",
    "    transform_test = transforms.Compose([\n",
    "        transforms.ToTensor(),\n",
    "        transforms.Normalize(mean, std)\n",
    "    ])\n",
    "    #cifar100_test = CIFAR100Test(path, transform=transform_test)\n",
    "    cifar100_test = torchvision.datasets.CIFAR100(root='../datasets/cifar100', train=False, download=True, transform=transform_test)\n",
    "    cifar100_test_loader = DataLoader(\n",
    "        cifar100_test, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)\n",
    "\n",
    "    return cifar100_test_loader"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "3d13aae3-9211-4274-8cb8-ac4c00732e9d",
   "metadata": {},
   "outputs": [],
   "source": [
    "class WarmUpLR(_LRScheduler):\n",
    "    \"\"\"warmup_training learning rate scheduler\n",
    "    Args:\n",
    "        optimizer: optimizer (e.g. SGD)\n",
    "        total_iters: total iterations of the warmup phase\n",
    "    \"\"\"\n",
    "    def __init__(self, optimizer, total_iters, last_epoch=-1):\n",
    "\n",
    "        self.total_iters = total_iters\n",
    "        super().__init__(optimizer, last_epoch)\n",
    "\n",
    "    def get_lr(self):\n",
    "        \"\"\"we will use the first m batches, and set the learning\n",
    "        rate to base_lr * m / total_iters\n",
    "        \"\"\"\n",
    "        # last_epoch counts scheduler steps (one per batch here); the\n",
    "        # 1e-8 guards against division by zero when total_iters == 0\n",
    "        return [base_lr * self.last_epoch / (self.total_iters + 1e-8) for base_lr in self.base_lrs]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "1a213e6c-a71f-423a-b1a0-08a598abd5de",
   "metadata": {},
   "outputs": [],
   "source": [
    "class BasicBlock(nn.Module):\n",
    "    \"\"\"Basic Block for resnet 18 and resnet 34\n",
    "\n",
    "    \"\"\"\n",
    "\n",
    "    #BasicBlock and BottleNeck block\n",
    "    #have different output size\n",
    "    #we use class attribute expansion\n",
    "    #to distinct\n",
    "    expansion = 1\n",
    "\n",
    "    def __init__(self, in_channels, out_channels, stride=1):\n",
    "        super().__init__()\n",
    "\n",
    "        #residual function: 3x3 conv -> BN -> ReLU -> 3x3 conv -> BN\n",
    "        self.residual_function = nn.Sequential(\n",
    "            nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False),\n",
    "            nn.BatchNorm2d(out_channels),\n",
    "            nn.ReLU(inplace=True),\n",
    "            nn.Conv2d(out_channels, out_channels * BasicBlock.expansion, kernel_size=3, padding=1, bias=False),\n",
    "            nn.BatchNorm2d(out_channels * BasicBlock.expansion)\n",
    "        )\n",
    "\n",
    "        #shortcut: identity by default\n",
    "        self.shortcut = nn.Sequential()\n",
    "\n",
    "        #the shortcut output dimension is not the same with residual function\n",
    "        #use 1*1 convolution to match the dimension\n",
    "        if stride != 1 or in_channels != BasicBlock.expansion * out_channels:\n",
    "            self.shortcut = nn.Sequential(\n",
    "                nn.Conv2d(in_channels, out_channels * BasicBlock.expansion, kernel_size=1, stride=stride, bias=False),\n",
    "                nn.BatchNorm2d(out_channels * BasicBlock.expansion)\n",
    "            )\n",
    "\n",
    "    def forward(self, x):\n",
    "        # residual sum followed by ReLU\n",
    "        return nn.ReLU(inplace=True)(self.residual_function(x) + self.shortcut(x))\n",
    "\n",
    "class ResNet(nn.Module):\n",
    "    \"\"\"ResNet for 32x32 inputs: 3x3 stem (no max-pool), four stages, FC head.\"\"\"\n",
    "\n",
    "    def __init__(self, block, num_block, num_classes=100):\n",
    "        super().__init__()\n",
    "\n",
    "        self.in_channels = 64\n",
    "\n",
    "        self.conv1 = nn.Sequential(\n",
    "            nn.Conv2d(3, 64, kernel_size=3, padding=1, bias=False),\n",
    "            nn.BatchNorm2d(64),\n",
    "            nn.ReLU(inplace=True))\n",
    "        #we use a different inputsize than the original paper\n",
    "        #so conv2_x's stride is 1\n",
    "        self.conv2_x = self._make_layer(block, 64, num_block[0], 1)\n",
    "        self.conv3_x = self._make_layer(block, 128, num_block[1], 2)\n",
    "        self.conv4_x = self._make_layer(block, 256, num_block[2], 2)\n",
    "        self.conv5_x = self._make_layer(block, 512, num_block[3], 2)\n",
    "        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))\n",
    "        self.fc = nn.Linear(512 * block.expansion, num_classes)\n",
    "\n",
    "    def _make_layer(self, block, out_channels, num_blocks, stride):\n",
    "        \"\"\"make resnet layers(by layer i didn't mean this 'layer' was the\n",
    "        same as a neural network layer, ex. conv layer), one layer may\n",
    "        contain more than one residual block\n",
    "\n",
    "        Args:\n",
    "            block: block type, basic block or bottle neck block\n",
    "            out_channels: output depth channel number of this layer\n",
    "            num_blocks: how many blocks per layer\n",
    "            stride: the stride of the first block of this layer\n",
    "\n",
    "        Return:\n",
    "            return a resnet layer\n",
    "        \"\"\"\n",
    "\n",
    "        # we have num_block blocks per layer, the first block's stride\n",
    "        # could be 1 or 2, other blocks would always be 1\n",
    "        strides = [stride] + [1] * (num_blocks - 1)\n",
    "        layers = []\n",
    "        for stride in strides:\n",
    "            layers.append(block(self.in_channels, out_channels, stride))\n",
    "            # next block's input width is this block's output width\n",
    "            self.in_channels = out_channels * block.expansion\n",
    "\n",
    "        return nn.Sequential(*layers)\n",
    "\n",
    "    def forward(self, x):\n",
    "        output = self.conv1(x)\n",
    "        output = self.conv2_x(output)\n",
    "        output = self.conv3_x(output)\n",
    "        output = self.conv4_x(output)\n",
    "        output = self.conv5_x(output)\n",
    "        output = self.avg_pool(output)\n",
    "        # flatten (N, 512*expansion, 1, 1) -> (N, 512*expansion)\n",
    "        output = output.view(output.size(0), -1)\n",
    "        output = self.fc(output)\n",
    "\n",
    "        return output\n",
    "\n",
    "def resnet18():\n",
    "    \"\"\" return a ResNet 18 object\n",
    "    \"\"\"\n",
    "    return ResNet(BasicBlock, [2, 2, 2, 2])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "4a7e3eeb-087e-46ef-ad96-9341801e75ea",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Per-channel mean/std used for input normalization in both dataloaders;\n",
    "# presumably computed from the CIFAR-100 training split — TODO confirm.\n",
    "CIFAR100_TRAIN_MEAN = (0.5070751592371323, 0.48654887331495095, 0.4409178433670343)\n",
    "CIFAR100_TRAIN_STD = (0.2673342858792401, 0.2564384629170883, 0.27615047132568404)\n",
    "batch_size = 128\n",
    "EPOCH = 200\n",
    "# number of warmup epochs (train() steps warmup_scheduler per batch while epoch <= warm)\n",
    "warm = 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "10389050-88e0-4fb1-a2e4-4e41d5858bd3",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "7952fd90-5867-4789-b7de-076218231a37",
   "metadata": {},
   "outputs": [],
   "source": [
    "# instantiate ResNet-18 and move it to the GPU\n",
    "net = resnet18()\n",
    "net = net.cuda()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "6bb1ca37-4362-4101-895a-fa8f747ec091",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Files already downloaded and verified\n",
      "Files already downloaded and verified\n"
     ]
    }
   ],
   "source": [
    "# build train/test dataloaders; downloads CIFAR-100 on first run\n",
    "cifar100_training_loader = get_training_dataloader(\n",
    "        CIFAR100_TRAIN_MEAN,\n",
    "        CIFAR100_TRAIN_STD,\n",
    "        batch_size=batch_size,\n",
    "        shuffle=True\n",
    "    )\n",
    "\n",
    "# NOTE(review): test loader normalizes with the TRAIN mean/std (standard\n",
    "# practice), but shuffle=True is unnecessary for evaluation\n",
    "cifar100_test_loader = get_test_dataloader(\n",
    "    CIFAR100_TRAIN_MEAN,\n",
    "    CIFAR100_TRAIN_STD,\n",
    "    batch_size=batch_size,\n",
    "    shuffle=True\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "239146c2-025c-4794-b5e4-5127c8d76a1b",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "334f200a-efac-4913-b99b-2291130f82bd",
   "metadata": {},
   "outputs": [],
   "source": [
    "loss_function = nn.CrossEntropyLoss()\n",
    "optimizer = optim.SGD(net.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)\n",
    "# step decay: lr *= 0.2 at epochs 60, 120, 160\n",
    "train_scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[60, 120, 160], gamma=0.2) #learning rate decay\n",
    "iter_per_epoch = len(cifar100_training_loader)\n",
    "# warmup runs for 1 epoch worth of batches (the * 1 matches warm = 1)\n",
    "warmup_scheduler = WarmUpLR(optimizer, iter_per_epoch * 1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bd32b9d7-598c-415e-a213-3358b4ee9929",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "a31fe882-2960-43b2-9bba-7280a5582a5c",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Evaluating Network.....\n",
      "Test set: Epoch: 1, Average loss: 0.0302, Accuracy: 0.1226, Time consumed:1.94s\n",
      "\n",
      "tensor(0.1226, device='cuda:0')\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/ecpkn/.conda/envs/opacus/lib/python3.8/site-packages/torch/optim/lr_scheduler.py:156: UserWarning: The epoch parameter in `scheduler.step()` was not necessary and is being deprecated where possible. Please use `scheduler.step()` to step the scheduler. During the deprecation, if epoch is different from None, the closed form is used instead of the new chainable form, where available. Please open an issue if you are unable to replicate your use case: https://github.com/pytorch/pytorch/issues/new/choose.\n",
      "  warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Evaluating Network.....\n",
      "Test set: Epoch: 2, Average loss: 0.0244, Accuracy: 0.2355, Time consumed:1.86s\n",
      "\n",
      "tensor(0.2355, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 3, Average loss: 0.0232, Accuracy: 0.2704, Time consumed:1.86s\n",
      "\n",
      "tensor(0.2704, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 4, Average loss: 0.0199, Accuracy: 0.3552, Time consumed:1.86s\n",
      "\n",
      "tensor(0.3552, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 5, Average loss: 0.0166, Accuracy: 0.4320, Time consumed:1.86s\n",
      "\n",
      "tensor(0.4320, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 6, Average loss: 0.0161, Accuracy: 0.4494, Time consumed:1.86s\n",
      "\n",
      "tensor(0.4494, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 7, Average loss: 0.0151, Accuracy: 0.4888, Time consumed:1.86s\n",
      "\n",
      "tensor(0.4888, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 8, Average loss: 0.0153, Accuracy: 0.4839, Time consumed:1.87s\n",
      "\n",
      "tensor(0.4839, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 9, Average loss: 0.0139, Accuracy: 0.5218, Time consumed:1.86s\n",
      "\n",
      "tensor(0.5218, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 10, Average loss: 0.0138, Accuracy: 0.5224, Time consumed:1.86s\n",
      "\n",
      "tensor(0.5224, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 11, Average loss: 0.0144, Accuracy: 0.5021, Time consumed:1.85s\n",
      "\n",
      "tensor(0.5021, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 12, Average loss: 0.0179, Accuracy: 0.4405, Time consumed:1.85s\n",
      "\n",
      "tensor(0.4405, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 13, Average loss: 0.0138, Accuracy: 0.5401, Time consumed:1.86s\n",
      "\n",
      "tensor(0.5401, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 14, Average loss: 0.0132, Accuracy: 0.5473, Time consumed:1.86s\n",
      "\n",
      "tensor(0.5473, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 15, Average loss: 0.0139, Accuracy: 0.5304, Time consumed:1.86s\n",
      "\n",
      "tensor(0.5304, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 16, Average loss: 0.0146, Accuracy: 0.5127, Time consumed:1.85s\n",
      "\n",
      "tensor(0.5127, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 17, Average loss: 0.0129, Accuracy: 0.5533, Time consumed:1.86s\n",
      "\n",
      "tensor(0.5533, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 18, Average loss: 0.0144, Accuracy: 0.5247, Time consumed:1.86s\n",
      "\n",
      "tensor(0.5247, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 19, Average loss: 0.0127, Accuracy: 0.5590, Time consumed:1.87s\n",
      "\n",
      "tensor(0.5590, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 20, Average loss: 0.0133, Accuracy: 0.5513, Time consumed:1.87s\n",
      "\n",
      "tensor(0.5513, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 21, Average loss: 0.0135, Accuracy: 0.5425, Time consumed:1.86s\n",
      "\n",
      "tensor(0.5425, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 22, Average loss: 0.0147, Accuracy: 0.5320, Time consumed:1.86s\n",
      "\n",
      "tensor(0.5320, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 23, Average loss: 0.0145, Accuracy: 0.5247, Time consumed:1.86s\n",
      "\n",
      "tensor(0.5247, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 24, Average loss: 0.0145, Accuracy: 0.5303, Time consumed:1.85s\n",
      "\n",
      "tensor(0.5303, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 25, Average loss: 0.0136, Accuracy: 0.5514, Time consumed:1.86s\n",
      "\n",
      "tensor(0.5514, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 26, Average loss: 0.0135, Accuracy: 0.5561, Time consumed:1.85s\n",
      "\n",
      "tensor(0.5561, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 27, Average loss: 0.0141, Accuracy: 0.5343, Time consumed:1.86s\n",
      "\n",
      "tensor(0.5343, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 28, Average loss: 0.0126, Accuracy: 0.5710, Time consumed:1.86s\n",
      "\n",
      "tensor(0.5710, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 29, Average loss: 0.0134, Accuracy: 0.5517, Time consumed:1.86s\n",
      "\n",
      "tensor(0.5517, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 30, Average loss: 0.0132, Accuracy: 0.5485, Time consumed:1.86s\n",
      "\n",
      "tensor(0.5485, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 31, Average loss: 0.0125, Accuracy: 0.5861, Time consumed:1.86s\n",
      "\n",
      "tensor(0.5861, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 32, Average loss: 0.0133, Accuracy: 0.5526, Time consumed:1.87s\n",
      "\n",
      "tensor(0.5526, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 33, Average loss: 0.0135, Accuracy: 0.5521, Time consumed:1.86s\n",
      "\n",
      "tensor(0.5521, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 34, Average loss: 0.0123, Accuracy: 0.5786, Time consumed:1.85s\n",
      "\n",
      "tensor(0.5786, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 35, Average loss: 0.0137, Accuracy: 0.5462, Time consumed:1.86s\n",
      "\n",
      "tensor(0.5462, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 36, Average loss: 0.0147, Accuracy: 0.5178, Time consumed:1.85s\n",
      "\n",
      "tensor(0.5178, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 37, Average loss: 0.0141, Accuracy: 0.5383, Time consumed:1.86s\n",
      "\n",
      "tensor(0.5383, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 38, Average loss: 0.0135, Accuracy: 0.5526, Time consumed:1.86s\n",
      "\n",
      "tensor(0.5526, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 39, Average loss: 0.0134, Accuracy: 0.5630, Time consumed:1.86s\n",
      "\n",
      "tensor(0.5630, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 40, Average loss: 0.0137, Accuracy: 0.5529, Time consumed:1.86s\n",
      "\n",
      "tensor(0.5529, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 41, Average loss: 0.0135, Accuracy: 0.5437, Time consumed:1.86s\n",
      "\n",
      "tensor(0.5437, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 42, Average loss: 0.0123, Accuracy: 0.5862, Time consumed:1.86s\n",
      "\n",
      "tensor(0.5862, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 43, Average loss: 0.0136, Accuracy: 0.5520, Time consumed:1.87s\n",
      "\n",
      "tensor(0.5520, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 44, Average loss: 0.0149, Accuracy: 0.5180, Time consumed:1.86s\n",
      "\n",
      "tensor(0.5180, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 45, Average loss: 0.0122, Accuracy: 0.5824, Time consumed:1.86s\n",
      "\n",
      "tensor(0.5824, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 46, Average loss: 0.0155, Accuracy: 0.5153, Time consumed:1.86s\n",
      "\n",
      "tensor(0.5153, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 47, Average loss: 0.0126, Accuracy: 0.5860, Time consumed:1.88s\n",
      "\n",
      "tensor(0.5860, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 48, Average loss: 0.0160, Accuracy: 0.5111, Time consumed:1.87s\n",
      "\n",
      "tensor(0.5111, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 49, Average loss: 0.0145, Accuracy: 0.5299, Time consumed:1.86s\n",
      "\n",
      "tensor(0.5299, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 50, Average loss: 0.0129, Accuracy: 0.5654, Time consumed:1.87s\n",
      "\n",
      "tensor(0.5654, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 51, Average loss: 0.0132, Accuracy: 0.5621, Time consumed:1.86s\n",
      "\n",
      "tensor(0.5621, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 52, Average loss: 0.0119, Accuracy: 0.5917, Time consumed:1.86s\n",
      "\n",
      "tensor(0.5917, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 53, Average loss: 0.0136, Accuracy: 0.5469, Time consumed:1.87s\n",
      "\n",
      "tensor(0.5469, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 54, Average loss: 0.0128, Accuracy: 0.5723, Time consumed:1.86s\n",
      "\n",
      "tensor(0.5723, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 55, Average loss: 0.0131, Accuracy: 0.5617, Time consumed:1.86s\n",
      "\n",
      "tensor(0.5617, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 56, Average loss: 0.0129, Accuracy: 0.5688, Time consumed:1.85s\n",
      "\n",
      "tensor(0.5688, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 57, Average loss: 0.0144, Accuracy: 0.5301, Time consumed:1.86s\n",
      "\n",
      "tensor(0.5301, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 58, Average loss: 0.0150, Accuracy: 0.5238, Time consumed:1.87s\n",
      "\n",
      "tensor(0.5238, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 59, Average loss: 0.0125, Accuracy: 0.5846, Time consumed:1.86s\n",
      "\n",
      "tensor(0.5846, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 60, Average loss: 0.0076, Accuracy: 0.7262, Time consumed:1.87s\n",
      "\n",
      "tensor(0.7262, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 61, Average loss: 0.0076, Accuracy: 0.7297, Time consumed:1.86s\n",
      "\n",
      "tensor(0.7297, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 62, Average loss: 0.0078, Accuracy: 0.7288, Time consumed:1.86s\n",
      "\n",
      "tensor(0.7288, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 63, Average loss: 0.0079, Accuracy: 0.7275, Time consumed:1.86s\n",
      "\n",
      "tensor(0.7275, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 64, Average loss: 0.0083, Accuracy: 0.7132, Time consumed:1.85s\n",
      "\n",
      "tensor(0.7132, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 65, Average loss: 0.0082, Accuracy: 0.7176, Time consumed:1.85s\n",
      "\n",
      "tensor(0.7176, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 66, Average loss: 0.0090, Accuracy: 0.7080, Time consumed:1.86s\n",
      "\n",
      "tensor(0.7080, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 67, Average loss: 0.0091, Accuracy: 0.7072, Time consumed:1.86s\n",
      "\n",
      "tensor(0.7072, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 68, Average loss: 0.0091, Accuracy: 0.7006, Time consumed:1.86s\n",
      "\n",
      "tensor(0.7006, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 69, Average loss: 0.0091, Accuracy: 0.7116, Time consumed:1.86s\n",
      "\n",
      "tensor(0.7116, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 70, Average loss: 0.0093, Accuracy: 0.7047, Time consumed:1.86s\n",
      "\n",
      "tensor(0.7047, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 71, Average loss: 0.0093, Accuracy: 0.7009, Time consumed:1.86s\n",
      "\n",
      "tensor(0.7009, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 72, Average loss: 0.0095, Accuracy: 0.6952, Time consumed:1.86s\n",
      "\n",
      "tensor(0.6952, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 73, Average loss: 0.0097, Accuracy: 0.6888, Time consumed:1.86s\n",
      "\n",
      "tensor(0.6888, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 74, Average loss: 0.0095, Accuracy: 0.6983, Time consumed:1.86s\n",
      "\n",
      "tensor(0.6983, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 75, Average loss: 0.0094, Accuracy: 0.7033, Time consumed:1.87s\n",
      "\n",
      "tensor(0.7033, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 76, Average loss: 0.0096, Accuracy: 0.6962, Time consumed:1.85s\n",
      "\n",
      "tensor(0.6962, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 77, Average loss: 0.0098, Accuracy: 0.6887, Time consumed:1.86s\n",
      "\n",
      "tensor(0.6887, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 78, Average loss: 0.0099, Accuracy: 0.6931, Time consumed:1.86s\n",
      "\n",
      "tensor(0.6931, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 79, Average loss: 0.0102, Accuracy: 0.6834, Time consumed:1.88s\n",
      "\n",
      "tensor(0.6834, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 80, Average loss: 0.0100, Accuracy: 0.6894, Time consumed:1.88s\n",
      "\n",
      "tensor(0.6894, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 81, Average loss: 0.0097, Accuracy: 0.6922, Time consumed:1.86s\n",
      "\n",
      "tensor(0.6922, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 82, Average loss: 0.0100, Accuracy: 0.6858, Time consumed:1.86s\n",
      "\n",
      "tensor(0.6858, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 83, Average loss: 0.0103, Accuracy: 0.6853, Time consumed:1.85s\n",
      "\n",
      "tensor(0.6853, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 84, Average loss: 0.0100, Accuracy: 0.6880, Time consumed:1.86s\n",
      "\n",
      "tensor(0.6880, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 85, Average loss: 0.0102, Accuracy: 0.6892, Time consumed:1.86s\n",
      "\n",
      "tensor(0.6892, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 86, Average loss: 0.0102, Accuracy: 0.6913, Time consumed:1.86s\n",
      "\n",
      "tensor(0.6913, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 87, Average loss: 0.0105, Accuracy: 0.6791, Time consumed:1.86s\n",
      "\n",
      "tensor(0.6791, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 88, Average loss: 0.0102, Accuracy: 0.6920, Time consumed:1.85s\n",
      "\n",
      "tensor(0.6920, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 89, Average loss: 0.0102, Accuracy: 0.6884, Time consumed:1.86s\n",
      "\n",
      "tensor(0.6884, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 90, Average loss: 0.0105, Accuracy: 0.6783, Time consumed:1.86s\n",
      "\n",
      "tensor(0.6783, device='cuda:0')\n",
      "Evaluating Network.....\n",
      "Test set: Epoch: 91, Average loss: 0.0104, Accuracy: 0.6833, Time consumed:1.87s\n",
      "\n",
      "tensor(0.6833, device='cuda:0')\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "Cell \u001b[0;32mIn[11], line 5\u001b[0m\n\u001b[1;32m      2\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m epoch \u001b[38;5;241m>\u001b[39m warm:\n\u001b[1;32m      3\u001b[0m     train_scheduler\u001b[38;5;241m.\u001b[39mstep(epoch)\n\u001b[0;32m----> 5\u001b[0m \u001b[43mtrain\u001b[49m\u001b[43m(\u001b[49m\u001b[43mepoch\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m      6\u001b[0m acc \u001b[38;5;241m=\u001b[39m eval_training(epoch)\n\u001b[1;32m      8\u001b[0m \u001b[38;5;28mprint\u001b[39m(acc)\n",
      "Cell \u001b[0;32mIn[3], line 7\u001b[0m, in \u001b[0;36mtrain\u001b[0;34m(epoch)\u001b[0m\n\u001b[1;32m      3\u001b[0m net\u001b[38;5;241m.\u001b[39mtrain()\n\u001b[1;32m      4\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m batch_index, (images, labels) \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28menumerate\u001b[39m(cifar100_training_loader):\n\u001b[0;32m----> 7\u001b[0m     labels \u001b[38;5;241m=\u001b[39m \u001b[43mlabels\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mcuda\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m      8\u001b[0m     images \u001b[38;5;241m=\u001b[39m images\u001b[38;5;241m.\u001b[39mcuda()\n\u001b[1;32m     10\u001b[0m     optimizer\u001b[38;5;241m.\u001b[39mzero_grad()\n",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "for epoch in range(1, EPOCH + 1):\n",
    "    if epoch > warm:\n",
    "        train_scheduler.step(epoch)\n",
    "\n",
    "    train(epoch)\n",
    "    acc = eval_training(epoch)\n",
    "\n",
    "    print(acc)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "567c2a89-9620-4c7e-b7c1-5f8bb45c65dc",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6c895a72-d385-4046-a293-675768efbfdb",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5db15ad2-d8e6-4098-9e4b-0eddd4ae3fba",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "608537ed-98bb-42cc-8122-37e2dd10ce99",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "183e855b-6e48-43eb-8849-314f9d69fcbe",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "34adda05-c34b-46b1-a773-41be31d2f1e6",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "opacus",
   "language": "python",
   "name": "opacus"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
