{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "# Import Modules"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "from torch.autograd import Variable\n",
    "import torch.nn.functional as F\n",
    "from torchvision.datasets.mnist import MNIST\n",
    "from torchvision.datasets import CIFAR10, CIFAR100, ImageFolder\n",
    "from torchvision.datasets.imagenet import ImageNet\n",
    "from torch.utils.data import DataLoader\n",
    "import torchvision.transforms as transforms\n",
    "import numpy as np\n",
    "import collections\n",
    "import random"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "# Model Structure Definition"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# ResNet modules"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
     "class BasicBlock(nn.Module):\n",
     "    \"\"\"Two-conv residual block (ResNet-18/34 style).\"\"\"\n",
     "    # Output channels = expansion * planes; basic blocks do not expand.\n",
     "    expansion = 1\n",
     " \n",
     "    def __init__(self, in_planes, planes, stride=1):\n",
     "        super(BasicBlock, self).__init__()\n",
     "        # First 3x3 conv carries `stride`, so any downsampling happens here.\n",
     "        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n",
     "        self.bn1 = nn.BatchNorm2d(planes)\n",
     "        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)\n",
     "        self.bn2 = nn.BatchNorm2d(planes)\n",
     " \n",
     "        # Identity shortcut unless shape changes; then a 1x1 conv projection\n",
     "        # matches both the channel count and the spatial stride.\n",
     "        self.shortcut = nn.Sequential()\n",
     "        if stride != 1 or in_planes != self.expansion*planes:\n",
     "            self.shortcut = nn.Sequential(\n",
     "                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),\n",
     "                nn.BatchNorm2d(self.expansion*planes)\n",
     "            )\n",
     " \n",
     "    def forward(self, x):\n",
     "        # conv-bn-relu, conv-bn, add shortcut, then the final relu.\n",
     "        out = F.relu(self.bn1(self.conv1(x)))\n",
     "        out = self.bn2(self.conv2(out))\n",
     "        out += self.shortcut(x)\n",
     "        out = F.relu(out)\n",
     "        return out\n",
    " \n",
    " \n",
     "class Bottleneck(nn.Module):\n",
     "    \"\"\"1x1-3x3-1x1 bottleneck residual block (ResNet-50/101/152 style).\"\"\"\n",
     "    # Output channels = expansion * planes (the final 1x1 conv expands 4x).\n",
     "    expansion = 4\n",
     " \n",
     "    def __init__(self, in_planes, planes, stride=1):\n",
     "        super(Bottleneck, self).__init__()\n",
     "        # 1x1 reduce -> 3x3 (carries the stride) -> 1x1 expand.\n",
     "        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)\n",
     "        self.bn1 = nn.BatchNorm2d(planes)\n",
     "        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n",
     "        self.bn2 = nn.BatchNorm2d(planes)\n",
     "        self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)\n",
     "        self.bn3 = nn.BatchNorm2d(self.expansion*planes)\n",
     " \n",
     "        # Identity shortcut unless shape changes; then a 1x1 conv projection.\n",
     "        self.shortcut = nn.Sequential()\n",
     "        if stride != 1 or in_planes != self.expansion*planes:\n",
     "            self.shortcut = nn.Sequential(\n",
     "                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),\n",
     "                nn.BatchNorm2d(self.expansion*planes)\n",
     "            )\n",
     " \n",
     "    def forward(self, x):\n",
     "        # Three conv-bn stages (relu on the first two), residual add, final relu.\n",
     "        out = F.relu(self.bn1(self.conv1(x)))\n",
     "        out = F.relu(self.bn2(self.conv2(out)))\n",
     "        out = self.bn3(self.conv3(out))\n",
     "        out += self.shortcut(x)\n",
     "        out = F.relu(out)\n",
     "        return out\n",
    " \n",
    " \n",
     "class ResNet(nn.Module):\n",
     "    \"\"\"ResNet backbone with a 3x3 stem and no max-pool (CIFAR-style variant).\n",
     "\n",
     "    Args:\n",
     "        block: residual block class (BasicBlock or Bottleneck).\n",
     "        num_blocks: list of four ints, blocks per stage.\n",
     "        num_classes: size of the final linear layer's output.\n",
     "    \"\"\"\n",
     "    def __init__(self, block, num_blocks, num_classes=10):\n",
     "        super(ResNet, self).__init__()\n",
     "        self.in_planes = 64\n",
     " \n",
     "        # Stem: a single stride-1 3x3 conv (no 7x7/stride-2 conv or max-pool).\n",
     "        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)\n",
     "        self.bn1 = nn.BatchNorm2d(64)\n",
     "        # Four stages; stages 2-4 halve the spatial size via stride=2 in their first block.\n",
     "        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)\n",
     "        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)\n",
     "        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)\n",
     "        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)\n",
     "        self.linear = nn.Linear(512*block.expansion, num_classes)\n",
     " \n",
     "    def _make_layer(self, block, planes, num_blocks, stride):\n",
     "        # Only the first block of a stage may downsample; the rest use stride 1.\n",
     "        strides = [stride] + [1]*(num_blocks-1)\n",
     "        layers = []\n",
     "        for stride in strides:\n",
     "            layers.append(block(self.in_planes, planes, stride))\n",
     "            # Next block's input width is this block's (expanded) output width.\n",
     "            self.in_planes = planes * block.expansion\n",
     "        return nn.Sequential(*layers)\n",
     " \n",
     "    def forward(self, x, out_feature=False):\n",
     "        \"\"\"Return logits; if out_feature is True, also return the pooled feature vector.\"\"\"\n",
     "        out = F.relu(self.bn1(self.conv1(x)))\n",
     "        out = self.layer1(out)\n",
     "        out = self.layer2(out)\n",
     "        out = self.layer3(out)\n",
     "        out = self.layer4(out)\n",
     "        # 4x4 average pool; yields a 1x1 map only for 32x32 inputs\n",
     "        # (NOTE: assumes CIFAR-sized images -- consistent with the trainer below).\n",
     "        out = F.avg_pool2d(out, 4)\n",
     "        feature = out.view(out.size(0), -1)\n",
     "        out = self.linear(feature)\n",
     "        if out_feature == False:\n",
     "            return out\n",
     "        else:\n",
     "            return out,feature\n",
    " \n",
    " \n",
    "def ResNet18(num_classes=10):\n",
    "    return ResNet(BasicBlock, [2,2,2,2], num_classes)\n",
    " \n",
    "def ResNet34(num_classes=10):\n",
    "    return ResNet(BasicBlock, [3,4,6,3], num_classes)\n",
    " \n",
    "def ResNet50(num_classes=10):\n",
    "    return ResNet(Bottleneck, [3,4,6,3], num_classes)\n",
    " \n",
    "def ResNet101(num_classes=10):\n",
    "    return ResNet(Bottleneck, [3,4,23,3], num_classes)\n",
    " \n",
    "def ResNet152(num_classes=10):\n",
    "    return ResNet(Bottleneck, [3,8,36,3], num_classes)\n",
    " \n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Model Trainer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "class TeacherTrainer:\n",
    "\tdef __init__(self, path_ckpt, path_loss, path_dataset, name_dataset='cifar10', \n",
    "\t\t\t\t bs=512, num_epochsaving=100, resume_train=False, path_resume=None):\n",
    "\t\tif name_dataset == 'cifar10':\n",
    "\t\t\ttransform_train = transforms.Compose([\n",
    "\t\t\t\ttransforms.RandomCrop(32, padding=4),\n",
    "\t\t\t\ttransforms.RandomHorizontalFlip(),\n",
    "\t\t\t\ttransforms.ToTensor(),\n",
    "\t\t\t\ttransforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n",
    "\t\t\t\t])\n",
    "\t\t\ttransform_test = transforms.Compose([\n",
    "\t\t\t\ttransforms.ToTensor(),\n",
    "\t\t\t\ttransforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n",
    "\t\t\t])\n",
    "\t\t\tself.dataset_train = CIFAR10(path_dataset, transform=transform_train)\n",
    "\t\t\tself.dataset_test = CIFAR10(path_dataset, train=False, transform=transform_test)\n",
    "\t\t\tself.dataset_test_loader = DataLoader(self.dataset_test, batch_size=100, num_workers=0)\n",
    "\t\t\tself.dataset_train_loader = DataLoader(self.dataset_train, batch_size=128, shuffle=True, num_workers=8)\n",
    "\t\t\t\n",
    "\t\t\t\n",
    "\t\t\ttorch.cuda.set_device('cuda:0')\n",
    "\t\t\tself.net = ResNet34().cuda()\n",
    "\t\t\t#self.net = nn.DataParallel(self.net,device_ids=[0,1,2,3], output_device=0)\n",
    "\t\t\tself.criterion = torch.nn.CrossEntropyLoss().cuda()\n",
    "\t\t\tself.optimizer = torch.optim.SGD(self.net.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)\n",
    "\t\t\t#self.lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(self.optimizer, T_0=5, T_mult=2, eta_min=0, last_epoch=-1)\n",
    "\t\t\tself.last_epoch = 0\n",
    "\t\t\tif resume_train:\n",
    "\t\t\t\tckpt = torch.load(path_resume)\n",
    "\t\t\t\tnet.load_state_dict(ckpt['net'])\n",
    "\t\t\t\tself.optimizer.load_state_dict(ckpt['optimizer'])\n",
    "\t\t\t\tself.lr_scheduler.load_state_dict(ckpt['lr_scheduler'])\n",
    "\t\t\t\tself.last_epoch = ckpt['epoch']\n",
    "\t\t\t\n",
    "\t\t\t\n",
    "\t\t\n",
    "\t\t# 训练相关参数\n",
    "\t\tself.best_accr = 0\n",
    "\t\tself.list_loss = []\n",
    "\t\tself.path_ckpt = path_ckpt\n",
    "\t\tself.path_loss = path_loss\n",
    "\t\tself.num_epochsaving = num_epochsaving\n",
    "\tdef train(self, epochs):\n",
    "\t\t\n",
    "\t\tfor epoch in range(self.last_epoch+1, epochs+1):\n",
    "\t\t\tself.net.train()\n",
    "\t\t\tloss_epoch = 0\n",
    "\t\t\tfor i, (batch_img, batch_label) in enumerate(self.dataset_train_loader, start=1):\n",
    "\t\t\t\tbatch_img, batch_label = Variable(batch_img).cuda(), Variable(batch_label).cuda()\n",
    "\t\t\t\tself.optimizer.zero_grad()\n",
    "\t\t\t\toutput = self.net(batch_img)\n",
    "\t\t\t\tloss = self.criterion(output, batch_label)\n",
    "\t\t\t\tloss.backward()\n",
    "\t\t\t\tself.optimizer.step()\n",
    "\t\t\t\tloss_epoch += loss.data.item()\n",
    "\t\t\t# 一个epoch结束\n",
    "\t\t\t#self.lr_scheduler.step()\n",
    "\t\t\tself.adjust_lr(epoch)\n",
    "\t\t\tself.list_loss.append(loss_epoch)\n",
    "\t\t\tprint('Train-Epoch:%d, Loss:%f'%(epoch, loss_epoch))\n",
    "\t\t\t# 测试\n",
    "\t\t\tself.test(epoch)\n",
    "\t\tself.save_experiment(epochs)\n",
    "\t\t\n",
    "\t\n",
    "\tdef adjust_lr(self, epoch):\n",
    "\t\tif epoch < 80:\n",
    "\t\t\tlr = 0.1\n",
    "\t\tif epoch < 120:\n",
    "\t\t\tlr = 0.01\n",
    "\t\telse:\n",
    "\t\t\tlr = 0.001\n",
    "\t\tfor param_group in self.optimizer.param_groups:## ??\n",
    "\t\t\tparam_group['lr'] = lr\n",
    "\n",
    "\tdef test(self, epoch):\n",
    "\t\tself.net.eval()\n",
    "\t\ttotal_correct = 0\n",
    "\t\twith torch.no_grad():\n",
    "\t\t\tfor i, (images, labels) in enumerate(self.dataset_test_loader, start=1):\n",
    "\t\t\t\timages, labels = Variable(images).cuda(), Variable(labels).cuda()\n",
    "\t\t\t\toutput = self.net(images)\n",
    "\t\t\t\tpred = output.data.max(1)[1]\n",
    "\t\t\t\ttotal_correct += pred.eq(labels.data.view_as(pred)).sum()\n",
    "            \n",
    "\t\tacc = float(total_correct) / len(self.dataset_test)\n",
    "\t\tif acc > self.best_accr:\n",
    "\t\t\tself.best_accr = acc\n",
    "\t\t\tif epoch > self.num_epochsaving:\n",
    "\t\t\t\tself.save_model(self.path_ckpt, epoch)\n",
    "\t\t\n",
    "\t\tprint('Test-Accuracy:%f' % (acc))\n",
    "\n",
    "\tdef save_model(self, path, epoch):\n",
    "\t\tstate = {\n",
    "\t\t\t'net': self.net.state_dict(), \n",
    "\t\t\t'optimizer':self.optimizer.state_dict(), \n",
    "\t\t\t#'lr_scheduler':self.lr_scheduler.state_dict(),\n",
    "\t\t\t'epoch':epoch}\n",
    "\t\tfilename = path + 'teacher__accr%f_epoch%d.pth'%(self.best_accr, epoch)\n",
    "\t\ttorch.save(state, filename)\n",
    "\tdef save_experiment(self, epochs):\n",
    "\t\tlossfile = np.array(self.list_loss)\n",
    "\t\tnp.save(self.path_loss + '/teacher_loss_{}'.format(epochs), lossfile)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train-Epoch:1, Loss:828.806569\n",
      "Test-Accuracy:0.376300\n",
      "Train-Epoch:2, Loss:638.174941\n",
      "Test-Accuracy:0.440600\n",
      "Train-Epoch:3, Loss:591.991789\n",
      "Test-Accuracy:0.472200\n",
      "Train-Epoch:4, Loss:553.309129\n",
      "Test-Accuracy:0.494200\n",
      "Train-Epoch:5, Loss:520.737592\n",
      "Test-Accuracy:0.533500\n",
      "Train-Epoch:6, Loss:485.463630\n",
      "Test-Accuracy:0.582400\n",
      "Train-Epoch:7, Loss:448.980289\n",
      "Test-Accuracy:0.614300\n",
      "Train-Epoch:8, Loss:413.368319\n",
      "Test-Accuracy:0.649300\n",
      "Train-Epoch:9, Loss:380.640997\n",
      "Test-Accuracy:0.647500\n",
      "Train-Epoch:10, Loss:353.763481\n",
      "Test-Accuracy:0.679700\n",
      "Train-Epoch:11, Loss:326.419531\n",
      "Test-Accuracy:0.722100\n",
      "Train-Epoch:12, Loss:305.624478\n",
      "Test-Accuracy:0.709900\n",
      "Train-Epoch:13, Loss:285.205608\n",
      "Test-Accuracy:0.754300\n",
      "Train-Epoch:14, Loss:267.143549\n",
      "Test-Accuracy:0.747900\n",
      "Train-Epoch:15, Loss:252.011575\n",
      "Test-Accuracy:0.773600\n",
      "Train-Epoch:16, Loss:237.373681\n",
      "Test-Accuracy:0.780900\n",
      "Train-Epoch:17, Loss:223.197099\n",
      "Test-Accuracy:0.784000\n",
      "Train-Epoch:18, Loss:210.005565\n",
      "Test-Accuracy:0.810800\n",
      "Train-Epoch:19, Loss:195.328277\n",
      "Test-Accuracy:0.803900\n",
      "Train-Epoch:20, Loss:184.595183\n",
      "Test-Accuracy:0.793600\n",
      "Train-Epoch:21, Loss:176.152427\n",
      "Test-Accuracy:0.822800\n",
      "Train-Epoch:22, Loss:166.181472\n",
      "Test-Accuracy:0.831400\n",
      "Train-Epoch:23, Loss:157.459397\n",
      "Test-Accuracy:0.839200\n",
      "Train-Epoch:24, Loss:149.089191\n",
      "Test-Accuracy:0.825100\n",
      "Train-Epoch:25, Loss:142.732105\n",
      "Test-Accuracy:0.843600\n",
      "Train-Epoch:26, Loss:135.149923\n",
      "Test-Accuracy:0.846600\n",
      "Train-Epoch:27, Loss:131.108065\n",
      "Test-Accuracy:0.860300\n",
      "Train-Epoch:28, Loss:123.136558\n",
      "Test-Accuracy:0.855800\n",
      "Train-Epoch:29, Loss:120.688699\n",
      "Test-Accuracy:0.861000\n",
      "Train-Epoch:30, Loss:113.007052\n",
      "Test-Accuracy:0.857400\n",
      "Train-Epoch:31, Loss:108.509123\n",
      "Test-Accuracy:0.853400\n",
      "Train-Epoch:32, Loss:102.397095\n",
      "Test-Accuracy:0.863700\n",
      "Train-Epoch:33, Loss:100.781980\n",
      "Test-Accuracy:0.865800\n",
      "Train-Epoch:34, Loss:93.529316\n",
      "Test-Accuracy:0.868200\n",
      "Train-Epoch:35, Loss:91.705348\n",
      "Test-Accuracy:0.870600\n",
      "Train-Epoch:36, Loss:85.074813\n",
      "Test-Accuracy:0.866200\n",
      "Train-Epoch:37, Loss:84.267900\n",
      "Test-Accuracy:0.869300\n",
      "Train-Epoch:38, Loss:80.148135\n",
      "Test-Accuracy:0.877800\n",
      "Train-Epoch:39, Loss:74.635458\n",
      "Test-Accuracy:0.870500\n",
      "Train-Epoch:40, Loss:72.566832\n",
      "Test-Accuracy:0.865200\n",
      "Train-Epoch:41, Loss:69.490237\n",
      "Test-Accuracy:0.877200\n",
      "Train-Epoch:42, Loss:67.419897\n",
      "Test-Accuracy:0.868100\n",
      "Train-Epoch:43, Loss:63.003676\n",
      "Test-Accuracy:0.875700\n",
      "Train-Epoch:44, Loss:62.920989\n",
      "Test-Accuracy:0.870600\n",
      "Train-Epoch:45, Loss:62.571969\n",
      "Test-Accuracy:0.875800\n",
      "Train-Epoch:46, Loss:58.583891\n",
      "Test-Accuracy:0.883000\n",
      "Train-Epoch:47, Loss:56.202036\n",
      "Test-Accuracy:0.885300\n",
      "Train-Epoch:48, Loss:53.694223\n",
      "Test-Accuracy:0.878000\n",
      "Train-Epoch:49, Loss:50.939164\n",
      "Test-Accuracy:0.882300\n",
      "Train-Epoch:50, Loss:48.173750\n",
      "Test-Accuracy:0.888200\n",
      "Train-Epoch:51, Loss:49.193151\n",
      "Test-Accuracy:0.883100\n",
      "Train-Epoch:52, Loss:46.764314\n",
      "Test-Accuracy:0.880200\n",
      "Train-Epoch:53, Loss:46.905105\n",
      "Test-Accuracy:0.885100\n",
      "Train-Epoch:54, Loss:44.277736\n",
      "Test-Accuracy:0.887300\n",
      "Train-Epoch:55, Loss:42.456437\n",
      "Test-Accuracy:0.882200\n",
      "Train-Epoch:56, Loss:41.002272\n",
      "Test-Accuracy:0.892600\n",
      "Train-Epoch:57, Loss:41.035953\n",
      "Test-Accuracy:0.889900\n",
      "Train-Epoch:58, Loss:40.797210\n",
      "Test-Accuracy:0.895000\n",
      "Train-Epoch:59, Loss:39.585670\n",
      "Test-Accuracy:0.888800\n",
      "Train-Epoch:60, Loss:36.599808\n",
      "Test-Accuracy:0.892900\n",
      "Train-Epoch:61, Loss:37.376700\n",
      "Test-Accuracy:0.893700\n",
      "Train-Epoch:62, Loss:35.930674\n",
      "Test-Accuracy:0.881400\n",
      "Train-Epoch:63, Loss:35.681892\n",
      "Test-Accuracy:0.897600\n",
      "Train-Epoch:64, Loss:35.684504\n",
      "Test-Accuracy:0.889200\n",
      "Train-Epoch:65, Loss:34.871174\n",
      "Test-Accuracy:0.889900\n",
      "Train-Epoch:66, Loss:32.920970\n",
      "Test-Accuracy:0.890100\n",
      "Train-Epoch:67, Loss:32.064673\n",
      "Test-Accuracy:0.895300\n",
      "Train-Epoch:68, Loss:31.790013\n",
      "Test-Accuracy:0.893200\n",
      "Train-Epoch:69, Loss:32.315043\n",
      "Test-Accuracy:0.897100\n",
      "Train-Epoch:70, Loss:31.171462\n",
      "Test-Accuracy:0.896300\n",
      "Train-Epoch:71, Loss:30.725178\n",
      "Test-Accuracy:0.891800\n",
      "Train-Epoch:72, Loss:30.595044\n",
      "Test-Accuracy:0.894500\n",
      "Train-Epoch:73, Loss:29.513291\n",
      "Test-Accuracy:0.886800\n",
      "Train-Epoch:74, Loss:29.185817\n",
      "Test-Accuracy:0.891100\n",
      "Train-Epoch:75, Loss:28.778401\n",
      "Test-Accuracy:0.899400\n",
      "Train-Epoch:76, Loss:29.474482\n",
      "Test-Accuracy:0.900300\n",
      "Train-Epoch:77, Loss:27.296780\n",
      "Test-Accuracy:0.900400\n",
      "Train-Epoch:78, Loss:28.858988\n",
      "Test-Accuracy:0.882800\n",
      "Train-Epoch:79, Loss:28.187578\n",
      "Test-Accuracy:0.894300\n",
      "Train-Epoch:80, Loss:29.943296\n",
      "Test-Accuracy:0.897200\n",
      "Train-Epoch:81, Loss:26.915275\n",
      "Test-Accuracy:0.899200\n",
      "Train-Epoch:82, Loss:27.537648\n",
      "Test-Accuracy:0.895400\n",
      "Train-Epoch:83, Loss:29.335678\n",
      "Test-Accuracy:0.893800\n",
      "Train-Epoch:84, Loss:28.908269\n",
      "Test-Accuracy:0.886300\n",
      "Train-Epoch:85, Loss:27.315302\n",
      "Test-Accuracy:0.897800\n",
      "Train-Epoch:86, Loss:27.639576\n",
      "Test-Accuracy:0.891600\n",
      "Train-Epoch:87, Loss:26.715374\n",
      "Test-Accuracy:0.899800\n",
      "Train-Epoch:88, Loss:27.328691\n",
      "Test-Accuracy:0.889900\n",
      "Train-Epoch:89, Loss:25.892910\n",
      "Test-Accuracy:0.902000\n",
      "Train-Epoch:90, Loss:27.440657\n",
      "Test-Accuracy:0.896900\n",
      "Train-Epoch:91, Loss:26.219237\n",
      "Test-Accuracy:0.896100\n",
      "Train-Epoch:92, Loss:25.153305\n",
      "Test-Accuracy:0.896000\n",
      "Train-Epoch:93, Loss:24.953133\n",
      "Test-Accuracy:0.894300\n",
      "Train-Epoch:94, Loss:26.943497\n",
      "Test-Accuracy:0.887100\n",
      "Train-Epoch:95, Loss:29.129437\n",
      "Test-Accuracy:0.899200\n",
      "Train-Epoch:96, Loss:24.909385\n",
      "Test-Accuracy:0.886700\n",
      "Train-Epoch:97, Loss:26.025893\n",
      "Test-Accuracy:0.894000\n",
      "Train-Epoch:98, Loss:26.160172\n",
      "Test-Accuracy:0.899600\n",
      "Train-Epoch:99, Loss:23.803462\n",
      "Test-Accuracy:0.889600\n",
      "Train-Epoch:100, Loss:24.352102\n",
      "Test-Accuracy:0.894100\n",
      "Train-Epoch:101, Loss:25.408246\n",
      "Test-Accuracy:0.895900\n",
      "Train-Epoch:102, Loss:26.078395\n",
      "Test-Accuracy:0.895300\n",
      "Train-Epoch:103, Loss:25.447714\n",
      "Test-Accuracy:0.897300\n",
      "Train-Epoch:104, Loss:24.399514\n",
      "Test-Accuracy:0.900200\n",
      "Train-Epoch:105, Loss:26.877279\n",
      "Test-Accuracy:0.904500\n",
      "Train-Epoch:106, Loss:25.606274\n",
      "Test-Accuracy:0.892900\n",
      "Train-Epoch:107, Loss:25.350265\n",
      "Test-Accuracy:0.889100\n",
      "Train-Epoch:108, Loss:24.005438\n",
      "Test-Accuracy:0.911200\n",
      "Train-Epoch:109, Loss:24.215991\n",
      "Test-Accuracy:0.892600\n",
      "Train-Epoch:110, Loss:25.139328\n",
      "Test-Accuracy:0.893200\n",
      "Train-Epoch:111, Loss:24.390617\n",
      "Test-Accuracy:0.896200\n",
      "Train-Epoch:112, Loss:22.158502\n",
      "Test-Accuracy:0.905700\n",
      "Train-Epoch:113, Loss:25.576068\n",
      "Test-Accuracy:0.895900\n",
      "Train-Epoch:114, Loss:27.239605\n",
      "Test-Accuracy:0.904000\n",
      "Train-Epoch:115, Loss:23.205416\n",
      "Test-Accuracy:0.905200\n",
      "Train-Epoch:116, Loss:26.430034\n",
      "Test-Accuracy:0.905800\n",
      "Train-Epoch:117, Loss:22.742802\n",
      "Test-Accuracy:0.908300\n",
      "Train-Epoch:118, Loss:22.682169\n",
      "Test-Accuracy:0.902400\n",
      "Train-Epoch:119, Loss:23.927469\n",
      "Test-Accuracy:0.898000\n",
      "Train-Epoch:120, Loss:23.931869\n",
      "Test-Accuracy:0.909200\n",
      "Train-Epoch:121, Loss:9.134298\n",
      "Test-Accuracy:0.925300\n",
      "Train-Epoch:122, Loss:4.867863\n",
      "Test-Accuracy:0.927100\n",
      "Train-Epoch:123, Loss:3.660492\n",
      "Test-Accuracy:0.931000\n",
      "Train-Epoch:124, Loss:3.149187\n",
      "Test-Accuracy:0.930500\n",
      "Train-Epoch:125, Loss:2.603225\n",
      "Test-Accuracy:0.933200\n",
      "Train-Epoch:126, Loss:2.489312\n",
      "Test-Accuracy:0.932000\n",
      "Train-Epoch:127, Loss:1.969017\n",
      "Test-Accuracy:0.931000\n",
      "Train-Epoch:128, Loss:1.734822\n",
      "Test-Accuracy:0.933000\n",
      "Train-Epoch:129, Loss:1.606618\n",
      "Test-Accuracy:0.932100\n",
      "Train-Epoch:130, Loss:1.626236\n",
      "Test-Accuracy:0.932600\n",
      "Train-Epoch:131, Loss:1.447180\n",
      "Test-Accuracy:0.932600\n",
      "Train-Epoch:132, Loss:1.415135\n",
      "Test-Accuracy:0.931800\n",
      "Train-Epoch:133, Loss:1.174868\n",
      "Test-Accuracy:0.933400\n",
      "Train-Epoch:134, Loss:1.150176\n",
      "Test-Accuracy:0.932900\n",
      "Train-Epoch:135, Loss:1.311001\n",
      "Test-Accuracy:0.932700\n",
      "Train-Epoch:136, Loss:1.165253\n",
      "Test-Accuracy:0.932800\n",
      "Train-Epoch:137, Loss:0.990791\n",
      "Test-Accuracy:0.933400\n",
      "Train-Epoch:138, Loss:1.036758\n",
      "Test-Accuracy:0.934100\n",
      "Train-Epoch:139, Loss:1.075476\n",
      "Test-Accuracy:0.934600\n",
      "Train-Epoch:140, Loss:1.100097\n",
      "Test-Accuracy:0.933600\n",
      "Train-Epoch:141, Loss:0.943727\n",
      "Test-Accuracy:0.935400\n",
      "Train-Epoch:142, Loss:1.004225\n",
      "Test-Accuracy:0.933800\n",
      "Train-Epoch:143, Loss:1.014155\n",
      "Test-Accuracy:0.934400\n",
      "Train-Epoch:144, Loss:0.858513\n",
      "Test-Accuracy:0.935600\n",
      "Train-Epoch:145, Loss:0.901244\n",
      "Test-Accuracy:0.934400\n",
      "Train-Epoch:146, Loss:0.857179\n",
      "Test-Accuracy:0.935100\n",
      "Train-Epoch:147, Loss:0.795358\n",
      "Test-Accuracy:0.934400\n",
      "Train-Epoch:148, Loss:0.772394\n",
      "Test-Accuracy:0.935300\n",
      "Train-Epoch:149, Loss:0.811949\n",
      "Test-Accuracy:0.935900\n",
      "Train-Epoch:150, Loss:0.814487\n",
      "Test-Accuracy:0.934800\n",
      "Train-Epoch:151, Loss:0.749365\n",
      "Test-Accuracy:0.934900\n",
      "Train-Epoch:152, Loss:0.715833\n",
      "Test-Accuracy:0.934400\n",
      "Train-Epoch:153, Loss:0.703463\n",
      "Test-Accuracy:0.935300\n",
      "Train-Epoch:154, Loss:0.668316\n",
      "Test-Accuracy:0.935100\n",
      "Train-Epoch:155, Loss:0.681749\n",
      "Test-Accuracy:0.934900\n",
      "Train-Epoch:156, Loss:0.719901\n",
      "Test-Accuracy:0.934000\n",
      "Train-Epoch:157, Loss:0.670204\n",
      "Test-Accuracy:0.934700\n",
      "Train-Epoch:158, Loss:0.668586\n",
      "Test-Accuracy:0.935400\n",
      "Train-Epoch:159, Loss:0.616599\n",
      "Test-Accuracy:0.935900\n",
      "Train-Epoch:160, Loss:0.620717\n",
      "Test-Accuracy:0.936200\n",
      "Train-Epoch:161, Loss:0.658285\n",
      "Test-Accuracy:0.935500\n",
      "Train-Epoch:162, Loss:0.634656\n",
      "Test-Accuracy:0.936600\n",
      "Train-Epoch:163, Loss:0.625725\n",
      "Test-Accuracy:0.934500\n",
      "Train-Epoch:164, Loss:0.637536\n",
      "Test-Accuracy:0.936100\n",
      "Train-Epoch:165, Loss:0.588290\n",
      "Test-Accuracy:0.936000\n",
      "Train-Epoch:166, Loss:0.572150\n",
      "Test-Accuracy:0.935900\n",
      "Train-Epoch:167, Loss:0.585954\n",
      "Test-Accuracy:0.936000\n",
      "Train-Epoch:168, Loss:0.565229\n",
      "Test-Accuracy:0.935900\n",
      "Train-Epoch:169, Loss:0.553739\n",
      "Test-Accuracy:0.935700\n",
      "Train-Epoch:170, Loss:0.537319\n",
      "Test-Accuracy:0.936300\n",
      "Train-Epoch:171, Loss:0.560270\n",
      "Test-Accuracy:0.934500\n",
      "Train-Epoch:172, Loss:0.575551\n",
      "Test-Accuracy:0.935200\n",
      "Train-Epoch:173, Loss:0.586189\n",
      "Test-Accuracy:0.936300\n",
      "Train-Epoch:174, Loss:0.641899\n",
      "Test-Accuracy:0.936800\n",
      "Train-Epoch:175, Loss:0.579189\n",
      "Test-Accuracy:0.936600\n",
      "Train-Epoch:176, Loss:0.579515\n",
      "Test-Accuracy:0.936200\n",
      "Train-Epoch:177, Loss:0.547240\n",
      "Test-Accuracy:0.937100\n",
      "Train-Epoch:178, Loss:0.542549\n",
      "Test-Accuracy:0.937400\n",
      "Train-Epoch:179, Loss:0.601014\n",
      "Test-Accuracy:0.937000\n",
      "Train-Epoch:180, Loss:0.529905\n",
      "Test-Accuracy:0.936900\n",
      "Train-Epoch:181, Loss:0.542180\n",
      "Test-Accuracy:0.936300\n",
      "Train-Epoch:182, Loss:0.551235\n",
      "Test-Accuracy:0.936400\n",
      "Train-Epoch:183, Loss:0.555182\n",
      "Test-Accuracy:0.936100\n",
      "Train-Epoch:184, Loss:0.543374\n",
      "Test-Accuracy:0.937200\n",
      "Train-Epoch:185, Loss:0.513705\n",
      "Test-Accuracy:0.935800\n",
      "Train-Epoch:186, Loss:0.555160\n",
      "Test-Accuracy:0.937000\n",
      "Train-Epoch:187, Loss:0.501768\n",
      "Test-Accuracy:0.937000\n",
      "Train-Epoch:188, Loss:0.529127\n",
      "Test-Accuracy:0.937400\n",
      "Train-Epoch:189, Loss:0.537272\n",
      "Test-Accuracy:0.936300\n",
      "Train-Epoch:190, Loss:0.535554\n",
      "Test-Accuracy:0.936700\n",
      "Train-Epoch:191, Loss:0.512998\n",
      "Test-Accuracy:0.937600\n",
      "Train-Epoch:192, Loss:0.501855\n",
      "Test-Accuracy:0.937200\n",
      "Train-Epoch:193, Loss:0.527941\n",
      "Test-Accuracy:0.937400\n",
      "Train-Epoch:194, Loss:0.508176\n",
      "Test-Accuracy:0.937400\n",
      "Train-Epoch:195, Loss:0.493045\n",
      "Test-Accuracy:0.937200\n",
      "Train-Epoch:196, Loss:0.501428\n",
      "Test-Accuracy:0.937000\n",
      "Train-Epoch:197, Loss:0.508388\n",
      "Test-Accuracy:0.937100\n",
      "Train-Epoch:198, Loss:0.466559\n",
      "Test-Accuracy:0.937500\n",
      "Train-Epoch:199, Loss:0.491714\n",
      "Test-Accuracy:0.937000\n",
      "Train-Epoch:200, Loss:0.517473\n",
      "Test-Accuracy:0.938000\n",
      "Train-Epoch:201, Loss:0.531213\n",
      "Test-Accuracy:0.936900\n",
      "Train-Epoch:202, Loss:0.533402\n",
      "Test-Accuracy:0.936500\n",
      "Train-Epoch:203, Loss:0.525402\n",
      "Test-Accuracy:0.936600\n",
      "Train-Epoch:204, Loss:0.547020\n",
      "Test-Accuracy:0.937000\n",
      "Train-Epoch:205, Loss:0.513579\n",
      "Test-Accuracy:0.935800\n",
      "Train-Epoch:206, Loss:0.481666\n",
      "Test-Accuracy:0.937100\n",
      "Train-Epoch:207, Loss:0.578007\n",
      "Test-Accuracy:0.936500\n",
      "Train-Epoch:208, Loss:0.522632\n",
      "Test-Accuracy:0.936500\n",
      "Train-Epoch:209, Loss:0.554427\n",
      "Test-Accuracy:0.937200\n",
      "Train-Epoch:210, Loss:0.471360\n",
      "Test-Accuracy:0.937700\n",
      "Train-Epoch:211, Loss:0.482678\n",
      "Test-Accuracy:0.937500\n",
      "Train-Epoch:212, Loss:0.480757\n",
      "Test-Accuracy:0.937400\n",
      "Train-Epoch:213, Loss:0.455010\n",
      "Test-Accuracy:0.937700\n",
      "Train-Epoch:214, Loss:0.523400\n",
      "Test-Accuracy:0.936800\n",
      "Train-Epoch:215, Loss:0.530855\n",
      "Test-Accuracy:0.936500\n",
      "Train-Epoch:216, Loss:0.535012\n",
      "Test-Accuracy:0.936400\n",
      "Train-Epoch:217, Loss:0.539419\n",
      "Test-Accuracy:0.936000\n",
      "Train-Epoch:218, Loss:0.493733\n",
      "Test-Accuracy:0.936400\n",
      "Train-Epoch:219, Loss:0.485247\n",
      "Test-Accuracy:0.935500\n",
      "Train-Epoch:220, Loss:0.528779\n",
      "Test-Accuracy:0.936800\n",
      "Train-Epoch:221, Loss:0.485498\n",
      "Test-Accuracy:0.936700\n",
      "Train-Epoch:222, Loss:0.512450\n",
      "Test-Accuracy:0.938000\n",
      "Train-Epoch:223, Loss:0.521037\n",
      "Test-Accuracy:0.937800\n",
      "Train-Epoch:224, Loss:0.493273\n",
      "Test-Accuracy:0.937100\n",
      "Train-Epoch:225, Loss:0.499906\n",
      "Test-Accuracy:0.936100\n",
      "Train-Epoch:226, Loss:0.510658\n",
      "Test-Accuracy:0.936200\n",
      "Train-Epoch:227, Loss:0.508040\n",
      "Test-Accuracy:0.936500\n",
      "Train-Epoch:228, Loss:0.545026\n",
      "Test-Accuracy:0.936600\n",
      "Train-Epoch:229, Loss:0.518956\n",
      "Test-Accuracy:0.936700\n",
      "Train-Epoch:230, Loss:0.543158\n",
      "Test-Accuracy:0.936200\n",
      "Train-Epoch:231, Loss:0.513809\n",
      "Test-Accuracy:0.936900\n",
      "Train-Epoch:232, Loss:0.534687\n",
      "Test-Accuracy:0.935700\n",
      "Train-Epoch:233, Loss:0.494393\n",
      "Test-Accuracy:0.936100\n",
      "Train-Epoch:234, Loss:0.488413\n",
      "Test-Accuracy:0.936200\n",
      "Train-Epoch:235, Loss:0.470408\n",
      "Test-Accuracy:0.936200\n",
      "Train-Epoch:236, Loss:0.505490\n",
      "Test-Accuracy:0.937400\n",
      "Train-Epoch:237, Loss:0.500596\n",
      "Test-Accuracy:0.935800\n",
      "Train-Epoch:238, Loss:0.534746\n",
      "Test-Accuracy:0.936700\n",
      "Train-Epoch:239, Loss:0.494219\n",
      "Test-Accuracy:0.937100\n",
      "Train-Epoch:240, Loss:0.500804\n",
      "Test-Accuracy:0.937800\n",
      "Train-Epoch:241, Loss:0.537103\n",
      "Test-Accuracy:0.936000\n",
      "Train-Epoch:242, Loss:0.489559\n",
      "Test-Accuracy:0.937400\n",
      "Train-Epoch:243, Loss:0.505600\n",
      "Test-Accuracy:0.936700\n",
      "Train-Epoch:244, Loss:0.515775\n",
      "Test-Accuracy:0.937700\n",
      "Train-Epoch:245, Loss:0.490806\n",
      "Test-Accuracy:0.935500\n",
      "Train-Epoch:246, Loss:0.501858\n",
      "Test-Accuracy:0.937900\n",
      "Train-Epoch:247, Loss:0.505853\n",
      "Test-Accuracy:0.937700\n",
      "Train-Epoch:248, Loss:0.500259\n",
      "Test-Accuracy:0.937000\n",
      "Train-Epoch:249, Loss:0.489432\n",
      "Test-Accuracy:0.936800\n",
      "Train-Epoch:250, Loss:0.549758\n",
      "Test-Accuracy:0.935800\n",
      "Train-Epoch:251, Loss:0.528244\n",
      "Test-Accuracy:0.936100\n",
      "Train-Epoch:252, Loss:0.505974\n",
      "Test-Accuracy:0.935500\n",
      "Train-Epoch:253, Loss:0.509087\n",
      "Test-Accuracy:0.935900\n",
      "Train-Epoch:254, Loss:0.474220\n",
      "Test-Accuracy:0.937100\n",
      "Train-Epoch:255, Loss:0.468926\n",
      "Test-Accuracy:0.936000\n",
      "Train-Epoch:256, Loss:0.485959\n",
      "Test-Accuracy:0.937000\n",
      "Train-Epoch:257, Loss:0.515080\n",
      "Test-Accuracy:0.937100\n",
      "Train-Epoch:258, Loss:0.510204\n",
      "Test-Accuracy:0.937300\n",
      "Train-Epoch:259, Loss:0.482267\n",
      "Test-Accuracy:0.938000\n",
      "Train-Epoch:260, Loss:0.479871\n",
      "Test-Accuracy:0.936900\n",
      "Train-Epoch:261, Loss:0.488297\n",
      "Test-Accuracy:0.937100\n",
      "Train-Epoch:262, Loss:0.541888\n",
      "Test-Accuracy:0.936100\n",
      "Train-Epoch:263, Loss:0.479357\n",
      "Test-Accuracy:0.936900\n",
      "Train-Epoch:264, Loss:0.555247\n",
      "Test-Accuracy:0.936800\n",
      "Train-Epoch:265, Loss:0.531977\n",
      "Test-Accuracy:0.936800\n",
      "Train-Epoch:266, Loss:0.477657\n",
      "Test-Accuracy:0.935700\n",
      "Train-Epoch:267, Loss:0.514610\n",
      "Test-Accuracy:0.936400\n",
      "Train-Epoch:268, Loss:0.533259\n",
      "Test-Accuracy:0.935800\n",
      "Train-Epoch:269, Loss:0.545071\n",
      "Test-Accuracy:0.937600\n",
      "Train-Epoch:270, Loss:0.498979\n",
      "Test-Accuracy:0.936200\n",
      "Train-Epoch:271, Loss:0.514807\n",
      "Test-Accuracy:0.936400\n",
      "Train-Epoch:272, Loss:0.505588\n",
      "Test-Accuracy:0.936400\n",
      "Train-Epoch:273, Loss:0.507234\n",
      "Test-Accuracy:0.936500\n",
      "Train-Epoch:274, Loss:0.535218\n",
      "Test-Accuracy:0.936400\n",
      "Train-Epoch:275, Loss:0.483243\n",
      "Test-Accuracy:0.935800\n",
      "Train-Epoch:276, Loss:0.507754\n",
      "Test-Accuracy:0.936400\n",
      "Train-Epoch:277, Loss:0.526779\n",
      "Test-Accuracy:0.936300\n",
      "Train-Epoch:278, Loss:0.527354\n",
      "Test-Accuracy:0.935300\n",
      "Train-Epoch:279, Loss:0.502186\n",
      "Test-Accuracy:0.935000\n",
      "Train-Epoch:280, Loss:0.485803\n",
      "Test-Accuracy:0.936700\n",
      "Train-Epoch:281, Loss:0.543380\n",
      "Test-Accuracy:0.935700\n",
      "Train-Epoch:282, Loss:0.509386\n",
      "Test-Accuracy:0.936900\n",
      "Train-Epoch:283, Loss:0.487721\n",
      "Test-Accuracy:0.936800\n",
      "Train-Epoch:284, Loss:0.499053\n",
      "Test-Accuracy:0.936600\n",
      "Train-Epoch:285, Loss:0.489640\n",
      "Test-Accuracy:0.937000\n",
      "Train-Epoch:286, Loss:0.519263\n",
      "Test-Accuracy:0.936400\n",
      "Train-Epoch:287, Loss:0.504935\n",
      "Test-Accuracy:0.936300\n",
      "Train-Epoch:288, Loss:0.486343\n",
      "Test-Accuracy:0.936800\n",
      "Train-Epoch:289, Loss:0.493041\n",
      "Test-Accuracy:0.937000\n",
      "Train-Epoch:290, Loss:0.518173\n",
      "Test-Accuracy:0.936400\n",
      "Train-Epoch:291, Loss:0.508099\n",
      "Test-Accuracy:0.937100\n",
      "Train-Epoch:292, Loss:0.524622\n",
      "Test-Accuracy:0.936400\n",
      "Train-Epoch:293, Loss:0.541163\n",
      "Test-Accuracy:0.936800\n",
      "Train-Epoch:294, Loss:0.529480\n",
      "Test-Accuracy:0.937300\n",
      "Train-Epoch:295, Loss:0.521531\n",
      "Test-Accuracy:0.937800\n",
      "Train-Epoch:296, Loss:0.514337\n",
      "Test-Accuracy:0.936900\n",
      "Train-Epoch:297, Loss:0.504110\n",
      "Test-Accuracy:0.937200\n",
      "Train-Epoch:298, Loss:0.490387\n",
      "Test-Accuracy:0.937000\n",
      "Train-Epoch:299, Loss:0.494493\n",
      "Test-Accuracy:0.937200\n",
      "Train-Epoch:300, Loss:0.509994\n",
      "Test-Accuracy:0.938000\n",
      "Train-Epoch:301, Loss:0.531504\n",
      "Test-Accuracy:0.937200\n",
      "Train-Epoch:302, Loss:0.517381\n",
      "Test-Accuracy:0.935300\n",
      "Train-Epoch:303, Loss:0.494807\n",
      "Test-Accuracy:0.934800\n",
      "Train-Epoch:304, Loss:0.533566\n",
      "Test-Accuracy:0.935200\n",
      "Train-Epoch:305, Loss:0.505855\n",
      "Test-Accuracy:0.934700\n",
      "Train-Epoch:306, Loss:0.558716\n",
      "Test-Accuracy:0.935000\n",
      "Train-Epoch:307, Loss:0.517722\n",
      "Test-Accuracy:0.936200\n",
      "Train-Epoch:308, Loss:0.512610\n",
      "Test-Accuracy:0.936100\n",
      "Train-Epoch:309, Loss:0.537539\n",
      "Test-Accuracy:0.935300\n",
      "Train-Epoch:310, Loss:0.497978\n",
      "Test-Accuracy:0.935100\n"
     ]
    }
   ],
   "source": [
    "# Train the teacher network on CIFAR-10 for 310 epochs.\n",
    "# Checkpoints are written under cache/models/teacher/ and loss curves under\n",
    "# cache/experimental_data/, both relative to the current working directory.\n",
    "path_current = os.getcwd()\n",
    "path_ckpt = os.path.join(path_current, 'cache/models/teacher/')\n",
    "path_loss = os.path.join(path_current,'cache/experimental_data/')\n",
    "# NOTE(review): machine-specific absolute dataset path — adjust before running elsewhere.\n",
    "path_cifar = \"/home/ubuntu/datasets/\"\n",
    "# bs = batch size; num_epochsaving presumably controls the checkpoint-saving\n",
    "# interval — TODO confirm against the TeacherTrainer definition above.\n",
    "train_teacher = TeacherTrainer(path_ckpt, path_loss, path_cifar, bs=128, num_epochsaving=150)\n",
    "train_teacher.train(310)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# test model accuracy"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Test Avg. Loss: 0.002731, Accuracy: 0.938000\n"
     ]
    }
   ],
   "source": [
    "# Evaluate a saved teacher checkpoint on the CIFAR-10 test set.\n",
    "# NOTE(review): the paths below are machine-specific absolute paths — adjust\n",
    "# before running on another machine.\n",
    "net = ResNet34().cuda()\n",
    "path_cifar = \"/home/ubuntu/datasets/\"\n",
    "ckpt = torch.load('/home/ubuntu/YZP/gitee/paper-reading/DeepInversion/cache/models/teacher/teacher__accr0.938000_epoch200.pth')\n",
    "net.load_state_dict(ckpt['net'])\n",
    "\n",
    "# Test-time preprocessing: tensor conversion + CIFAR-10 channel statistics.\n",
    "transform_test = transforms.Compose([\n",
    "    transforms.ToTensor(),\n",
    "    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n",
    "])\n",
    "\n",
    "data_test = CIFAR10(path_cifar,\n",
    "                    train=False,\n",
    "                    transform=transform_test)\n",
    "data_test_loader = DataLoader(data_test, batch_size=100, num_workers=0)\n",
    "criterion = nn.CrossEntropyLoss().cuda()\n",
    "\n",
    "def test():\n",
    "    \"\"\"Run one evaluation pass over the test set.\n",
    "\n",
    "    Prints the per-sample average cross-entropy loss and the top-1 accuracy,\n",
    "    and publishes the accuracy in the module-level variable `acc`.\n",
    "    \"\"\"\n",
    "    global acc\n",
    "    net.eval()\n",
    "    total_correct = 0\n",
    "    total_loss = 0.0\n",
    "    with torch.no_grad():\n",
    "        for images, labels in data_test_loader:\n",
    "            images, labels = images.cuda(), labels.cuda()\n",
    "            output = net(images)\n",
    "            # criterion returns the batch MEAN (reduction='mean'); weight it by\n",
    "            # the batch size so dividing by len(data_test) below yields the true\n",
    "            # per-sample average (the old `.sum()` was a no-op on the scalar and\n",
    "            # under-reported the loss by a factor of the batch size).\n",
    "            total_loss += criterion(output, labels).item() * images.size(0)\n",
    "            pred = output.argmax(dim=1)\n",
    "            total_correct += pred.eq(labels).sum().item()\n",
    "\n",
    "    avg_loss = total_loss / len(data_test)\n",
    "    acc = total_correct / len(data_test)\n",
    "    print('Test Avg. Loss: %f, Accuracy: %f' % (avg_loss, acc))\n",
    "test()\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": []
  }
 ],
 "metadata": {
  "interpreter": {
   "hash": "b616bc69f5e56e869b6afa9b75bee44fb0b9cfffce48900ded2d9beddfe2e77a"
  },
  "kernelspec": {
   "display_name": "Python 3.6.13 64-bit ('torch-1.7': conda)",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.13"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
