{
 "cells": [
  {
   "cell_type": "markdown",
   "source": [
    "# 1-总体思路\n",
    "GAN + 知识蒸馏\n",
    "\n",
    "网络总共有:\n",
    "1. generator\n",
    "2. teacher（已经训练好）\n",
    "3. student"
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "source": [
    "import os\n",
    "import numpy as np\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torchvision.transforms as transforms\n",
    "from torchvision.datasets.mnist import MNIST\n",
    "from torch.utils.data import DataLoader\n",
    "from torch.autograd import Variable\n",
    "# import import_ipynb\n",
    "# from model import Generator, LeNet5, LeNet5Half"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "markdown",
   "source": [
    "# 2-网络的定义"
   ],
   "metadata": {}
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 2.1 教师网络LeNet"
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "source": [
    "class LeNet5(nn.Module):\n",
    "    \"\"\"\n",
    "    Input: [batch, 1, 32, 32]\n",
    "    Output:[batch, 10]\n",
    "    \"\"\"\n",
    "    def __init__(self):\n",
    "        super(LeNet5, self).__init__()\n",
    "\n",
    "        self.conv1 = nn.Conv2d(1, 6, kernel_size=(5, 5))    #[28, 28]\n",
    "        self.relu1 = nn.ReLU()\n",
    "        self.maxpool1 = nn.MaxPool2d(kernel_size=(2, 2), stride=2)  #[14, 14]\n",
    "        self.conv2 = nn.Conv2d(6, 16, kernel_size=(5, 5))       #[10, 10]\n",
    "        self.relu2 = nn.ReLU()\n",
    "        self.maxpool2 = nn.MaxPool2d(kernel_size=(2, 2), stride=2)  #[5, 5]\n",
    "        self.conv3 = nn.Conv2d(16, 120, kernel_size=(5, 5)) #[1, 1]\n",
    "        self.relu3 = nn.ReLU()\n",
    "        self.fc1 = nn.Linear(120, 84)\n",
    "        self.relu4 = nn.ReLU()\n",
    "        self.fc2 = nn.Linear(84, 10)\n",
    "\n",
    "    def forward(self, img, out_feature=False):\n",
    "        output = self.conv1(img)\n",
    "        output = self.relu1(output)\n",
    "        output = self.maxpool1(output)  \n",
    "        \n",
    "        output = self.conv2(output)\n",
    "        output = self.relu2(output)\n",
    "        output = self.maxpool2(output)\n",
    "        \n",
    "        output = self.conv3(output)\n",
    "        output = self.relu3(output)\n",
    "        \n",
    "        feature = output.view(-1, 120)\n",
    "        output = self.fc1(feature)\n",
    "        output = self.relu4(output)\n",
    "        output = self.fc2(output)\n",
    "        if out_feature == False:\n",
    "            return output\n",
    "        else:\n",
    "            return output,feature"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 2.2 学生网络LeNet5Half"
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "source": [
    "class LeNet5Half(nn.Module):\n",
    "\n",
    "    def __init__(self):\n",
    "        super(LeNet5Half, self).__init__()\n",
    "\n",
    "        self.conv1 = nn.Conv2d(1, 3, kernel_size=(5, 5))\n",
    "        self.relu1 = nn.ReLU()\n",
    "        self.maxpool1 = nn.MaxPool2d(kernel_size=(2, 2), stride=2)\n",
    "\n",
    "        self.conv2 = nn.Conv2d(3, 8, kernel_size=(5, 5))\n",
    "        self.relu2 = nn.ReLU()\n",
    "        self.maxpool2 = nn.MaxPool2d(kernel_size=(2, 2), stride=2)\n",
    "\n",
    "        self.conv3 = nn.Conv2d(8, 60, kernel_size=(5, 5))\n",
    "        self.relu3 = nn.ReLU()\n",
    "        self.fc1 = nn.Linear(60, 42)\n",
    "        self.relu4 = nn.ReLU()\n",
    "        self.fc2 = nn.Linear(42, 10)\n",
    "\n",
    "    def forward(self, img, out_feature=False):\n",
    "        output = self.conv1(img)\n",
    "        output = self.relu1(output)\n",
    "        output = self.maxpool1(output)\n",
    "\n",
    "        output = self.conv2(output)\n",
    "        output = self.relu2(output)\n",
    "        output = self.maxpool2(output)\n",
    "\n",
    "        output = self.conv3(output)\n",
    "        output = self.relu3(output)\n",
    "        feature = output.view(-1, 60)\n",
    "\n",
    "        output = self.fc1(feature)\n",
    "        output = self.relu4(output)\n",
    "        output = self.fc2(output)\n",
    "\n",
    "        if out_feature == False:\n",
    "            return output\n",
    "        else:\n",
    "            return output, feature"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 2.3生成器网络\n",
    "input:[batch_size, 100]\n",
    "\n",
    "output:[batch_size, 32, 32]"
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "source": [
    "class Generator(nn.Module):\n",
    "    \"\"\"\n",
    "    Input:[batch, 100]\n",
    "    Out:[batch, 1, 32, 32]\n",
    "    \"\"\"\n",
    "    def __init__(self):\n",
    "        super(Generator, self).__init__()\n",
    "\n",
    "        self.init_size = 32 // 4\n",
    "        self.l1 = nn.Sequential(nn.Linear(100, 128*self.init_size**2))\n",
    "\n",
    "        self.conv_blocks0 = nn.Sequential(\n",
    "            nn.BatchNorm2d(128),\n",
    "        )\n",
    "        self.conv_blocks1 = nn.Sequential(\n",
    "            nn.Conv2d(128, 128, 3, stride=1, padding=1),\n",
    "            nn.BatchNorm2d(128, 0.8),\n",
    "            nn.LeakyReLU(0.2, inplace=True),\n",
    "        )\n",
    "        self.conv_blocks2 = nn.Sequential(\n",
    "            nn.Conv2d(128, 64, 3, stride=1, padding=1),\n",
    "            nn.BatchNorm2d(64, 0.8),\n",
    "            nn.LeakyReLU(0.2, inplace=True),\n",
    "            nn.Conv2d(64, 1, 3, stride=1, padding=1),\n",
    "            nn.Tanh(),\n",
    "            nn.BatchNorm2d(1, affine=False) \n",
    "        )\n",
    "\n",
    "    def forward(self, z):\n",
    "        out = self.l1(z)\n",
    "        out = out.view(out.shape[0], 128, self.init_size, self.init_size)\n",
    "        img = self.conv_blocks0(out)\n",
    "        img = nn.functional.interpolate(img,scale_factor=2)\n",
    "        img = self.conv_blocks1(img)\n",
    "        img = nn.functional.interpolate(img,scale_factor=2)\n",
    "        img = self.conv_blocks2(img)\n",
    "        return img"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "markdown",
   "source": [
    "# 3-教师网络的训练"
   ],
   "metadata": {}
  },
  {
   "cell_type": "markdown",
   "source": [
    "数据集：MNIST\n",
    "\n",
    "损失函数：交叉熵\n",
    "\n",
    "梯度更新方式：Adam\n",
    "\n",
    "学习率：lr = 0.001"
   ],
   "metadata": {}
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 3.1 训练类定义"
   ],
   "metadata": {}
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 3.2 开始训练及保存参数"
   ],
   "metadata": {}
  },
  {
   "cell_type": "markdown",
   "source": [
    "# 3-训练类定义\n",
    "激活损失函数：loss_active\n",
    "\n",
    "生成器损失函数：loss_onehot"
   ],
   "metadata": {}
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 3.1教师网络训练类"
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "source": [
    "class TeacherTrainer:\n",
    "    \"\"\"Trains the LeNet5 teacher on MNIST and checkpoints the best model.\n",
    "\n",
    "    Args:\n",
    "        path_teacher_ckpt: directory (with trailing '/') where checkpoints\n",
    "            and the per-epoch loss history are written.\n",
    "    \"\"\"\n",
    "    def __init__(self, path_teacher_ckpt):\n",
    "        self.path_teacher_ckpt = path_teacher_ckpt\n",
    "        self.teacher = LeNet5().cuda()\n",
    "        self.criterion = nn.CrossEntropyLoss().cuda()\n",
    "        self.optimizer = torch.optim.Adam(self.teacher.parameters(), lr = 0.001)\n",
    "        self.loss_list = []  # summed training loss per epoch\n",
    "        self.best_accr = 0   # best test accuracy seen so far\n",
    "        # MNIST resized to 32x32 (LeNet-5 input) and normalized with the\n",
    "        # standard MNIST mean/std.\n",
    "        transform = transforms.Compose([transforms.Resize((32, 32)), transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])\n",
    "        self.data_train = MNIST('~/workspace/dataset/', transform=transform, download=False)\n",
    "        self.data_test = MNIST('~/workspace/dataset/', train=False, transform=transform)\n",
    "        self.data_train_loader = DataLoader(self.data_train, batch_size=256, shuffle=True, num_workers=8)\n",
    "        self.data_test_loader = DataLoader(self.data_test, batch_size=1024, num_workers=8)\n",
    "\n",
    "    def train(self, epochs):\n",
    "        \"\"\"Train for `epochs` epochs, testing (and checkpointing) after each.\"\"\"\n",
    "        for epoch in range(1, epochs+1):\n",
    "            # BUGFIX: re-enter train mode every epoch; test() switches the\n",
    "            # network to eval() and the old code never switched it back, so\n",
    "            # epochs >= 2 were trained in eval mode.\n",
    "            self.teacher.train()\n",
    "            loss_epoch = 0\n",
    "            for images, labels in self.data_train_loader:\n",
    "                images, labels = images.cuda(), labels.cuda()\n",
    "                self.optimizer.zero_grad()\n",
    "                outputs = self.teacher(images)\n",
    "                loss = self.criterion(outputs, labels)\n",
    "                loss.backward()\n",
    "                self.optimizer.step()\n",
    "                loss_epoch += loss.data.item()\n",
    "            self.loss_list.append(loss_epoch)\n",
    "            print('Finish epoch: %d, sum loss:%f' % (epoch, loss_epoch))\n",
    "            self.test(epoch)\n",
    "        # BUGFIX: save the loss history next to the checkpoints instead of a\n",
    "        # hard-coded absolute path that only exists on one machine.\n",
    "        np.save(os.path.join(self.path_teacher_ckpt, 'teacher_loss_{}'.format(epochs)), np.array(self.loss_list))\n",
    "\n",
    "    def test(self, epoch):\n",
    "        \"\"\"Evaluate on the MNIST test set; checkpoint if accuracy improved.\"\"\"\n",
    "        self.teacher.eval()\n",
    "        total_correct = 0\n",
    "        with torch.no_grad():\n",
    "            for images, labels in self.data_test_loader:\n",
    "                images, labels = images.cuda(), labels.cuda()\n",
    "                output = self.teacher(images)\n",
    "                pred = output.data.max(1)[1]\n",
    "                total_correct += pred.eq(labels.data.view_as(pred)).sum()\n",
    "\n",
    "        acc = float(total_correct) / len(self.data_test)\n",
    "        if acc > self.best_accr:\n",
    "            self.best_accr = acc\n",
    "            self.save_model(self.path_teacher_ckpt, epoch)\n",
    "\n",
    "        print('Test Accuracy:%f' % (acc))\n",
    "\n",
    "    def save_model(self, path, epoch):\n",
    "        \"\"\"Persist model + optimizer state so training can be resumed.\"\"\"\n",
    "        state = {'net': self.teacher.state_dict(), 'optimizer':self.optimizer.state_dict(), 'epoch':epoch}\n",
    "        filename = path + 'teacher__accr%f_epoch%d.pth'%(self.best_accr, epoch)\n",
    "        torch.save(state, filename)"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 3.2学生和生成器网络训练类"
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "source": [
    "class StudentTrainer:\n",
    "    \"\"\"Data-free knowledge distillation (DAFL): jointly trains a generator\n",
    "    and a student against a frozen, pre-trained teacher.\n",
    "\n",
    "    Args:\n",
    "        teacher_ckpt_path: .pth checkpoint of the trained teacher.\n",
    "        student_ckpt_path: directory (trailing '/') for student checkpoints.\n",
    "        path_dataset: MNIST root, used only for the real test set.\n",
    "        path_loss: directory where the loss history is saved.\n",
    "        epochs, batch_size, latent_dim, lr_G, lr_S: training hyper-parameters.\n",
    "        oh, ie, a: weights of the one-hot, information-entropy and\n",
    "            activation losses that steer the generator.\n",
    "    \"\"\"\n",
    "    def __init__(self, teacher_ckpt_path, student_ckpt_path, path_dataset, path_loss, epochs, batch_size, latent_dim, lr_G, lr_S, oh, ie, a):\n",
    "        ## training hyper-parameters\n",
    "        self.epochs = epochs\n",
    "        self.lr_G = lr_G\n",
    "        self.lr_S = lr_S\n",
    "        self.latent_dim = latent_dim\n",
    "        self.batch_size = batch_size\n",
    "        self.oh = oh\n",
    "        self.ie = ie\n",
    "        self.a = a\n",
    "        self.student_ckpt_path = student_ckpt_path\n",
    "        self.best_accr = 0\n",
    "        self.best_epoch = 0\n",
    "        self.loss_list = []\n",
    "        self.path_loss = path_loss\n",
    "        ## real test data (evaluation only; training uses no real data)\n",
    "        self.data_test = MNIST(path_dataset, train=False, transform=transforms.Compose([transforms.Resize((32, 32)), transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]))\n",
    "        self.data_test_loader = DataLoader(self.data_test, batch_size=64, num_workers=8)\n",
    "        ## networks: frozen teacher, trainable student + generator\n",
    "        self.teacher = LeNet5().cuda()\n",
    "        self.teacher.load_state_dict(torch.load(teacher_ckpt_path)['net'])\n",
    "        self.teacher.eval()  # the teacher is fixed; keep it in eval mode\n",
    "        self.student = LeNet5Half().cuda()\n",
    "        self.generate = Generator().cuda()\n",
    "        ## losses and optimizers (Adam for G, SGD for S as in the DAFL setup)\n",
    "        self.criterion = nn.CrossEntropyLoss().cuda()\n",
    "        self.optimizer_G = torch.optim.Adam(self.generate.parameters(), lr = self.lr_G)\n",
    "        self.optimizer_S = torch.optim.SGD(self.student.parameters(), lr = self.lr_S)\n",
    "        self.lr_scheduler_S = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(self.optimizer_S, T_0=5, T_mult=2)\n",
    "        self.lr_scheduler_G = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(self.optimizer_G, T_0=5, T_mult=2)\n",
    "\n",
    "    def train(self):\n",
    "        \"\"\"Joint update: every step draws noise, generates images, and updates\n",
    "        generator and student from one combined loss.\"\"\"\n",
    "        for epoch in range(1, self.epochs+1):\n",
    "            self.generate.train()\n",
    "            self.student.train()\n",
    "            loss_epoch = 0\n",
    "            for _ in range(120):  # 120 synthetic batches per \"epoch\"\n",
    "                z = torch.randn(self.batch_size, self.latent_dim).cuda()\n",
    "                self.optimizer_G.zero_grad()\n",
    "                self.optimizer_S.zero_grad()\n",
    "                gen_img = self.generate(z)\n",
    "                output, features = self.teacher(gen_img, out_feature = True)\n",
    "                pseudo_labels = output.data.max(1)[1]\n",
    "                # Generator-steering losses (DAFL):\n",
    "                ## 1) activation loss: favor images that strongly excite the teacher\n",
    "                loss_active = -features.abs().mean()\n",
    "                ## 2) one-hot loss: teacher should be confident on generated images\n",
    "                loss_onehot = self.criterion(output, pseudo_labels)\n",
    "                ## 3) information-entropy loss: batch predictions should spread over classes\n",
    "                softmax_o_T = F.softmax(output, dim = 1).mean(dim=0)\n",
    "                loss_information_entropy = (softmax_o_T * torch.log10(softmax_o_T)).sum()\n",
    "\n",
    "                # Student losses (gen_img detached so they do not update G).\n",
    "                # PERF: run the student forward once and reuse the logits; the\n",
    "                # old code ran the identical forward pass twice per step.\n",
    "                student_logits = self.student(gen_img.detach())\n",
    "                loss_kd = self.kdloss(student_logits, output.detach())\n",
    "                loss_kd_h = self.criterion(student_logits, pseudo_labels)  # hard pseudo-label loss\n",
    "\n",
    "                loss = loss_onehot * self.oh + loss_information_entropy * self.ie + loss_active * self.a + loss_kd + loss_kd_h\n",
    "                loss.backward()\n",
    "                self.optimizer_S.step()\n",
    "                self.optimizer_G.step()\n",
    "                loss_epoch += loss.data.item()\n",
    "            print('Epoch%d, loss%f' % (epoch, loss_epoch))\n",
    "            self.loss_list.append(loss_epoch)\n",
    "            self.test(epoch)\n",
    "        np.save(self.path_loss + 'student_SGD_epoch_{}'.format(self.epochs), np.array(self.loss_list))\n",
    "\n",
    "    def test(self, epoch):\n",
    "        \"\"\"Evaluate the student on real MNIST test data; checkpoint on improvement.\"\"\"\n",
    "        self.student.eval()  # moved out of the batch loop (loop-invariant)\n",
    "        total_correct = 0\n",
    "        with torch.no_grad():\n",
    "            for images, labels in self.data_test_loader:\n",
    "                images, labels = images.cuda(), labels.cuda()\n",
    "                output = self.student(images)\n",
    "                pred = output.data.max(1)[1]\n",
    "                total_correct += pred.eq(labels.view_as(pred)).sum()\n",
    "        accr = float(total_correct) / len(self.data_test)\n",
    "        print('Test Accuracy: %f' % (accr))\n",
    "        if accr > self.best_accr:\n",
    "            self.best_accr, self.best_epoch = accr, epoch\n",
    "            self.save_model(self.best_epoch, self.best_accr)\n",
    "\n",
    "    def kdloss(self, y, teacher_scores):\n",
    "        \"\"\"Distillation loss: KL divergence of the student's log-probabilities\n",
    "        from the teacher's probabilities, summed over classes and averaged\n",
    "        over the batch.\"\"\"\n",
    "        p = F.log_softmax(y, dim=1)\n",
    "        q = F.softmax(teacher_scores, dim=1)\n",
    "        # BUGFIX: size_average=False is deprecated (it produced the warning\n",
    "        # visible in this cell's stderr); reduction='sum' is the exact\n",
    "        # replacement.\n",
    "        l_kl = F.kl_div(p, q, reduction='sum') / y.shape[0]\n",
    "        return l_kl\n",
    "\n",
    "    def save_model(self, epochs, accr):\n",
    "        \"\"\"Persist student + optimizer state.\"\"\"\n",
    "        state = {'net': self.student.state_dict(), 'optimizer':self.optimizer_S.state_dict(), 'epoch':epochs}\n",
    "        filename = self.student_ckpt_path + 'student_accr%f_epoch_%d.pth' %(accr, epochs)\n",
    "        torch.save(state, filename)"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 3.3 生成器和学生函数分开训练"
   ],
   "metadata": {}
  },
  {
   "cell_type": "markdown",
   "source": [
    "# 开始训练"
   ],
   "metadata": {}
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 训练教师网络"
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "source": [
    "# Resolve the checkpoint directory from the working directory instead of a\n",
    "# machine-specific absolute path (consistent with the student-training cell).\n",
    "trainteacher = TeacherTrainer(os.getcwd() + '/cache/models/teacher/')\n",
    "epochs = 150\n",
    "trainteacher.train(epochs)"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "Finish epoch: 1, sum loss:103.485972\n",
      "Test Accuracy:0.959100\n",
      "Finish epoch: 2, sum loss:24.745323\n",
      "Test Accuracy:0.975300\n",
      "Finish epoch: 3, sum loss:15.846061\n",
      "Test Accuracy:0.984200\n",
      "Finish epoch: 4, sum loss:12.867239\n",
      "Test Accuracy:0.984600\n",
      "Finish epoch: 5, sum loss:10.997393\n",
      "Test Accuracy:0.985500\n",
      "Finish epoch: 6, sum loss:9.722271\n",
      "Test Accuracy:0.986100\n",
      "Finish epoch: 7, sum loss:8.097537\n",
      "Test Accuracy:0.989200\n",
      "Finish epoch: 8, sum loss:7.421795\n",
      "Test Accuracy:0.987300\n",
      "Finish epoch: 9, sum loss:6.605359\n",
      "Test Accuracy:0.987400\n",
      "Finish epoch: 10, sum loss:5.920653\n",
      "Test Accuracy:0.988600\n",
      "Finish epoch: 11, sum loss:5.374886\n",
      "Test Accuracy:0.990200\n",
      "Finish epoch: 12, sum loss:4.719485\n",
      "Test Accuracy:0.990100\n",
      "Finish epoch: 13, sum loss:4.174470\n",
      "Test Accuracy:0.990400\n",
      "Finish epoch: 14, sum loss:4.173966\n",
      "Test Accuracy:0.989100\n",
      "Finish epoch: 15, sum loss:3.220392\n",
      "Test Accuracy:0.989200\n",
      "Finish epoch: 16, sum loss:3.376388\n",
      "Test Accuracy:0.989400\n",
      "Finish epoch: 17, sum loss:2.903660\n",
      "Test Accuracy:0.990100\n",
      "Finish epoch: 18, sum loss:2.921156\n",
      "Test Accuracy:0.989000\n",
      "Finish epoch: 19, sum loss:2.615902\n",
      "Test Accuracy:0.989300\n",
      "Finish epoch: 20, sum loss:2.474096\n",
      "Test Accuracy:0.990000\n",
      "Finish epoch: 21, sum loss:2.428174\n",
      "Test Accuracy:0.989000\n",
      "Finish epoch: 22, sum loss:1.945356\n",
      "Test Accuracy:0.989800\n",
      "Finish epoch: 23, sum loss:2.134622\n",
      "Test Accuracy:0.989900\n",
      "Finish epoch: 24, sum loss:1.491273\n",
      "Test Accuracy:0.990700\n",
      "Finish epoch: 25, sum loss:1.582515\n",
      "Test Accuracy:0.989600\n",
      "Finish epoch: 26, sum loss:1.942966\n",
      "Test Accuracy:0.990500\n",
      "Finish epoch: 27, sum loss:1.814107\n",
      "Test Accuracy:0.989100\n",
      "Finish epoch: 28, sum loss:1.515356\n",
      "Test Accuracy:0.988700\n",
      "Finish epoch: 29, sum loss:1.434733\n",
      "Test Accuracy:0.989400\n",
      "Finish epoch: 30, sum loss:1.933559\n",
      "Test Accuracy:0.988700\n",
      "Finish epoch: 31, sum loss:1.243559\n",
      "Test Accuracy:0.989300\n",
      "Finish epoch: 32, sum loss:1.142645\n",
      "Test Accuracy:0.989600\n",
      "Finish epoch: 33, sum loss:0.866563\n",
      "Test Accuracy:0.989500\n",
      "Finish epoch: 34, sum loss:0.743250\n",
      "Test Accuracy:0.988300\n",
      "Finish epoch: 35, sum loss:2.199935\n",
      "Test Accuracy:0.989700\n",
      "Finish epoch: 36, sum loss:0.927712\n",
      "Test Accuracy:0.990300\n",
      "Finish epoch: 37, sum loss:1.283323\n",
      "Test Accuracy:0.989100\n",
      "Finish epoch: 38, sum loss:0.728416\n",
      "Test Accuracy:0.990600\n",
      "Finish epoch: 39, sum loss:0.792648\n",
      "Test Accuracy:0.990800\n",
      "Finish epoch: 40, sum loss:1.421082\n",
      "Test Accuracy:0.988800\n",
      "Finish epoch: 41, sum loss:1.221938\n",
      "Test Accuracy:0.989500\n",
      "Finish epoch: 42, sum loss:0.615280\n",
      "Test Accuracy:0.991100\n",
      "Finish epoch: 43, sum loss:0.875998\n",
      "Test Accuracy:0.989400\n",
      "Finish epoch: 44, sum loss:1.067939\n",
      "Test Accuracy:0.990200\n",
      "Finish epoch: 45, sum loss:0.953497\n",
      "Test Accuracy:0.991900\n",
      "Finish epoch: 46, sum loss:0.478620\n",
      "Test Accuracy:0.989100\n",
      "Finish epoch: 47, sum loss:0.835123\n",
      "Test Accuracy:0.988100\n",
      "Finish epoch: 48, sum loss:1.124226\n",
      "Test Accuracy:0.990200\n",
      "Finish epoch: 49, sum loss:1.035365\n",
      "Test Accuracy:0.989300\n",
      "Finish epoch: 50, sum loss:0.486485\n",
      "Test Accuracy:0.990100\n",
      "Finish epoch: 51, sum loss:0.318625\n",
      "Test Accuracy:0.991900\n",
      "Finish epoch: 52, sum loss:0.743710\n",
      "Test Accuracy:0.988800\n",
      "Finish epoch: 53, sum loss:2.083408\n",
      "Test Accuracy:0.985000\n",
      "Finish epoch: 54, sum loss:0.690561\n",
      "Test Accuracy:0.990000\n",
      "Finish epoch: 55, sum loss:0.257482\n",
      "Test Accuracy:0.990600\n",
      "Finish epoch: 56, sum loss:0.326101\n",
      "Test Accuracy:0.991000\n",
      "Finish epoch: 57, sum loss:0.247821\n",
      "Test Accuracy:0.989900\n",
      "Finish epoch: 58, sum loss:1.245504\n",
      "Test Accuracy:0.983200\n",
      "Finish epoch: 59, sum loss:1.883189\n",
      "Test Accuracy:0.991300\n",
      "Finish epoch: 60, sum loss:0.116180\n",
      "Test Accuracy:0.991000\n",
      "Finish epoch: 61, sum loss:0.054002\n",
      "Test Accuracy:0.991900\n",
      "Finish epoch: 62, sum loss:0.011995\n",
      "Test Accuracy:0.991500\n",
      "Finish epoch: 63, sum loss:0.006874\n",
      "Test Accuracy:0.991800\n",
      "Finish epoch: 64, sum loss:0.005249\n",
      "Test Accuracy:0.991800\n",
      "Finish epoch: 65, sum loss:0.004114\n",
      "Test Accuracy:0.991500\n",
      "Finish epoch: 66, sum loss:0.003573\n",
      "Test Accuracy:0.991900\n",
      "Finish epoch: 67, sum loss:0.002965\n",
      "Test Accuracy:0.991800\n",
      "Finish epoch: 68, sum loss:0.002488\n",
      "Test Accuracy:0.991800\n",
      "Finish epoch: 69, sum loss:0.002139\n",
      "Test Accuracy:0.991700\n",
      "Finish epoch: 70, sum loss:0.001916\n",
      "Test Accuracy:0.991700\n",
      "Finish epoch: 71, sum loss:0.001627\n",
      "Test Accuracy:0.991700\n",
      "Finish epoch: 72, sum loss:0.001407\n",
      "Test Accuracy:0.991600\n",
      "Finish epoch: 73, sum loss:0.001194\n",
      "Test Accuracy:0.991600\n",
      "Finish epoch: 74, sum loss:0.001069\n",
      "Test Accuracy:0.991700\n",
      "Finish epoch: 75, sum loss:0.000927\n",
      "Test Accuracy:0.991500\n",
      "Finish epoch: 76, sum loss:0.000818\n",
      "Test Accuracy:0.991500\n",
      "Finish epoch: 77, sum loss:0.000705\n",
      "Test Accuracy:0.991500\n",
      "Finish epoch: 78, sum loss:0.000626\n",
      "Test Accuracy:0.991400\n",
      "Finish epoch: 79, sum loss:0.000540\n",
      "Test Accuracy:0.991500\n",
      "Finish epoch: 80, sum loss:0.000484\n",
      "Test Accuracy:0.991600\n",
      "Finish epoch: 81, sum loss:0.000421\n",
      "Test Accuracy:0.991500\n",
      "Finish epoch: 82, sum loss:0.000368\n",
      "Test Accuracy:0.991600\n",
      "Finish epoch: 83, sum loss:0.000326\n",
      "Test Accuracy:0.991300\n",
      "Finish epoch: 84, sum loss:0.000302\n",
      "Test Accuracy:0.991300\n",
      "Finish epoch: 85, sum loss:0.000248\n",
      "Test Accuracy:0.991700\n",
      "Finish epoch: 86, sum loss:0.000227\n",
      "Test Accuracy:0.991600\n",
      "Finish epoch: 87, sum loss:0.000209\n",
      "Test Accuracy:0.991300\n",
      "Finish epoch: 88, sum loss:0.000174\n",
      "Test Accuracy:0.991600\n",
      "Finish epoch: 89, sum loss:0.000159\n",
      "Test Accuracy:0.991700\n",
      "Finish epoch: 90, sum loss:0.000135\n",
      "Test Accuracy:0.991400\n",
      "Finish epoch: 91, sum loss:0.000117\n",
      "Test Accuracy:0.991700\n",
      "Finish epoch: 92, sum loss:0.000108\n",
      "Test Accuracy:0.991700\n",
      "Finish epoch: 93, sum loss:0.000099\n",
      "Test Accuracy:0.991700\n",
      "Finish epoch: 94, sum loss:0.000086\n",
      "Test Accuracy:0.991700\n",
      "Finish epoch: 95, sum loss:0.000080\n",
      "Test Accuracy:0.991600\n",
      "Finish epoch: 96, sum loss:0.000067\n",
      "Test Accuracy:0.991800\n",
      "Finish epoch: 97, sum loss:0.000056\n",
      "Test Accuracy:0.991600\n",
      "Finish epoch: 98, sum loss:0.000051\n",
      "Test Accuracy:0.991800\n",
      "Finish epoch: 99, sum loss:0.000047\n",
      "Test Accuracy:0.991700\n",
      "Finish epoch: 100, sum loss:0.000038\n",
      "Test Accuracy:0.991800\n",
      "Finish epoch: 101, sum loss:0.000035\n",
      "Test Accuracy:0.991700\n",
      "Finish epoch: 102, sum loss:0.000030\n",
      "Test Accuracy:0.991700\n",
      "Finish epoch: 103, sum loss:0.000028\n",
      "Test Accuracy:0.991700\n",
      "Finish epoch: 104, sum loss:0.000027\n",
      "Test Accuracy:0.991800\n",
      "Finish epoch: 105, sum loss:0.000030\n",
      "Test Accuracy:0.991800\n",
      "Finish epoch: 106, sum loss:0.000020\n",
      "Test Accuracy:0.992000\n",
      "Finish epoch: 107, sum loss:0.000016\n",
      "Test Accuracy:0.991800\n",
      "Finish epoch: 108, sum loss:0.000014\n",
      "Test Accuracy:0.991900\n",
      "Finish epoch: 109, sum loss:0.000014\n",
      "Test Accuracy:0.992000\n",
      "Finish epoch: 110, sum loss:0.000011\n",
      "Test Accuracy:0.991800\n",
      "Finish epoch: 111, sum loss:0.000010\n",
      "Test Accuracy:0.992200\n",
      "Finish epoch: 112, sum loss:0.000009\n",
      "Test Accuracy:0.992200\n",
      "Finish epoch: 113, sum loss:0.000008\n",
      "Test Accuracy:0.992200\n",
      "Finish epoch: 114, sum loss:0.000007\n",
      "Test Accuracy:0.992200\n",
      "Finish epoch: 115, sum loss:0.000006\n",
      "Test Accuracy:0.992100\n",
      "Finish epoch: 116, sum loss:0.000005\n",
      "Test Accuracy:0.992000\n",
      "Finish epoch: 117, sum loss:0.000005\n",
      "Test Accuracy:0.992100\n",
      "Finish epoch: 118, sum loss:0.000004\n",
      "Test Accuracy:0.992300\n",
      "Finish epoch: 119, sum loss:0.000004\n",
      "Test Accuracy:0.992100\n",
      "Finish epoch: 120, sum loss:0.000003\n",
      "Test Accuracy:0.992100\n",
      "Finish epoch: 121, sum loss:0.000003\n",
      "Test Accuracy:0.992300\n",
      "Finish epoch: 122, sum loss:0.000003\n",
      "Test Accuracy:0.992100\n",
      "Finish epoch: 123, sum loss:0.000002\n",
      "Test Accuracy:0.992000\n",
      "Finish epoch: 124, sum loss:0.000002\n",
      "Test Accuracy:0.991900\n",
      "Finish epoch: 125, sum loss:0.000002\n",
      "Test Accuracy:0.991900\n",
      "Finish epoch: 126, sum loss:0.000001\n",
      "Test Accuracy:0.992000\n",
      "Finish epoch: 127, sum loss:0.000001\n",
      "Test Accuracy:0.991900\n",
      "Finish epoch: 128, sum loss:0.000001\n",
      "Test Accuracy:0.992100\n",
      "Finish epoch: 129, sum loss:0.000001\n",
      "Test Accuracy:0.992100\n",
      "Finish epoch: 130, sum loss:0.000001\n",
      "Test Accuracy:0.991900\n",
      "Finish epoch: 131, sum loss:0.000001\n",
      "Test Accuracy:0.992000\n",
      "Finish epoch: 132, sum loss:0.000001\n",
      "Test Accuracy:0.991700\n",
      "Finish epoch: 133, sum loss:0.000001\n",
      "Test Accuracy:0.992100\n",
      "Finish epoch: 134, sum loss:0.000001\n",
      "Test Accuracy:0.992200\n",
      "Finish epoch: 135, sum loss:0.000000\n",
      "Test Accuracy:0.991800\n",
      "Finish epoch: 136, sum loss:0.000000\n",
      "Test Accuracy:0.992100\n",
      "Finish epoch: 137, sum loss:0.000000\n",
      "Test Accuracy:0.992100\n",
      "Finish epoch: 138, sum loss:0.000000\n",
      "Test Accuracy:0.992000\n",
      "Finish epoch: 139, sum loss:0.000000\n",
      "Test Accuracy:0.992100\n",
      "Finish epoch: 140, sum loss:0.000000\n",
      "Test Accuracy:0.992200\n",
      "Finish epoch: 141, sum loss:0.000000\n",
      "Test Accuracy:0.991900\n",
      "Finish epoch: 142, sum loss:0.000000\n",
      "Test Accuracy:0.992000\n",
      "Finish epoch: 143, sum loss:0.000000\n",
      "Test Accuracy:0.992200\n",
      "Finish epoch: 144, sum loss:0.000000\n",
      "Test Accuracy:0.991900\n",
      "Finish epoch: 145, sum loss:0.000000\n",
      "Test Accuracy:0.992100\n",
      "Finish epoch: 146, sum loss:0.000000\n",
      "Test Accuracy:0.992100\n",
      "Finish epoch: 147, sum loss:0.000000\n",
      "Test Accuracy:0.992100\n",
      "Finish epoch: 148, sum loss:0.000000\n",
      "Test Accuracy:0.991900\n",
      "Finish epoch: 149, sum loss:0.000000\n",
      "Test Accuracy:0.992000\n",
      "Finish epoch: 150, sum loss:0.000000\n",
      "Test Accuracy:0.992000\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 训练学生网络\n"
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "source": [
    "current_path = os.getcwd()\n",
    "path_ckpt_t_teacher = current_path + '/cache/models/teacher/teacher__accr0.992300_epoch118.pth'\n",
    "path_ckpt_student = current_path + '/cache/models/student/'\n",
    "# Portability fix: expand '~' instead of hard-coding one user's home\n",
    "# directory; matches the '~/workspace/dataset/' root used by TeacherTrainer.\n",
    "path_dataset = os.path.expanduser('~/workspace/dataset/')\n",
    "path_loss = current_path + '/cache/models/student/'\n",
    "# Hyper-parameters: oh/ie/a weight the one-hot, information-entropy and\n",
    "# activation losses that steer the generator.\n",
    "lr_G = 0.2\n",
    "lr_S = 2e-3\n",
    "epochs = 200\n",
    "batch_size = 512\n",
    "latent_dim = 100\n",
    "oh = 1\n",
    "ie = 5\n",
    "a = 0.1\n",
    "torch.cuda.empty_cache()\n",
    "trainstudent = StudentTrainer(path_ckpt_t_teacher, path_ckpt_student, path_dataset, path_loss, epochs, batch_size, latent_dim, lr_G, lr_S, oh, ie, a)\n",
    "trainstudent.train()"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stderr",
     "text": [
      "/home/yinzp/.local/lib/python3.6/site-packages/torch/nn/_reduction.py:42: UserWarning: size_average and reduce args will be deprecated, please use reduction='sum' instead.\n",
      "  warnings.warn(warning.format(ret))\n"
     ]
    },
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "Epoch1, loss-45.903053\n",
      "Test Accuracy: 0.102900\n",
      "Epoch2, loss-67.552167\n",
      "Test Accuracy: 0.103200\n",
      "Epoch3, loss-69.354913\n",
      "Test Accuracy: 0.103200\n",
      "Epoch4, loss-71.558287\n",
      "Test Accuracy: 0.103200\n",
      "Epoch5, loss-73.528788\n",
      "Test Accuracy: 0.103200\n",
      "Epoch6, loss-84.936398\n",
      "Test Accuracy: 0.103400\n",
      "Epoch7, loss-146.076957\n",
      "Test Accuracy: 0.097400\n",
      "Epoch8, loss-193.976094\n",
      "Test Accuracy: 0.097400\n",
      "Epoch9, loss-251.662215\n",
      "Test Accuracy: 0.097400\n",
      "Epoch10, loss-280.368194\n",
      "Test Accuracy: 0.097400\n",
      "Epoch11, loss-281.429190\n",
      "Test Accuracy: 0.097400\n",
      "Epoch12, loss-281.806456\n",
      "Test Accuracy: 0.097400\n",
      "Epoch13, loss-282.056922\n",
      "Test Accuracy: 0.097400\n",
      "Epoch14, loss-282.225792\n",
      "Test Accuracy: 0.097400\n",
      "Epoch15, loss-282.340767\n",
      "Test Accuracy: 0.097400\n",
      "Epoch16, loss-282.419549\n",
      "Test Accuracy: 0.097400\n",
      "Epoch17, loss-282.473769\n",
      "Test Accuracy: 0.097400\n",
      "Epoch18, loss-282.511194\n",
      "Test Accuracy: 0.097400\n",
      "Epoch19, loss-282.536912\n",
      "Test Accuracy: 0.097400\n",
      "Epoch20, loss-282.554568\n",
      "Test Accuracy: 0.097400\n",
      "Epoch21, loss-282.566879\n",
      "Test Accuracy: 0.097400\n",
      "Epoch22, loss-282.575479\n",
      "Test Accuracy: 0.097400\n",
      "Epoch23, loss-282.581497\n",
      "Test Accuracy: 0.097400\n",
      "Epoch24, loss-282.585703\n",
      "Test Accuracy: 0.097400\n",
      "Epoch25, loss-282.588656\n",
      "Test Accuracy: 0.097400\n",
      "Epoch26, loss-282.590733\n",
      "Test Accuracy: 0.097400\n",
      "Epoch27, loss-282.592204\n",
      "Test Accuracy: 0.097400\n",
      "Epoch28, loss-282.593237\n",
      "Test Accuracy: 0.097400\n",
      "Epoch29, loss-282.593963\n",
      "Test Accuracy: 0.097400\n",
      "Epoch30, loss-282.594486\n",
      "Test Accuracy: 0.097400\n",
      "Epoch31, loss-282.594850\n",
      "Test Accuracy: 0.097400\n",
      "Epoch32, loss-282.595112\n",
      "Test Accuracy: 0.097400\n",
      "Epoch33, loss-282.595293\n",
      "Test Accuracy: 0.097400\n",
      "Epoch34, loss-282.595426\n",
      "Test Accuracy: 0.097400\n",
      "Epoch35, loss-282.595510\n",
      "Test Accuracy: 0.097400\n",
      "Epoch36, loss-282.595588\n",
      "Test Accuracy: 0.097400\n",
      "Epoch37, loss-282.595622\n",
      "Test Accuracy: 0.097400\n",
      "Epoch38, loss-282.595671\n",
      "Test Accuracy: 0.097400\n",
      "Epoch39, loss-282.595686\n",
      "Test Accuracy: 0.097400\n",
      "Epoch40, loss-282.595690\n",
      "Test Accuracy: 0.097400\n",
      "Epoch41, loss-282.595711\n",
      "Test Accuracy: 0.097400\n",
      "Epoch42, loss-282.595733\n",
      "Test Accuracy: 0.097400\n",
      "Epoch43, loss-282.595721\n",
      "Test Accuracy: 0.097400\n",
      "Epoch44, loss-282.595741\n",
      "Test Accuracy: 0.097400\n",
      "Epoch45, loss-282.595755\n",
      "Test Accuracy: 0.097400\n",
      "Epoch46, loss-282.595737\n",
      "Test Accuracy: 0.097400\n",
      "Epoch47, loss-282.595728\n",
      "Test Accuracy: 0.097400\n",
      "Epoch48, loss-282.595728\n",
      "Test Accuracy: 0.097400\n",
      "Epoch49, loss-282.595746\n",
      "Test Accuracy: 0.097400\n",
      "Epoch50, loss-282.595751\n",
      "Test Accuracy: 0.097400\n",
      "Epoch51, loss-282.595762\n",
      "Test Accuracy: 0.097400\n",
      "Epoch52, loss-282.595762\n",
      "Test Accuracy: 0.097400\n",
      "Epoch53, loss-282.595737\n",
      "Test Accuracy: 0.097400\n",
      "Epoch54, loss-282.595734\n",
      "Test Accuracy: 0.097400\n",
      "Epoch55, loss-282.595733\n",
      "Test Accuracy: 0.097400\n",
      "Epoch56, loss-282.595732\n",
      "Test Accuracy: 0.097400\n",
      "Epoch57, loss-282.595726\n",
      "Test Accuracy: 0.097400\n",
      "Epoch58, loss-282.595742\n",
      "Test Accuracy: 0.097400\n",
      "Epoch59, loss-282.595743\n",
      "Test Accuracy: 0.097400\n",
      "Epoch60, loss-282.595737\n",
      "Test Accuracy: 0.097400\n",
      "Epoch61, loss-282.595749\n",
      "Test Accuracy: 0.097400\n",
      "Epoch62, loss-282.595754\n",
      "Test Accuracy: 0.097400\n",
      "Epoch63, loss-282.595761\n",
      "Test Accuracy: 0.097400\n",
      "Epoch64, loss-282.595748\n",
      "Test Accuracy: 0.097400\n",
      "Epoch65, loss-282.595729\n",
      "Test Accuracy: 0.097400\n",
      "Epoch66, loss-282.595724\n",
      "Test Accuracy: 0.097400\n",
      "Epoch67, loss-282.595729\n",
      "Test Accuracy: 0.097400\n",
      "Epoch68, loss-282.595737\n",
      "Test Accuracy: 0.097400\n",
      "Epoch69, loss-282.595719\n",
      "Test Accuracy: 0.097400\n",
      "Epoch70, loss-282.595722\n",
      "Test Accuracy: 0.097400\n",
      "Epoch71, loss-282.595735\n",
      "Test Accuracy: 0.097400\n",
      "Epoch72, loss-282.595720\n",
      "Test Accuracy: 0.097400\n",
      "Epoch73, loss-282.595711\n",
      "Test Accuracy: 0.097400\n",
      "Epoch74, loss-282.595711\n",
      "Test Accuracy: 0.097400\n",
      "Epoch75, loss-282.595729\n",
      "Test Accuracy: 0.097400\n",
      "Epoch76, loss-282.595734\n",
      "Test Accuracy: 0.097400\n",
      "Epoch77, loss-282.595725\n",
      "Test Accuracy: 0.097400\n",
      "Epoch78, loss-282.595724\n",
      "Test Accuracy: 0.097400\n",
      "Epoch79, loss-282.595728\n",
      "Test Accuracy: 0.097400\n",
      "Epoch80, loss-282.595727\n",
      "Test Accuracy: 0.097400\n",
      "Epoch81, loss-282.595725\n",
      "Test Accuracy: 0.097400\n",
      "Epoch82, loss-282.595733\n",
      "Test Accuracy: 0.097400\n",
      "Epoch83, loss-282.595746\n",
      "Test Accuracy: 0.097400\n",
      "Epoch84, loss-282.595731\n",
      "Test Accuracy: 0.097400\n",
      "Epoch85, loss-282.595754\n",
      "Test Accuracy: 0.097400\n",
      "Epoch86, loss-282.595730\n",
      "Test Accuracy: 0.097400\n",
      "Epoch87, loss-282.595711\n",
      "Test Accuracy: 0.097400\n",
      "Epoch88, loss-282.595719\n",
      "Test Accuracy: 0.097400\n",
      "Epoch89, loss-282.595724\n",
      "Test Accuracy: 0.097400\n",
      "Epoch90, loss-282.595711\n",
      "Test Accuracy: 0.097400\n",
      "Epoch91, loss-282.595712\n",
      "Test Accuracy: 0.097400\n",
      "Epoch92, loss-282.595732\n",
      "Test Accuracy: 0.097400\n",
      "Epoch93, loss-282.595734\n",
      "Test Accuracy: 0.097400\n",
      "Epoch94, loss-282.595739\n",
      "Test Accuracy: 0.097400\n",
      "Epoch95, loss-282.595739\n",
      "Test Accuracy: 0.097400\n",
      "Epoch96, loss-282.595732\n",
      "Test Accuracy: 0.097400\n",
      "Epoch97, loss-282.595739\n",
      "Test Accuracy: 0.097400\n",
      "Epoch98, loss-282.595737\n",
      "Test Accuracy: 0.097400\n",
      "Epoch99, loss-282.595729\n",
      "Test Accuracy: 0.097400\n",
      "Epoch100, loss-282.595716\n",
      "Test Accuracy: 0.097400\n",
      "Epoch101, loss-282.595721\n",
      "Test Accuracy: 0.097400\n",
      "Epoch102, loss-282.595716\n",
      "Test Accuracy: 0.097400\n",
      "Epoch103, loss-282.595735\n",
      "Test Accuracy: 0.097400\n",
      "Epoch104, loss-282.595740\n",
      "Test Accuracy: 0.097400\n",
      "Epoch105, loss-282.595739\n",
      "Test Accuracy: 0.097400\n",
      "Epoch106, loss-282.595739\n",
      "Test Accuracy: 0.097400\n",
      "Epoch107, loss-282.595739\n",
      "Test Accuracy: 0.097400\n",
      "Epoch108, loss-282.595739\n",
      "Test Accuracy: 0.097400\n",
      "Epoch109, loss-282.595739\n",
      "Test Accuracy: 0.097400\n",
      "Epoch110, loss-282.595739\n",
      "Test Accuracy: 0.097400\n",
      "Epoch111, loss-282.595739\n",
      "Test Accuracy: 0.097400\n",
      "Epoch112, loss-282.595739\n",
      "Test Accuracy: 0.097400\n",
      "Epoch113, loss-282.595739\n",
      "Test Accuracy: 0.097400\n"
     ]
    },
    {
     "output_type": "error",
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-50-229e896edc8b>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m     14\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcuda\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mempty_cache\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     15\u001b[0m \u001b[0mtrainstudent\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mStudentTrainer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpath_ckpt_t_teacher\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpath_ckpt_student\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpath_dataset\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mpath_loss\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mepochs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbatch_size\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlatent_dim\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlr_G\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlr_S\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moh\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mie\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0ma\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 16\u001b[0;31m \u001b[0mtrainstudent\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtrain\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[0;32m<ipython-input-49-6eb8e131ae2c>\u001b[0m in \u001b[0;36mtrain\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m     62\u001b[0m                 \u001b[0;31m# self.lr_scheduler_S.step()\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     63\u001b[0m                 \u001b[0;31m# self.lr_scheduler_G.step()\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 64\u001b[0;31m                 \u001b[0mloss_epoch\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0mloss\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mitem\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     65\u001b[0m             \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'Epoch%d, loss%f'\u001b[0m \u001b[0;34m%\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mepoch\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mloss_epoch\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     66\u001b[0m             \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mloss_list\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mloss_epoch\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "metadata": {}
  }
 ],
 "metadata": {
  "orig_nbformat": 4,
  "language_info": {
   "name": "python",
   "version": "3.6.9",
   "mimetype": "text/x-python",
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "pygments_lexer": "ipython3",
   "nbconvert_exporter": "python",
   "file_extension": ".py"
  },
  "kernelspec": {
   "name": "python3",
   "display_name": "Python 3.6.9 64-bit"
  },
  "interpreter": {
   "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}