{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 9,
   "source": [
    "import os\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "from torch.autograd import Variable\n",
    "import torch.nn.functional as F\n",
    "from torchvision.datasets.mnist import MNIST\n",
    "from torchvision.datasets import CIFAR10, CIFAR100, ImageFolder\n",
    "from torchvision.datasets.imagenet import ImageNet\n",
    "from torch.utils.data import DataLoader\n",
    "import torchvision.transforms as transforms\n",
    "import numpy as np"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "markdown",
   "source": [
    "# 网络定义"
   ],
   "metadata": {}
  },
  {
   "cell_type": "markdown",
   "source": [
     "## 教师网络ResNet18, 34, 50, 101, 152"
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "source": [
     "class BasicBlock(nn.Module):\n",
     "    \"\"\"Two-layer 3x3 residual block used by ResNet-18/34.\n",
     "\n",
     "    Output channels = planes * expansion; a 1x1 projection shortcut is\n",
     "    inserted when the spatial size or channel count changes.\n",
     "    \"\"\"\n",
     "    expansion = 1\n",
     "\n",
     "    def __init__(self, in_planes, planes, stride=1):\n",
     "        super(BasicBlock, self).__init__()\n",
     "        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n",
     "        self.bn1 = nn.BatchNorm2d(planes)\n",
     "        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)\n",
     "        self.bn2 = nn.BatchNorm2d(planes)\n",
     "\n",
     "        # Identity shortcut by default; 1x1 conv projection when shapes differ.\n",
     "        self.shortcut = nn.Sequential()\n",
     "        if stride != 1 or in_planes != self.expansion*planes:\n",
     "            self.shortcut = nn.Sequential(\n",
     "                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),\n",
     "                nn.BatchNorm2d(self.expansion*planes)\n",
     "            )\n",
     "    \n",
     "    def forward(self, x):\n",
     "        # conv -> BN -> ReLU, conv -> BN, add shortcut, final ReLU.\n",
     "        out = F.relu(self.bn1(self.conv1(x)))\n",
     "        out = self.bn2(self.conv2(out))\n",
     "        out += self.shortcut(x)\n",
     "        out =F.relu(out)\n",
     "        return out\n",
    "\n",
    "class Bottleneck(nn.Module):\n",
    "    expansion = 4\n",
    "\n",
    "    def __init__(self, in_planes, planes, stride=1):\n",
    "        super(Bottleneck, self).__init__()\n",
    "        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)\n",
    "        self.bn1 = nn.BatchNorm2d(planes)\n",
    "        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)\n",
    "        self.bn2 = nn.BatchNorm2d(planes)\n",
    "        self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)\n",
    "        self.bn3 = nn.BatchNorm2d(self.expansion*planes)\n",
    "\n",
    "        self.shortcut = nn.Sequential()\n",
    "        if stride != 1 or in_planes != self.expansion*planes:\n",
    "            self.shortcut = nn.Sequential(\n",
    "                nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),\n",
    "                nn.BatchNorm2d(self.expansion*planes)\n",
    "            )\n",
    "    def forward(self, x):\n",
    "        out = F.relu(self.bn1(self.conv1(x)))\n",
    "        out = F.relu(self.bn2(self.conv2(out)))\n",
    "        out = self.bn3(self.conv3(out))\n",
    "        out += self.shortcut(x)\n",
    "        return out\n",
    "\n",
    "\n",
    "\n",
    "class ResNet(nn.Module):\n",
    "    def __init__(self, block, num_blocks, num_classes=10):\n",
    "        super(ResNet, self).__init__()\n",
    "        self.in_planes = 64\n",
    "\n",
    "        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)\n",
    "        self.bn1 = nn.BatchNorm2d(64)\n",
    "        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)\n",
    "        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)\n",
    "        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)\n",
    "        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)\n",
    "        self.linear = nn.Linear(512 * block.expansion, num_classes)\n",
    "        \n",
    "    def _make_layer(self, block, planes, num_blocks, stride):\n",
    "        strides = [stride] + [1]*(num_blocks-1)\n",
    "        layers = []\n",
    "        for stride in strides:\n",
    "            layers.append(block(self.in_planes, planes, stride))\n",
    "            self.in_planes = planes * block.expansion\n",
    "        return nn.Sequential(*layers)\n",
    "\n",
    "    def forward(self, x, out_feature=False):\n",
    "        out = F.relu(self.bn1(self.conv1(x)))\n",
    "        out = self.layer1(out)\n",
    "        out = self.layer2(out)\n",
    "        out = self.layer3(out)\n",
    "        out = self.layer4(out)\n",
    "        out = F.avg_pool2d(out, 4)\n",
    "        feature = out.view(out.size(0), -1)\n",
    "        out = self.linear(feature)\n",
    "        if out_feature == False:\n",
    "            return out\n",
    "        else:\n",
    "            return out, feature\n",
    "def ResNet18(num_classes=10):\n",
    "    return ResNet(BasicBlock, [2,2,2,2], num_classes)\n",
    " \n",
    "def ResNet34(num_classes=10):\n",
    "    return ResNet(BasicBlock, [3,4,6,3], num_classes)\n",
    " \n",
    "def ResNet50(num_classes=10):\n",
    "    return ResNet(Bottleneck, [3,4,6,3], num_classes)\n",
    " \n",
    "def ResNet101(num_classes=10):\n",
    "    return ResNet(Bottleneck, [3,4,23,3], num_classes)\n",
    " \n",
    "def ResNet152(num_classes=10):\n",
    "    return ResNet(Bottleneck, [3,8,36,3], num_classes)"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 教师网络 LeNet5"
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "source": [
    "class LeNet5(nn.Module):\n",
    "    \"\"\"\n",
    "    Input: [batch, 1, 32, 32]\n",
    "    Output:[batch, 10]\n",
    "    \"\"\"\n",
    "    def __init__(self):\n",
    "        super(LeNet5, self).__init__()\n",
    "\n",
    "        self.conv1 = nn.Conv2d(1, 6, kernel_size=(5, 5))    #[28, 28]\n",
    "        self.relu1 = nn.ReLU()\n",
    "        self.maxpool1 = nn.MaxPool2d(kernel_size=(2, 2), stride=2)  #[14, 14]\n",
    "        self.conv2 = nn.Conv2d(6, 16, kernel_size=(5, 5))       #[10, 10]\n",
    "        self.relu2 = nn.ReLU()\n",
    "        self.maxpool2 = nn.MaxPool2d(kernel_size=(2, 2), stride=2)  #[5, 5]\n",
    "        self.conv3 = nn.Conv2d(16, 120, kernel_size=(5, 5)) #[1, 1]\n",
    "        self.relu3 = nn.ReLU()\n",
    "        self.fc1 = nn.Linear(120, 84)\n",
    "        self.relu4 = nn.ReLU()\n",
    "        self.fc2 = nn.Linear(84, 10)\n",
    "\n",
    "    def forward(self, img, out_feature=False):\n",
    "        output = self.conv1(img)\n",
    "        output = self.relu1(output)\n",
    "        output = self.maxpool1(output)  \n",
    "        \n",
    "        output = self.conv2(output)\n",
    "        output = self.relu2(output)\n",
    "        output = self.maxpool2(output)\n",
    "        \n",
    "        output = self.conv3(output)\n",
    "        output = self.relu3(output)\n",
    "        \n",
    "        feature = output.view(-1, 120)\n",
    "        output = self.fc1(feature)\n",
    "        output = self.relu4(output)\n",
    "        output = self.fc2(output)\n",
    "        if out_feature == False:\n",
    "            return output\n",
    "        else:\n",
    "            return output,feature"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "markdown",
   "source": [
    "# 网络训练"
   ],
   "metadata": {}
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 教师网络训练\n"
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "source": [
    "class TeacherTrainer():\n",
    "    def __init__(self, epochs, path_dataset, path_ckpt, path_loss):\n",
    "        # 数据集\n",
    "        self.dataset_train = CIFAR10(path_dataset, transform = transforms.Compose([\n",
    "            transforms.RandomCrop(32, padding=4),\n",
    "            transforms.RandomHorizontalFlip(),\n",
    "            transforms.ToTensor(),\n",
    "            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n",
    "        ]))\n",
    "        self.dataset_test = CIFAR10(path_dataset, train = False, transform = transforms.Compose([\n",
    "            transforms.ToTensor(),\n",
    "            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n",
    "        ]))\n",
    "        self.dataset_train_loader = DataLoader(self.dataset_train, batch_size=128, shuffle=True, num_workers=8)\n",
    "        self.dataset_test_loader = DataLoader(self.dataset_test, batch_size=100, num_workers=8)\n",
    "        #网络和损失优化\n",
    "        self.net = ResNet34().cuda()\n",
    "        self.criterion = torch.nn.CrossEntropyLoss().cuda()\n",
    "        self.optimizer = torch.optim.SGD(self.net.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)\n",
    "\n",
    "        # 训练有关参数\n",
    "        self.epochs = epochs\n",
    "        self.best_accr = 0\n",
    "        self.list_loss = []\n",
    "        self.path_ckpt = path_ckpt\n",
    "        self.path_loss = path_loss\n",
    "\n",
    "    def train(self):\n",
    "        for epoch in range(1, self.epochs + 1):\n",
    "            self.net.train()\n",
    "            loss_epoch = 0\n",
    "            for i, (images, labels) in enumerate(self.dataset_train_loader, start=1):\n",
    "                images, labels = Variable(images).cuda(), Variable(labels).cuda()\n",
    "                self.optimizer.zero_grad()\n",
    "                output = self.net(images)\n",
    "                loss = self.criterion(output, labels)\n",
    "                loss.backward()\n",
    "                self.optimizer.step()\n",
    "\n",
    "                loss_epoch += loss.data.item()\n",
    "            self._adjust_lr(epoch)\n",
    "            self.list_loss.append(loss_epoch)\n",
    "            print('Epoch:%d, Loss:%f' % (epoch, loss_epoch))\n",
    "            self.test(epoch)\n",
    "        lossfile = np.array(self.list_loss)\n",
    "        np.save(self.path_loss + '/teacher_loss_{}'.format(self.epochs), lossfile)\n",
    "    def test(self, epoch):\n",
    "        self.net.eval()\n",
    "        total_correct = 0\n",
    "        with torch.no_grad():\n",
    "            for i, (images, labels) in enumerate(self.dataset_test_loader, start=1):\n",
    "                images, labels = Variable(images).cuda(), Variable(labels).cuda()\n",
    "                output = self.net(images)\n",
    "                pred = output.data.max(1)[1]\n",
    "                total_correct += pred.eq(labels.data.view_as(pred)).sum()\n",
    "            \n",
    "        acc = float(total_correct) / len(self.dataset_test)\n",
    "        if acc > self.best_accr:\n",
    "            self.best_accr = acc\n",
    "            self.save_model(self.path_ckpt, epoch)\n",
    "\n",
    "        print('Test Accuracy:%f' % (acc))\n",
    "\n",
    "    def save_model(self, path, epoch):\n",
    "        state = {'net': self.net.state_dict(), 'optimizer':self.optimizer.state_dict(), 'epoch':epoch}\n",
    "        filename = path + 'teacher__accr%f_epoch%d.pth'%(self.best_accr, epoch)\n",
    "        torch.save(state, filename)\n",
    "\n",
    "    def _adjust_lr(self, epoch):\n",
    "        if epoch < 80:\n",
    "            lr = 0.1\n",
    "        elif epoch < 120:\n",
    "            lr = 0.01\n",
    "        else:\n",
    "            lr = 0.001\n",
    "        for param_group in self.optimizer.param_groups:\n",
    "            param_group['lr'] = lr\n",
    "\n",
    "        "
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 学生网络训练"
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "source": [
    "class StudentTrainer():\n",
    "    def __init__(self, epochs, name_dataset, path_dataset_cifar, path_imagenet, \n",
    "                 path_loss, path_student_ckpt, path_teacher_ckpt, num_select):\n",
    "        self.epochs = epochs\n",
    "        self.path_loss = path_loss\n",
    "        self.path_student_ckpt = path_student_ckpt\n",
    "        self.path_teacher_ckpt = path_teacher_ckpt\n",
    "        # 测试数据集准备和教师网络\n",
    "        transform_test = transforms.Compose([\n",
    "            transforms.ToTensor(),\n",
    "            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n",
    "        ])\n",
    "        if name_dataset == 'cifar10':\n",
    "            self.data_test = CIFAR10(path_dataset_cifar,\n",
    "                                     train=False,\n",
    "                                     transform=transform_test)\n",
    "            self.teacher_acc = torch.Tensor([0.9523])\n",
    "            self.class_num = 10\n",
    "            self.teacher = ResNet34().cuda()\n",
    "        if name_dataset == 'cifar100':\n",
    "            self.data_test = CIFAR100(path_dataset_cifar,\n",
    "                                     train=False,\n",
    "                                     transform=transform_test)\n",
    "            self.teacher_acc = torch.Tensor([0.7774])\n",
    "            self.class_num = 100\n",
    "            self.teacher = ResNet34(num_classes=100).cuda()\n",
    "        self.teacher.load_state_dict(torch.load(self.path_teacher_ckpt)['net'])\n",
    "\n",
    "        self.data_test_loader = DataLoader(self.data_test, batch_size=1000, num_workers=8)\n",
    "        # 用于筛选正样本的原始训练数据集\n",
    "        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n",
    "        self.data_train = ImageFolder(path_imagenet, transforms.Compose([\n",
    "            transforms.Resize((32, 32)),\n",
    "            transforms.ToTensor(),\n",
    "            normalize,\n",
    "        ]))\n",
    "        self.data_train_loader_noshuffle = DataLoader(self.data_train, batch_szie=256, shuffle=False, num_workers=8)\n",
    "        #  从原始数据集中筛选\n",
    "        self.num_select = num_select\n",
    "        self.positive_index = self._select_dataset()\n",
    "        self.dataset_to_selected = ImageFolder(path_imagenet, transforms.Compose([\n",
    "            transforms.Resize((32, 32)),\n",
    "            transforms.RandomCrop(32, padding=4),\n",
    "            transforms.RandomHorizontalFlip(),\n",
    "            transforms.ToTensor(),\n",
    "            normalize,\n",
    "        ]))\n",
    "        self.dataset_selected = torch.utils.data.Subset(self.dataset_to_selected, self.positive_index)\n",
    "        self.dataset_selected_loader = torch.utils.data.DataLoader(self.dataset_selected, batch_size=256, shuffle=True, num_workers=8)\n",
    "        # 网络\n",
    "        self.noise_adaption = torch.nn.Parameter(torch.zeros(self.class_num, self.class_num-1))\n",
    "        self.student = ResNet18(self.class_num).cuda()\n",
    "        self.nll = nn.NLLLoss().cuda()      ## cross-entropy = softmax + log + nll(把log结果中对应labels的值取负，再求平均)\n",
    "        self.criterion = nn.CrossEntropyLoss().cuda()\n",
    "        self.optimizer = torch.optim.SGD(list(self.student.parameters()), lr=0.1, momentum=0.9, weight_decay=5e-4)\n",
    "        self.optimizer_noise = torch.optim.Adam([self.noise_adaption], lr=0.001)\n",
    "\n",
    "        self.loss_list = []\n",
    "        self.best_acc = 0\n",
    "\n",
    "    def train(self):\n",
    "        for epoch in range(1, self.epochs+1):\n",
    "            loss_epoch = 0\n",
    "            self.student.train()\n",
    "            for i, (images, labels) in enumerate(self.dataset_selected_loader):\n",
    "                images, labels = Variable(images).cuda(), Variable(labels).cuda()\n",
    "\n",
    "                self.optimizer.zero_grad()\n",
    "                self.optimizer_noise.zero_grad()\n",
    "\n",
    "                output = self.student(images)\n",
    "                output_t = self.teacher(images).detach()\n",
    "                pseudo_labels = output_t.data.max(1)[1]\n",
    "                # 损失1：硬损失\n",
    "                loss = self._kdloss(output, output_t)\n",
    "                # 将学生预测结果[batch_size, class_num]乘以一个矩阵[class_num, class_num]\n",
    "                # 将相乘结果和伪标签求损失\n",
    "                output_s = F.softmax(output, dim=1)\n",
    "                output_s_adaption = torch.matmul(output_s, self._noise())\n",
    "                loss += self.nll(torch.log(output_s_adaption), pseudo_labels)\n",
    "\n",
    "                loss.backward()\n",
    "                self.optimizer.step()\n",
    "                self.optimizer_noise.step()\n",
    "                loss_epoch += loss.data.item()\n",
    "            self.loss_list.append(loss_epoch)\n",
    "            print(\"Epoch:%d, Loss:%f\"%(epoch, loss_epoch))\n",
    "            self.test(epoch)\n",
    "        file_loss = np.array(self.loss_list)\n",
    "        np.save(self.path_loss + 'student_epoch{}'.format(self.epochs), file_loss)\n",
    "    def test(self, epoch):\n",
    "        self.student.eval()\n",
    "        \n",
    "        total_correct = 0\n",
    "        with torch.no_grad():\n",
    "            for i, (images, labels) in enumerate(self.data_test_loader, start=1):\n",
    "                images, labels = Variable(images).cuda(), Variable(labels).cuda()\n",
    "                output = self.student(images)\n",
    "                pred = output.data.max(1)[1]\n",
    "                total_correct += pred.eq(labels.data.view_as(pred)).sum()\n",
    "\n",
    "        acc = float(total_correct) / len(self.data_test)\n",
    "        print('Test ACC: %f'%(acc))\n",
    "        if acc > self.best_acc:\n",
    "            self.best_acc = acc\n",
    "            self.save_model(epoch, acc)\n",
    "    \n",
    "    def save_model(self, epochs, accr):\n",
    "        state = {'net': self.student.state_dict(), 'optimizer':self.optimizer.state_dict(), 'epoch':epochs}\n",
    "        filename = self.path_student_ckpt + 'student_accr%f_epoch_%d.pth' %(accr, epochs)\n",
    "        torch.save(state, filename)\n",
    "\n",
    "\n",
    "\n",
    "                \n",
    "\n",
    "\n",
    "\n",
    "\n",
    "    def _kdloss(self, student_scores, teacher_scores, T=4):\n",
    "        p = F.log_softmax(student_scores/T, dim=1)\n",
    "        q = F.softmax(teacher_scores/T, dim=1)\n",
    "        l_kl = F.kl_div(p, q, reduce=False)\n",
    "        loss = torch.sum(l_kl) / teacher_scores.shape[0]\n",
    "        return loss * (T**2)\n",
    "\n",
    "    def _noise(self):\n",
    "        ## 把10x9的矩阵按行取softmax，再乘(1-teacher_acc)，再在对角线添加teacher_acc，得到一个10x10的矩阵\n",
    "        noise_adaption_softmax = F.softmax(self.noise_adaption, dim=1) * (1 - self.teacher_acc)\n",
    "        noise_adaption_layer = torch.zero(self.class_num, self.class_num)\n",
    "        for i in range(self.class_num):\n",
    "            if i == 0:\n",
    "                noise_adaption_layer[i] = torch.cat([self.teacher_acc, noise_adaption_softmax[i][i:]])\n",
    "            if i == self.class_num-1:\n",
    "                noise_adaption_layer[i] = torch.cat([noise_adaption_softmax[i][:i], self.teacher_acc])\n",
    "            else:\n",
    "                noise_adaption_layer[i] = torch.cat([noise_adaption_softmax[i][:i], self.teacher_acc, noise_adaption_softmax[i][i:]])\n",
    "        return noise_adaption_layer.cuda()\n",
    "    \n",
    "    def _select_dataset(self):\n",
    "        loss_list, pseudo_labels_list = self._identify_outlier()\n",
    "        positive_index = loss_list.topk(self.num_select, largest=False)[1]\n",
    "        positive_index = positive_index.tolist()\n",
    "        return positive_index\n",
    "\n",
    "\n",
    "    def _identify_outlier(self):\n",
    "        value = []\n",
    "        pseudo_labels_list = []\n",
    "        index = 0\n",
    "        celoss = nn.CrossEntropyLoss(reduction='none').cuda()\n",
    "        self.teacher.eval()\n",
    "        for i, (inputs, labels) in enumerate(self.data_train_loader_noshuffle, start=1):\n",
    "            inputs = inputs.cuda()\n",
    "            outputs = self.teacher(inputs)\n",
    "            pseudo_labels = outputs.data.max(1)[1]\n",
    "            loss = celoss(outputs, pseudo_labels)\n",
    "            value.append(loss.detach().clone())\n",
    "            index += inputs.shape[0]\n",
    "            pseudo_labels_list.append(pseudo_labels)\n",
    "        # cat将[tensor([1]),tensor([2])]改为tensor([1, 2])\n",
    "        return torch.cat(value, dim=0), torch.cat(pseudo_labels_list, dim=0)\n",
    "\n",
    "\n",
    "            \n",
    "\n",
    "\n"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 训练教师网络--CIFAR10"
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "source": [
     "# Paths for data, checkpoints and loss logs.\n",
     "path_current = os.getcwd()\n",
     "path_imagenet = \"/home/yinzp/workspace/dataset\"\n",
     "path_cifar = \"/home/yinzp/workspace/dataset/cifar-10-python\"\n",
     "path_loss = os.path.join(path_current, 'cache/models/teacher/')\n",
     "path_student_ckpt = os.path.join(path_current, 'cache/models/student')\n",
     "path_teacher_ckpt = os.path.join(path_current, 'cache/models/teacher')\n",
     "# Train the ResNet-34 teacher for 200 epochs on CIFAR-10.\n",
     "# NOTE(review): path_imagenet and path_student_ckpt are unused in this cell.\n",
     "train_teacher = TeacherTrainer(200, path_cifar, path_teacher_ckpt, path_loss)\n",
     "train_teacher.train()"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "Epoch:1, Loss:792.418685\n",
      "Test Accuracy:0.405900\n",
      "Epoch:2, Loss:606.494960\n",
      "Test Accuracy:0.473700\n",
      "Epoch:3, Loss:521.761431\n",
      "Test Accuracy:0.482200\n",
      "Epoch:4, Loss:433.693335\n",
      "Test Accuracy:0.652500\n",
      "Epoch:5, Loss:363.521431\n",
      "Test Accuracy:0.674200\n",
      "Epoch:6, Loss:308.265708\n",
      "Test Accuracy:0.742400\n",
      "Epoch:7, Loss:270.097119\n",
      "Test Accuracy:0.708800\n",
      "Epoch:8, Loss:243.298810\n",
      "Test Accuracy:0.776900\n",
      "Epoch:9, Loss:226.158543\n",
      "Test Accuracy:0.787000\n",
      "Epoch:10, Loss:213.838875\n",
      "Test Accuracy:0.740400\n",
      "Epoch:11, Loss:201.648535\n",
      "Test Accuracy:0.755600\n",
      "Epoch:12, Loss:190.229908\n",
      "Test Accuracy:0.798200\n",
      "Epoch:13, Loss:184.381434\n",
      "Test Accuracy:0.772500\n",
      "Epoch:14, Loss:177.682976\n",
      "Test Accuracy:0.774500\n",
      "Epoch:15, Loss:172.242573\n",
      "Test Accuracy:0.803700\n",
      "Epoch:16, Loss:164.769477\n",
      "Test Accuracy:0.805300\n",
      "Epoch:17, Loss:164.672796\n",
      "Test Accuracy:0.813300\n",
      "Epoch:18, Loss:158.660461\n",
      "Test Accuracy:0.782400\n",
      "Epoch:19, Loss:157.266840\n",
      "Test Accuracy:0.787000\n",
      "Epoch:20, Loss:156.301559\n",
      "Test Accuracy:0.850000\n",
      "Epoch:21, Loss:150.743430\n",
      "Test Accuracy:0.813100\n",
      "Epoch:22, Loss:149.755561\n",
      "Test Accuracy:0.831600\n",
      "Epoch:23, Loss:150.287928\n",
      "Test Accuracy:0.792900\n",
      "Epoch:24, Loss:144.069363\n",
      "Test Accuracy:0.807800\n",
      "Epoch:25, Loss:143.505919\n",
      "Test Accuracy:0.837500\n",
      "Epoch:26, Loss:141.672109\n",
      "Test Accuracy:0.817200\n",
      "Epoch:27, Loss:141.818353\n",
      "Test Accuracy:0.818400\n",
      "Epoch:28, Loss:139.797123\n",
      "Test Accuracy:0.822000\n",
      "Epoch:29, Loss:137.161731\n",
      "Test Accuracy:0.827800\n",
      "Epoch:30, Loss:136.213917\n",
      "Test Accuracy:0.832800\n",
      "Epoch:31, Loss:137.792306\n",
      "Test Accuracy:0.823600\n",
      "Epoch:32, Loss:133.847257\n",
      "Test Accuracy:0.810700\n",
      "Epoch:33, Loss:135.356939\n",
      "Test Accuracy:0.856500\n",
      "Epoch:34, Loss:133.139945\n",
      "Test Accuracy:0.846000\n",
      "Epoch:35, Loss:133.137407\n",
      "Test Accuracy:0.849100\n",
      "Epoch:36, Loss:131.694180\n",
      "Test Accuracy:0.859500\n",
      "Epoch:37, Loss:130.106526\n",
      "Test Accuracy:0.821200\n",
      "Epoch:38, Loss:129.901791\n",
      "Test Accuracy:0.803700\n",
      "Epoch:39, Loss:130.739426\n",
      "Test Accuracy:0.828700\n",
      "Epoch:40, Loss:130.799551\n",
      "Test Accuracy:0.828800\n",
      "Epoch:41, Loss:129.778334\n",
      "Test Accuracy:0.829900\n",
      "Epoch:42, Loss:130.251802\n",
      "Test Accuracy:0.832800\n",
      "Epoch:43, Loss:127.266153\n",
      "Test Accuracy:0.865000\n",
      "Epoch:44, Loss:127.533879\n",
      "Test Accuracy:0.821300\n",
      "Epoch:45, Loss:128.374719\n",
      "Test Accuracy:0.827000\n",
      "Epoch:46, Loss:127.511600\n",
      "Test Accuracy:0.879700\n",
      "Epoch:47, Loss:126.500161\n",
      "Test Accuracy:0.864000\n",
      "Epoch:48, Loss:123.401348\n",
      "Test Accuracy:0.758800\n",
      "Epoch:49, Loss:127.646220\n",
      "Test Accuracy:0.854300\n",
      "Epoch:50, Loss:125.096405\n",
      "Test Accuracy:0.830100\n",
      "Epoch:51, Loss:125.722474\n",
      "Test Accuracy:0.859400\n",
      "Epoch:52, Loss:125.004694\n",
      "Test Accuracy:0.836700\n",
      "Epoch:53, Loss:123.565878\n",
      "Test Accuracy:0.848200\n",
      "Epoch:54, Loss:123.717793\n",
      "Test Accuracy:0.845500\n",
      "Epoch:55, Loss:123.480975\n",
      "Test Accuracy:0.819300\n",
      "Epoch:56, Loss:123.545562\n",
      "Test Accuracy:0.853100\n",
      "Epoch:57, Loss:122.836183\n",
      "Test Accuracy:0.773300\n",
      "Epoch:58, Loss:122.768315\n",
      "Test Accuracy:0.835900\n",
      "Epoch:59, Loss:122.674262\n",
      "Test Accuracy:0.844400\n",
      "Epoch:60, Loss:123.040004\n",
      "Test Accuracy:0.856500\n",
      "Epoch:61, Loss:121.809736\n",
      "Test Accuracy:0.823900\n",
      "Epoch:62, Loss:120.026624\n",
      "Test Accuracy:0.834100\n",
      "Epoch:63, Loss:122.174248\n",
      "Test Accuracy:0.813000\n",
      "Epoch:64, Loss:122.783722\n",
      "Test Accuracy:0.838300\n",
      "Epoch:65, Loss:122.125130\n",
      "Test Accuracy:0.839100\n",
      "Epoch:66, Loss:120.423375\n",
      "Test Accuracy:0.825700\n",
      "Epoch:67, Loss:122.983240\n",
      "Test Accuracy:0.850700\n",
      "Epoch:68, Loss:121.900152\n",
      "Test Accuracy:0.837800\n",
      "Epoch:69, Loss:122.387752\n",
      "Test Accuracy:0.837600\n",
      "Epoch:70, Loss:119.359386\n",
      "Test Accuracy:0.834000\n",
      "Epoch:71, Loss:120.091250\n",
      "Test Accuracy:0.872000\n",
      "Epoch:72, Loss:121.820760\n",
      "Test Accuracy:0.883400\n",
      "Epoch:73, Loss:119.463266\n",
      "Test Accuracy:0.852400\n",
      "Epoch:74, Loss:119.442309\n",
      "Test Accuracy:0.852500\n",
      "Epoch:75, Loss:120.900169\n",
      "Test Accuracy:0.845400\n",
      "Epoch:76, Loss:117.961112\n",
      "Test Accuracy:0.805200\n",
      "Epoch:77, Loss:118.862462\n",
      "Test Accuracy:0.820600\n",
      "Epoch:78, Loss:120.991177\n",
      "Test Accuracy:0.833700\n",
      "Epoch:79, Loss:120.323436\n",
      "Test Accuracy:0.840500\n",
      "Epoch:80, Loss:117.778875\n",
      "Test Accuracy:0.856300\n",
      "Epoch:81, Loss:120.856614\n",
      "Test Accuracy:0.832100\n",
      "Epoch:82, Loss:118.565473\n",
      "Test Accuracy:0.833300\n",
      "Epoch:83, Loss:118.311175\n",
      "Test Accuracy:0.844200\n",
      "Epoch:84, Loss:117.837720\n",
      "Test Accuracy:0.831400\n",
      "Epoch:85, Loss:120.094727\n",
      "Test Accuracy:0.871300\n",
      "Epoch:86, Loss:118.285888\n",
      "Test Accuracy:0.824100\n",
      "Epoch:87, Loss:119.190389\n",
      "Test Accuracy:0.826600\n",
      "Epoch:88, Loss:119.235159\n",
      "Test Accuracy:0.794600\n",
      "Epoch:89, Loss:118.774044\n",
      "Test Accuracy:0.844000\n",
      "Epoch:90, Loss:117.829214\n",
      "Test Accuracy:0.843000\n",
      "Epoch:91, Loss:119.014735\n",
      "Test Accuracy:0.834400\n",
      "Epoch:92, Loss:118.426828\n",
      "Test Accuracy:0.811700\n",
      "Epoch:93, Loss:120.660780\n",
      "Test Accuracy:0.840700\n",
      "Epoch:94, Loss:118.505884\n",
      "Test Accuracy:0.857300\n",
      "Epoch:95, Loss:116.124410\n",
      "Test Accuracy:0.820500\n",
      "Epoch:96, Loss:117.240478\n",
      "Test Accuracy:0.850100\n",
      "Epoch:97, Loss:119.056967\n",
      "Test Accuracy:0.839400\n",
      "Epoch:98, Loss:115.856940\n",
      "Test Accuracy:0.864600\n",
      "Epoch:99, Loss:117.439737\n",
      "Test Accuracy:0.737300\n",
      "Epoch:100, Loss:117.646521\n",
      "Test Accuracy:0.812500\n",
      "Epoch:101, Loss:116.254253\n",
      "Test Accuracy:0.870700\n",
      "Epoch:102, Loss:116.818860\n",
      "Test Accuracy:0.831800\n",
      "Epoch:103, Loss:120.024514\n",
      "Test Accuracy:0.859500\n",
      "Epoch:104, Loss:116.063723\n",
      "Test Accuracy:0.821200\n",
      "Epoch:105, Loss:117.712150\n",
      "Test Accuracy:0.828700\n",
      "Epoch:106, Loss:116.332433\n",
      "Test Accuracy:0.821900\n",
      "Epoch:107, Loss:115.160051\n",
      "Test Accuracy:0.856200\n",
      "Epoch:108, Loss:118.186061\n",
      "Test Accuracy:0.856800\n",
      "Epoch:109, Loss:116.170225\n",
      "Test Accuracy:0.813400\n",
      "Epoch:110, Loss:115.976609\n",
      "Test Accuracy:0.859300\n",
      "Epoch:111, Loss:116.749959\n",
      "Test Accuracy:0.855500\n",
      "Epoch:112, Loss:116.970916\n",
      "Test Accuracy:0.867200\n",
      "Epoch:113, Loss:116.844593\n",
      "Test Accuracy:0.795000\n",
      "Epoch:114, Loss:115.718335\n",
      "Test Accuracy:0.866300\n",
      "Epoch:115, Loss:116.992313\n",
      "Test Accuracy:0.874200\n",
      "Epoch:116, Loss:115.915149\n",
      "Test Accuracy:0.871500\n",
      "Epoch:117, Loss:116.279059\n",
      "Test Accuracy:0.865500\n",
      "Epoch:118, Loss:113.475188\n",
      "Test Accuracy:0.851600\n",
      "Epoch:119, Loss:116.748396\n",
      "Test Accuracy:0.835300\n",
      "Epoch:120, Loss:115.469739\n",
      "Test Accuracy:0.789800\n"
     ]
    }
   ],
   "metadata": {}
  },
  {
   "cell_type": "markdown",
   "source": [
    "## 训练学生网络--ImageNet(挑选)"
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "source": [
     "# Paths for data, checkpoints and loss logs.\n",
     "path_current = os.getcwd()\n",
     "path_imagenet = \"/home/yinzp/workspace/dataset\"\n",
     "path_cifar = \"/home/yinzp/workspace/dataset/cifar10\"\n",
     "path_loss = os.path.join(path_current, 'cache/models/student/')\n",
     "path_student_ckpt = os.path.join(path_current, 'cache/models/student')\n",
     "path_teacher_ckpt = os.path.join(path_current, 'cache/models/teacher')\n",
     "# Build the student trainer: 120 epochs, CIFAR-10 teacher, 60000 selected samples.\n",
     "# NOTE(review): trainstudent.train() is never called in this cell -- confirm\n",
     "# whether training was meant to run here.\n",
     "trainstudent = StudentTrainer(120, 'cifar10', path_cifar, path_imagenet, path_loss, path_student_ckpt, path_teacher_ckpt, 60000)\n",
     "print(path_loss)\n",
     "# NOTE(review): this ImageNet dataset object is unused below -- presumably a\n",
     "# leftover from an earlier experiment; verify before removing.\n",
     "dataset_imagenet = ImageNet(path_imagenet, download=False, )"
   ],
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "/home/yinzp/gitee/paper-reading/DFND/cache/models/student/\n"
     ]
    }
   ],
   "metadata": {}
  }
 ],
 "metadata": {
  "orig_nbformat": 4,
  "language_info": {
   "name": "python",
   "version": "3.6.9",
   "mimetype": "text/x-python",
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "pygments_lexer": "ipython3",
   "nbconvert_exporter": "python",
   "file_extension": ".py"
  },
  "kernelspec": {
   "name": "python3",
   "display_name": "Python 3.6.9 64-bit"
  },
  "interpreter": {
   "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}