{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "<h2>Quick Start: Creating Sample-wise Unlearnable Examples</h2>"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "<h3>Prepare Data</h3>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Files already downloaded and verified\n",
      "Files already downloaded and verified\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "e:\\adult-entertainment\\anaconda\\envs\\UE\\Lib\\site-packages\\torch\\utils\\data\\dataloader.py:557: UserWarning: This DataLoader will create 12 worker processes in total. Our suggested max number of worker in current system is 4 (`cpuset` is not taken into account), which is smaller than what this DataLoader is going to create. Please be aware that excessive worker creation might get DataLoader running slow or even freeze, lower the worker number to avoid potential slowness/freeze if necessary.\n",
      "  warnings.warn(_create_warning_msg(\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "import torchvision\n",
    "from torch.utils.data import DataLoader\n",
    "from torchvision import datasets, transforms\n",
    "\n",
    "# Prepare Dataset\n",
    "train_transform = [\n",
    "    transforms.ToTensor()\n",
    "]\n",
    "test_transform = [\n",
    "    transforms.ToTensor()\n",
    "]\n",
    "train_transform = transforms.Compose(train_transform)\n",
    "test_transform = transforms.Compose(test_transform)\n",
    "\n",
    "clean_train_dataset = datasets.CIFAR10(root='../datasets', train=True, download=True, transform=train_transform)\n",
    "clean_test_dataset = datasets.CIFAR10(root='../datasets', train=False, download=True, transform=test_transform)\n",
    "\n",
    "clean_train_loader = DataLoader(dataset=clean_train_dataset, batch_size=512,\n",
    "                                shuffle=False, pin_memory=True,\n",
    "                                drop_last=False, num_workers=12)\n",
    "clean_test_loader = DataLoader(dataset=clean_test_dataset, batch_size=512,\n",
    "                                shuffle=False, pin_memory=True,\n",
    "                                drop_last=False, num_workers=12)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "<h3>Prepare Model</h3>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "from models.ResNet import ResNet18\n",
     "import toolbox\n",
     "\n",
     "torch.backends.cudnn.enabled = True\n",
     "#torch.backends.cudnn.benchmark = True\n",
     "\n",
     "# Source model that the error-minimizing noise will be optimized against.\n",
     "base_model = ResNet18()\n",
     "\n",
     "base_model = base_model.cuda()\n",
     "criterion = torch.nn.CrossEntropyLoss()\n",
     "optimizer = torch.optim.SGD(params=base_model.parameters(), lr=0.1, weight_decay=0.0005, momentum=0.9)\n",
     "\n",
     "# Noise generator with an L-inf budget: epsilon = 8/255 and\n",
     "# step_size = 0.8/255, written out here as decimal literals; 20 inner steps.\n",
     "noise_generator = toolbox.PerturbationTool(epsilon=0.03137254901960784, num_steps=20, step_size=0.0031372549019607846)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "<h3>Generate Error-Minimizing Noise</h3>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 98/98 [03:14<00:00,  1.99s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Accuracy 13.28\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 98/98 [03:15<00:00,  1.99s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Accuracy 22.71\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 98/98 [03:15<00:00,  1.99s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Accuracy 60.11\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 98/98 [03:14<00:00,  1.99s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Accuracy 59.25\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 98/98 [03:15<00:00,  1.99s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Accuracy 55.90\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 98/98 [03:15<00:00,  1.99s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Accuracy 89.93\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 98/98 [03:15<00:00,  2.00s/it]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Accuracy 99.62\n"
     ]
    }
   ],
   "source": [
    "from tqdm import tqdm\n",
    "\n",
    "noise = torch.zeros([50000, 3, 32, 32])\n",
    "data_iter = iter(clean_train_loader)\n",
    "condition = True\n",
    "train_idx = 0\n",
    "cur_times = 30\n",
    "#while condition:\n",
    "while cur_times>0:\n",
    "    # optimize theta for M steps  这里 M 是 10，拿M * batch 张照片，加入对应噪声，然后拟合\n",
    "    base_model.train()\n",
    "    for param in base_model.parameters():\n",
    "        param.requires_grad = True\n",
    "    for j in range(0, 10):  \n",
    "        try:\n",
    "            (images, labels) = next(data_iter)  #   每次取一批图片，batch为512张.10轮即5120张图片\n",
    "        except:\n",
    "            train_idx = 0\n",
    "            data_iter = iter(clean_train_loader)\n",
    "            (images, labels) = next(data_iter)\n",
    "        \n",
    "        for i, _ in enumerate(images):\n",
    "            # Update noise to images\n",
    "            images[i] += noise[train_idx]\n",
    "            train_idx += 1              #   sample-wise\n",
    "        images, labels = images.cuda( ), labels.cuda( )\n",
    "        base_model.zero_grad()  #   每次优化都清零梯度？每张照片独立根据梯度优化\n",
    "        optimizer.zero_grad()\n",
    "        logits = base_model(images)\n",
    "        loss = criterion(logits, labels)\n",
    "        loss.backward()\n",
    "        torch.nn.utils.clip_grad_norm_(base_model.parameters(), 5.0)\n",
    "        optimizer.step()\n",
    "    \n",
    "    # Perturbation over entire dataset\n",
    "    idx = 0\n",
    "    for param in base_model.parameters():\n",
    "        param.requires_grad = False         #   禁用所有模型的参数更新，接下来要更新输入\n",
    "    for i, (images, labels) in tqdm(enumerate(clean_train_loader), total=len(clean_train_loader)):\n",
    "        batch_start_idx, batch_noise = idx, []\n",
    "        for i, _ in enumerate(images):\n",
    "            # Update noise to images\n",
    "            batch_noise.append(noise[idx])\n",
    "            idx += 1\n",
    "        batch_noise = torch.stack(batch_noise).cuda( )\n",
    "        \n",
    "        # Update sample-wise perturbation\n",
    "        base_model.eval()\n",
    "        images, labels = images.cuda( ), labels.cuda( )\n",
    "        perturb_img, eta = noise_generator.min_min_attack(images, labels, base_model, optimizer, criterion, \n",
    "                                                          random_noise=batch_noise) #   eta是该方法 迭代过后的噪声\n",
    "        for i, delta in enumerate(eta):\n",
    "            noise[batch_start_idx+i] = delta.clone().detach().cpu()     #   将批处理中每个元素的扰动 delta 从 eta 中复制出来，并将它们作为独立的张量存储在 noise 的相应位置上\n",
    "        \n",
    "    # Eval stop condition\n",
    "    eval_idx, total, correct = 0, 0, 0\n",
    "    for i, (images, labels) in enumerate(clean_train_loader):\n",
    "        for i, _ in enumerate(images):\n",
    "            # Update noise to images\n",
    "            images[i] += noise[eval_idx]    #   一个样本对应加一个噪声\n",
    "            eval_idx += 1\n",
    "        images, labels = images.cuda( ), labels.cuda( )\n",
    "        with torch.no_grad():               #   不更新梯度的情况下，正向传播，记录准确率\n",
    "            logits = base_model(images)\n",
    "            _, predicted = torch.max(logits.data, 1)\n",
    "            total += labels.size(0)\n",
    "            correct += (predicted == labels).sum().item()\n",
    "    acc = correct / total\n",
    "    print('Accuracy %.2f' % (acc*100))\n",
    "    if acc > 0.99:\n",
    "        condition=False    \n",
    "    cur_times -=1 \n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Examine the noise\n",
    "print(noise)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "<h3>Create Unlearnable Dataset</h3>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Files already downloaded and verified\n",
      "Files already downloaded and verified\n",
      "Files already downloaded and verified\n",
      "Files already downloaded and verified\n",
      "Files already downloaded and verified\n",
      "Files already downloaded and verified\n",
      "Files already downloaded and verified\n",
      "Files already downloaded and verified\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "import random\n",
    "\n",
    "# Add standard augmentation\n",
    "train_transform = [\n",
    "    transforms.RandomCrop(32, padding=4),\n",
    "    transforms.RandomHorizontalFlip(),\n",
    "    transforms.ToTensor()\n",
    "]\n",
    "train_transform = transforms.Compose(train_transform)\n",
    "clean_train_dataset = datasets.CIFAR10(root='../datasets', train=True, download=True, transform=train_transform)\n",
    "unlearnable_train_dataset = datasets.CIFAR10(root='../datasets', train=True, download=True, transform=train_transform)\n",
    "\n",
    "\n",
    "perturb_noise = noise.mul(255).clamp_(0, 255).permute(0, 2, 3, 1).to('cpu').numpy()\n",
    "unlearnable_train_dataset.data = unlearnable_train_dataset.data.astype(np.float32)\n",
    "for i in range(len(unlearnable_train_dataset)):\n",
    "    unlearnable_train_dataset.data[i] += perturb_noise[i]\n",
    "    unlearnable_train_dataset.data[i] = np.clip(unlearnable_train_dataset.data[i], a_min=0, a_max=255)\n",
    "unlearnable_train_dataset.data = unlearnable_train_dataset.data.astype(np.uint8)\n",
    "#-------------------------------------------------------------------------------------------------------------------------------\n",
    "# 将噪声不对应地加进原数据集中，生成乱序的带噪声数据集ue1\n",
    "shuffle_ue1_dataset = datasets.CIFAR10(root='../datasets', train=True, download=True, transform=train_transform)\n",
    "shuffle_ue1_dataset.data = shuffle_ue1_dataset.data.astype(np.float32)\n",
    "\n",
    "# 定义一个数组1\n",
    "random_index = np.arange(len(shuffle_ue1_dataset))\n",
    "# 打乱数组，直到满足所有元素的值都不等于其下标\n",
    "while True:\n",
    "    np.random.shuffle(random_index)\n",
    "    if not np.any(random_index == np.arange(len(shuffle_ue1_dataset))):\n",
    "        break\n",
    "\n",
    "for i in range(len(shuffle_ue1_dataset)):\n",
    "    shuffle_ue1_dataset.data[i] += perturb_noise[random_index[i]]\n",
    "    shuffle_ue1_dataset.data[i] = np.clip(shuffle_ue1_dataset.data[i], a_min=0, a_max=255)\n",
    "shuffle_ue1_dataset.data = shuffle_ue1_dataset.data.astype(np.uint8)\n",
    "\n",
    "#-------------------------------------------------------------------------------------------------------------------------------\n",
    "# 将噪声不对应地加进原数据集中，生成乱序的带噪声数据集ue2\n",
    "shuffle_ue2_dataset = datasets.CIFAR10(root='../datasets', train=True, download=True, transform=train_transform)\n",
    "shuffle_ue2_dataset.data = shuffle_ue2_dataset.data.astype(np.float32)\n",
    "\n",
    "# 定义一个数组2\n",
    "random_index2 = np.arange(len(shuffle_ue2_dataset))\n",
    "# 打乱数组，直到满足所有元素的值都不等于其下标\n",
    "while True:\n",
    "    np.random.shuffle(random_index2)\n",
    "    if not np.any(random_index2 == np.arange(len(shuffle_ue2_dataset))):\n",
    "        break\n",
    "\n",
    "for i in range(len(shuffle_ue2_dataset)):\n",
    "    shuffle_ue2_dataset.data[i] += perturb_noise[random_index2[i]]\n",
    "    shuffle_ue2_dataset.data[i] = np.clip(shuffle_ue2_dataset.data[i], a_min=0, a_max=255)\n",
    "shuffle_ue2_dataset.data = shuffle_ue2_dataset.data.astype(np.uint8)\n",
    "\n",
    "#-------------------------------------------------------------------------------------------------------------------------------\n",
    "#以randomrate%的概率随机触发，且乱序插入噪声，生成%shuffle ue1\n",
    "shuffle_pper_ue1_dataset = datasets.CIFAR10(root='../datasets', train=True, download=True, transform=train_transform)\n",
    "shuffle_pper_ue1_dataset.data = shuffle_pper_ue1_dataset.data.astype(np.float32)\n",
    "\n",
    "randomrate = 0.7\n",
    "\n",
    "for i in range(len(shuffle_pper_ue1_dataset)):\n",
    "    if random.random() < randomrate:\n",
    "    # 这里是概率为ramdomrate%时执行的代码,将随机加入一个\n",
    "        shuffle_pper_ue1_dataset.data[i] += perturb_noise[random_index[i]]\n",
    "        shuffle_pper_ue1_dataset.data[i] = np.clip(shuffle_pper_ue1_dataset.data[i], a_min=0, a_max=255)\n",
    "shuffle_pper_ue1_dataset.data = shuffle_pper_ue1_dataset.data.astype(np.uint8)\n",
    "#-------------------------------------------------------------------------------------------------------------------------------\n",
    "#以%的概率随机触发，且乱序插入噪声，生成%shuffle ue2\n",
    "shuffle_pper_ue2_dataset = datasets.CIFAR10(root='../datasets', train=True, download=True, transform=train_transform)\n",
    "shuffle_pper_ue2_dataset.data = shuffle_pper_ue2_dataset.data.astype(np.float32)\n",
    "\n",
    "for i in range(len(shuffle_pper_ue2_dataset)):\n",
    "    if random.random() < randomrate:\n",
    "    # 这里是概率为randomrate%时执行的代码,将随机加入一个\n",
    "        shuffle_pper_ue2_dataset.data[i] += perturb_noise[random_index2[i]]\n",
    "        shuffle_pper_ue2_dataset.data[i] = np.clip(shuffle_pper_ue2_dataset.data[i], a_min=0, a_max=255)\n",
    "shuffle_pper_ue2_dataset.data = shuffle_pper_ue2_dataset.data.astype(np.uint8)\n",
    "#-------------------------------------------------------------------------------------------------------------------------------\n",
    "#以randomrate%的概率随机触发，且顺序插入噪声，生成%order_pper_ue1\n",
    "order_pper_ue1_dataset = datasets.CIFAR10(root='../datasets', train=True, download=True, transform=train_transform)\n",
    "order_pper_ue1_dataset.data = order_pper_ue1_dataset.data.astype(np.float32)\n",
    "\n",
    "# 定义一个数组2\n",
    "random_index2 = np.arange(len(order_pper_ue1_dataset))\n",
    "# 打乱数组，直到满足所有元素的值都不等于其下标\n",
    "while True:\n",
    "    np.random.shuffle(random_index2)\n",
    "    if not np.any(random_index2 == np.arange(len(order_pper_ue1_dataset))):\n",
    "        break\n",
    "\n",
    "for i in range(len(order_pper_ue1_dataset)):\n",
    "    if random.random() < randomrate:\n",
    "    # 这里是概率为ramdomrate%时执行的代码,将随机加入一个\n",
    "        order_pper_ue1_dataset.data[i] += perturb_noise[random_index[i]]\n",
    "        order_pper_ue1_dataset.data[i] = np.clip(order_pper_ue1_dataset.data[i], a_min=0, a_max=255)\n",
    "    else:   #   否则加正确顺序的噪声\n",
    "        order_pper_ue1_dataset.data[i] += perturb_noise[i]\n",
    "        order_pper_ue1_dataset.data[i] = np.clip(order_pper_ue1_dataset.data[i], a_min=0, a_max=255)\n",
    "order_pper_ue1_dataset.data = order_pper_ue1_dataset.data.astype(np.uint8)\n",
    "#-------------------------------------------------------------------------------------------------------------------------------\n",
    "#以randomrate%的概率随机触发，且顺序插入噪声，生成%order_pper_ue2\n",
    "order_pper_ue2_dataset = datasets.CIFAR10(root='../datasets', train=True, download=True, transform=train_transform)\n",
    "order_pper_ue2_dataset.data = order_pper_ue2_dataset.data.astype(np.float32)\n",
    "\n",
    "# 定义一个数组2\n",
    "random_index2 = np.arange(len(order_pper_ue2_dataset))\n",
    "# 打乱数组，直到满足所有元素的值都不等于其下标\n",
    "while True:\n",
    "    np.random.shuffle(random_index2)\n",
    "    if not np.any(random_index2 == np.arange(len(order_pper_ue2_dataset))):\n",
    "        break\n",
    "\n",
    "for i in range(len(order_pper_ue2_dataset)):\n",
    "    if random.random() < randomrate:\n",
    "    # 这里是概率为ramdomrate%时执行的代码,将随机加入一个\n",
    "        order_pper_ue2_dataset.data[i] += perturb_noise[random_index2[i]]\n",
    "        order_pper_ue2_dataset.data[i] = np.clip(order_pper_ue2_dataset.data[i], a_min=0, a_max=255)\n",
    "    else:   #   否则加正确顺序的噪声\n",
    "        order_pper_ue2_dataset.data[i] += perturb_noise[i]\n",
    "        order_pper_ue2_dataset.data[i] = np.clip(order_pper_ue2_dataset.data[i], a_min=0, a_max=255)\n",
    "order_pper_ue2_dataset.data = order_pper_ue2_dataset.data.astype(np.uint8)\n",
    "\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "<h3>Visualize Clean Images, Error-Minimizing Noise, Unlearnable Images</h3>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import random\n",
    "import matplotlib.pyplot as plt\n",
    "import matplotlib\n",
    "%matplotlib inline\n",
    "\n",
    "def imshow(img):\n",
    "    fig = plt.figure(figsize=(8, 8), dpi=80, facecolor='w', edgecolor='k')\n",
    "    npimg = img.numpy()\n",
    "    plt.imshow(np.transpose(npimg, (1, 2, 0)))\n",
    "    plt.show()\n",
    "    \n",
    "def get_pairs_of_imgs(idx):\n",
    "    clean_img = clean_train_dataset.data[idx]\n",
    "    unlearnable_img = unlearnable_train_dataset.data[idx]\n",
    "    clean_img = torchvision.transforms.functional.to_tensor(clean_img)\n",
    "    unlearnable_img = torchvision.transforms.functional.to_tensor(unlearnable_img)\n",
    "\n",
    "    x = noise[idx]\n",
    "    x_min = torch.min(x)\n",
    "    x_max = torch.max(x)\n",
    "    noise_norm = (x - x_min) / (x_max - x_min)\n",
    "    noise_norm = torch.clamp(noise_norm, 0, 1)\n",
    "    return [clean_img, noise_norm, unlearnable_img]\n",
    "    \n",
    "selected_idx = [random.randint(0, 50000) for _ in range(3)]\n",
    "img_grid = []\n",
    "for idx in selected_idx:\n",
    "    img_grid += get_pairs_of_imgs(idx)\n",
    "    \n",
    "\n",
    "imshow(torchvision.utils.make_grid(torch.stack(img_grid), nrow=3, pad_value=255))\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "<h3>Train ResNet18 on Unlearnable Dataset</h3>"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Acc 33.95 Loss: 1.83: 100%|██████████| 391/391 [01:15<00:00,  5.21it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第 0 轮 Clean Accuracy 43.60\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Acc 51.57 Loss: 1.33: 100%|██████████| 391/391 [01:14<00:00,  5.24it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第 1 轮 Clean Accuracy 57.67\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Acc 62.77 Loss: 1.05: 100%|██████████| 391/391 [01:14<00:00,  5.24it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第 2 轮 Clean Accuracy 66.85\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Acc 70.11 Loss: 0.85: 100%|██████████| 391/391 [01:14<00:00,  5.24it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第 3 轮 Clean Accuracy 72.90\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Acc 75.48 Loss: 0.71: 100%|██████████| 391/391 [01:14<00:00,  5.25it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第 4 轮 Clean Accuracy 77.58\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Acc 78.73 Loss: 0.62: 100%|██████████| 391/391 [01:14<00:00,  5.22it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第 5 轮 Clean Accuracy 80.63\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Acc 80.93 Loss: 0.55: 100%|██████████| 391/391 [01:14<00:00,  5.25it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第 6 轮 Clean Accuracy 82.00\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Acc 82.38 Loss: 0.51: 100%|██████████| 391/391 [01:14<00:00,  5.27it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第 7 轮 Clean Accuracy 83.38\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Acc 83.52 Loss: 0.48: 100%|██████████| 391/391 [01:14<00:00,  5.23it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第 8 轮 Clean Accuracy 83.98\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Acc 84.25 Loss: 0.45: 100%|██████████| 391/391 [01:14<00:00,  5.28it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第 9 轮 Clean Accuracy 84.26\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Acc 85.34 Loss: 0.42: 100%|██████████| 391/391 [01:13<00:00,  5.28it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第 10 轮 Clean Accuracy 86.24\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Acc 86.36 Loss: 0.40: 100%|██████████| 391/391 [01:13<00:00,  5.29it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第 11 轮 Clean Accuracy 86.85\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Acc 87.10 Loss: 0.37: 100%|██████████| 391/391 [01:14<00:00,  5.26it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第 12 轮 Clean Accuracy 86.92\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Acc 87.84 Loss: 0.35: 100%|██████████| 391/391 [01:14<00:00,  5.28it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第 13 轮 Clean Accuracy 88.45\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Acc 88.77 Loss: 0.33: 100%|██████████| 391/391 [01:14<00:00,  5.26it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第 14 轮 Clean Accuracy 89.22\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Acc 89.66 Loss: 0.30: 100%|██████████| 391/391 [01:14<00:00,  5.25it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第 15 轮 Clean Accuracy 90.38\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Acc 91.41 Loss: 0.25:   0%|          | 1/391 [00:53<5:47:04, 53.40s/it]Exception ignored in: <function _MultiProcessingDataLoaderIter.__del__ at 0x00000247917F29D0>\n",
      "Traceback (most recent call last):\n",
      "  File \"c:\\ProgramData\\Anaconda3\\envs\\UE\\lib\\site-packages\\torch\\utils\\data\\dataloader.py\", line 1478, in __del__\n",
      "    self._shutdown_workers()\n",
      "  File \"c:\\ProgramData\\Anaconda3\\envs\\UE\\lib\\site-packages\\torch\\utils\\data\\dataloader.py\", line 1436, in _shutdown_workers\n",
      "    if self._persistent_workers or self._workers_status[worker_id]:\n",
      "AttributeError: '_MultiProcessingDataLoaderIter' object has no attribute '_workers_status'\n",
      "Exception ignored in: <function _MultiProcessingDataLoaderIter.__del__ at 0x00000247917F29D0>\n",
      "Traceback (most recent call last):\n",
      "  File \"c:\\ProgramData\\Anaconda3\\envs\\UE\\lib\\site-packages\\torch\\utils\\data\\dataloader.py\", line 1478, in __del__\n",
      "    self._shutdown_workers()\n",
      "  File \"c:\\ProgramData\\Anaconda3\\envs\\UE\\lib\\site-packages\\torch\\utils\\data\\dataloader.py\", line 1436, in _shutdown_workers\n",
      "    if self._persistent_workers or self._workers_status[worker_id]:\n",
      "AttributeError: '_MultiProcessingDataLoaderIter' object has no attribute '_workers_status'\n",
      "Acc 90.30 Loss: 0.28: 100%|██████████| 391/391 [01:13<00:00,  5.32it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第 16 轮 Clean Accuracy 91.15\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Acc 91.26 Loss: 0.26: 100%|██████████| 391/391 [01:14<00:00,  5.25it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第 17 轮 Clean Accuracy 91.45\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Acc 91.84 Loss: 0.24: 100%|██████████| 391/391 [01:14<00:00,  5.23it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第 18 轮 Clean Accuracy 92.70\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Acc 93.05 Loss: 0.20: 100%|██████████| 391/391 [01:14<00:00,  5.26it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第 19 轮 Clean Accuracy 93.26\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Acc 93.95 Loss: 0.17: 100%|██████████| 391/391 [01:13<00:00,  5.30it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第 20 轮 Clean Accuracy 93.60\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Acc 94.55 Loss: 0.16: 100%|██████████| 391/391 [01:13<00:00,  5.31it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第 21 轮 Clean Accuracy 95.20\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Acc 95.82 Loss: 0.12: 100%|██████████| 391/391 [01:14<00:00,  5.28it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第 22 轮 Clean Accuracy 96.10\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Acc 96.66 Loss: 0.10: 100%|██████████| 391/391 [01:14<00:00,  5.27it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第 23 轮 Clean Accuracy 97.03\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Acc 97.50 Loss: 0.08: 100%|██████████| 391/391 [01:14<00:00,  5.25it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第 24 轮 Clean Accuracy 97.70\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Acc 98.27 Loss: 0.06: 100%|██████████| 391/391 [01:13<00:00,  5.30it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第 25 轮 Clean Accuracy 98.47\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Acc 98.60 Loss: 0.05: 100%|██████████| 391/391 [01:13<00:00,  5.31it/s] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第 26 轮 Clean Accuracy 98.66\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Acc 99.14 Loss: 0.03: 100%|██████████| 391/391 [01:14<00:00,  5.28it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第 27 轮 Clean Accuracy 99.03\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Acc 99.26 Loss: 0.03: 100%|██████████| 391/391 [01:13<00:00,  5.30it/s] \n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第 28 轮 Clean Accuracy 99.13\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Acc 99.32 Loss: 0.03: 100%|██████████| 391/391 [01:13<00:00,  5.33it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "第 29 轮 Clean Accuracy 99.11\n",
      "\n"
     ]
    }
   ],
   "source": [
    "from util import AverageMeter\n",
    "\n",
    "model = ResNet18()\n",
    "model = model.cuda( )\n",
    "criterion = torch.nn.CrossEntropyLoss()\n",
    "optimizer = torch.optim.SGD(params=model.parameters(), lr=0.1, weight_decay=0.0005, momentum=0.9)\n",
    "scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=30, eta_min=0)\n",
    "\n",
    "unlearnable_loader = DataLoader(dataset=unlearnable_train_dataset, batch_size=128,\n",
    "                                shuffle=True, pin_memory=True,\n",
    "                                drop_last=False, num_workers=12)\n",
    "#   全乱序加噪声的数据集ue1\n",
    "shuffle_ue1_loader = DataLoader(dataset=shuffle_ue1_dataset, batch_size=128,\n",
    "                                shuffle=True, pin_memory=True,\n",
    "                                drop_last=False, num_workers=12)\n",
    "#   全乱序加噪声的数据集ue2\n",
    "shuffle_ue2_loader = DataLoader(dataset=shuffle_ue2_dataset, batch_size=128,\n",
    "                                shuffle=True, pin_memory=True,\n",
    "                                drop_last=False, num_workers=12)\n",
    "#   乱序加百分比%噪声的数据集pper_ue1\n",
    "order_pper_ue1_loader = DataLoader(dataset=order_pper_ue1_dataset, batch_size=128,\n",
    "                                shuffle=True, pin_memory=True,\n",
    "                                drop_last=False, num_workers=12)\n",
    "#   乱序加百分%噪声的数据集pper_ue2\n",
    "order_pper_ue2_loader = DataLoader(dataset=order_pper_ue2_dataset, batch_size=128,\n",
    "                                shuffle=True, pin_memory=True,\n",
    "                                drop_last=False, num_workers=12)\n",
    "\n",
    "\n",
    "\n",
    "for epoch in range(30):\n",
    "    # Train\n",
    "    model.train()\n",
    "    acc_meter = AverageMeter()\n",
    "    loss_meter = AverageMeter()\n",
    "    pbar = tqdm(order_pper_ue1_loader, total=len(order_pper_ue1_loader))\n",
    "    for images, labels in pbar:\n",
    "        images, labels = images.cuda( ), labels.cuda( )\n",
    "        model.zero_grad()\n",
    "        optimizer.zero_grad()\n",
    "        logits = model(images)\n",
    "        loss = criterion(logits, labels)\n",
    "        loss.backward()\n",
    "        torch.nn.utils.clip_grad_norm_(model.parameters(), 5.0)\n",
    "        optimizer.step()\n",
    "        \n",
    "        _, predicted = torch.max(logits.data, 1)\n",
    "        acc = (predicted == labels).sum().item()/labels.size(0)\n",
    "        acc_meter.update(acc)\n",
    "        loss_meter.update(loss.item())\n",
    "        pbar.set_description(\"Acc %.2f Loss: %.2f\" % (acc_meter.avg*100, loss_meter.avg))\n",
    "    scheduler.step()\n",
    "    # Eval\n",
    "    # model.eval()\n",
    "    # correct, total = 0, 0\n",
    "    # for i, (images, labels) in enumerate(clean_test_loader):\n",
    "    #     images, labels = images.cuda( ), labels.cuda( )\n",
    "    #     with torch.no_grad():\n",
    "    #         logits = model(images)\n",
    "    #         _, predicted = torch.max(logits.data, 1)\n",
    "    #         total += labels.size(0)\n",
    "    #         correct += (predicted == labels).sum().item()\n",
    "    # acc = correct / total\n",
    "    # tqdm.write('Clean Accuracy %.2f\\n' % (acc*100))\n",
    "\n",
    "    #   Eval on poinsoning dataset\n",
    "    correct, total = 0, 0\n",
    "    for i, (images, labels) in enumerate(order_pper_ue2_loader):\n",
    "        images, labels = images.cuda( ), labels.cuda( )\n",
    "        with torch.no_grad():\n",
    "            logits = model(images)\n",
    "            _, predicted = torch.max(logits.data, 1)\n",
    "            total += labels.size(0)\n",
    "            correct += (predicted == labels).sum().item()\n",
    "    acc = correct / total\n",
    "    tqdm.write('第 %d 轮 Clean Accuracy %.2f\\n' % (epoch,acc*100))\n",
    "\n",
    "    \n",
    "            "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3.11.1 64-bit",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.0"
  },
  "vscode": {
   "interpreter": {
    "hash": "198e9af14d851d67d437240d479d15c97ce0316b700c437f81ba8f51ab4cfebe"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
