{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "6b439e84-8c6b-4126-8586-d7a1a7c7614e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# This notebook runs the experiments on CIFAR-100 and collects the experimental data."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "daf0a1f8-ef8d-4246-b945-b6e5d905f5f5",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Attacks covered: baseline, threshold-based, likelihood-ratio; plus noise-robustness checks"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "3a72ff09-8088-4a9d-b1f7-0825ff54c477",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Experiment goals, tunable parameters, and expected outputs\n",
    "# Expected results:\n",
    "# 1. Histogram of the loss distribution for high-risk samples\n",
    "# 2. Base attack success rate as a function of risk\n",
    "# 3. Outlier points selected by the neighbor-count / distance parameters\n",
    "# 4. For the same attack, success-rate comparison between outlier selection and the risk metric\n",
    "# 5. For the same risk points, success-rate comparison across attack methods\n",
    "# 6. Effect of the model's training-set size\n",
    "# 7. Effect of the number of reference models\n",
    "\n",
    "\n",
    "# Experimental parameters to control:\n",
    "# 1. Attack method\n",
    "# 2. Outlier proportion\n",
    "# 3. Model training-set size"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "b61f101e-1c1e-41df-b80b-65d1e3d6eab8",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "from torch import nn\n",
    "from torch.utils.data import DataLoader\n",
    "from torch.utils.data import Dataset\n",
    "from torchvision import datasets\n",
    "from torchvision import transforms\n",
    "from torchvision.transforms import ToTensor\n",
    "import torchvision.transforms as tt\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn import metrics\n",
    "\n",
    "import os"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "37ddaa77-35ce-49b9-acfd-a7799aadd9a5",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Import the project-local helper modules from the parent directory.\n",
    "# NOTE(review): `import *` hides where names such as CustomDataset, LIRA_attack\n",
    "# and evaluate come from; explicit imports would be clearer.\n",
    "import sys\n",
    "sys.path.append(\"..\") # Adds higher directory to python modules path.\n",
    "from frame.DataProcess import *\n",
    "from frame.TrainUtil import *\n",
    "from frame.LIRAAttack import *\n",
    "from frame.AttackUtil import *\n",
    "from frame.ShadowAttack import *\n",
    "from frame.ThresholdAttack import *\n",
    "from frame.LabelAttack import *"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "4636a18e-244e-4a21-ba21-591c0295ce7a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Global experiment configuration.\n",
    "LEARNING_RATE = 0.1\n",
    "BATCH_SIZE = 128\n",
    "MODEL = 'ResNet18'\n",
    "EPOCHS = 100\n",
    "DATA_NAME = 'CIFAR100' \n",
    "weight_dir = os.path.join('..', 'weights_for_exp', DATA_NAME)\n",
    "num_shadowsets = 100  # number of shadow / reference models\n",
    "seed = 0\n",
    "prop_keep = 0.5  # presumably the fraction of the pool kept as members per shadow set -- confirm in DataProcess\n",
    "\n",
    "# Normalization uses the standard CIFAR-100 per-channel mean/std.\n",
    "model_transform = transforms.Compose([\n",
    "    # transforms.ToPILImage(),\n",
    "    # transforms.RandomCrop(32, padding=4),\n",
    "    # transforms.RandomHorizontalFlip(),\n",
    "    # transforms.RandomRotation(15),\n",
    "    transforms.ToTensor(),\n",
    "    transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])\n",
    "    ])\n",
    "attack_transform = transforms.Compose([])\n",
    "device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n",
    "\n",
    "# Shadow-model attack parameters\n",
    "sha_models = [1,2,3] #[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]\n",
    "tar_model = 0  # index of the target model\n",
    "attack_class = False # whether to run a separate attack per class\n",
    "attack_lr = 5e-4"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1682aff9-4d9e-4827-aaf3-f7cfcb9bd24c",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "b8762984-6f4d-49ad-9010-95d9c6f86b35",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Files already downloaded and verified\n",
      "Files already downloaded and verified\n"
     ]
    }
   ],
   "source": [
    "# Load the full data pool plus the per-shadow-model membership mask.\n",
    "# train_keep is presumably a (num_shadowsets, n_samples) boolean membership\n",
    "# matrix -- confirm in frame.DataProcess.\n",
    "X_data, Y_data, train_keep = load_CIFAR100_keep(num_shadowsets, prop_keep, seed)\n",
    "all_data = CustomDataset(X_data, Y_data, model_transform)\n",
    "all_dataloader = DataLoader(all_data, batch_size=64, shuffle=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "10df71ae-7cb6-49d4-b1b6-69a52a79026b",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Files already downloaded and verified\n"
     ]
    }
   ],
   "source": [
    "# CIFAR-100 test split; the raw arrays are wrapped in CustomDataset so the\n",
    "# same normalization transform is applied as for the training pool.\n",
    "test_dataset = datasets.cifar.CIFAR100(root='../datasets/cifar100', train=False, transform=None, download=True)\n",
    "x_test_data = test_dataset.data\n",
    "y_test_data = np.array(test_dataset.targets)\n",
    "test_data = CustomDataset(x_test_data, y_test_data, model_transform)\n",
    "test_dataloader = DataLoader(test_data, batch_size=BATCH_SIZE)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "5db8af87-3374-41c7-ab8b-ee885c2fcdc3",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Lower-case aliases used by the helper calls below.\n",
    "batch_size = BATCH_SIZE\n",
    "model = MODEL\n",
    "epochs = EPOCHS\n",
    "data_name = DATA_NAME \n",
    "# Weight-file name prefix. The shadow-model count is taken from the config\n",
    "# instead of being hard-coded as 100, so it cannot drift out of sync with\n",
    "# num_shadowsets (identical result for the current num_shadowsets = 100).\n",
    "weight_part = \"{}_{}_epoch{}_shadownum{}_model\".format(data_name, model, epochs, num_shadowsets)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b9188f04-4928-4201-b9de-31a398abbf06",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "id": "edbb543f-ce6c-423c-abc8-5bdb4d98ba70",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(32, 32, 3)"
      ]
     },
     "execution_count": 38,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "X_data[0].shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "6fe181d3-fa6b-48b1-86f3-826a72a0410f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Files already downloaded and verified\n",
      "Files already downloaded and verified\n"
     ]
    }
   ],
   "source": [
    "# Member/non-member split for target model 0, loaded via globals() dispatch\n",
    "# (resolves to load_CIFAR100 here). x_train is presumably the member split of\n",
    "# model 0's training data -- confirm in frame.DataProcess.\n",
    "(x_train, y_train), (x_test, y_test), train_keep_exp, test_keep_exp = globals()['load_{}'.format(data_name)](0, num_shadowsets, prop_keep=prop_keep, seed=0)\n",
    "training_data = CustomDataset(x_train, y_train, model_transform)\n",
    "train_dataloader = DataLoader(training_data, batch_size=batch_size)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5098449a-776e-4cfc-ab0c-2d3ca69cc997",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "e998bc1c-6824-4216-a145-9de83e414d28",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Test Error: \n",
      " Accuracy: 99.8%, Avg loss: 0.017822 \n",
      "\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "0.9984412470023981"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Load target model 0 and sanity-check it on its own training split.\n",
    "Model = globals()['create_{}_model'.format(model)](Y_data.max()+1)\n",
    "weight_path = os.path.join(weight_dir, weight_part + \"{}.pth\".format(0))\n",
    "# map_location keeps the checkpoint loadable on CPU-only machines even if it\n",
    "# was saved from a CUDA device; it is a no-op when device types match.\n",
    "Model.load_state_dict(torch.load(weight_path, map_location=device))\n",
    "Model.to(device)\n",
    "\n",
    "loss_fn = nn.CrossEntropyLoss()\n",
    "evaluate(train_dataloader, Model, loss_fn, device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "832a2c96-481f-4471-a098-4a08c31e9d86",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "18589006-8f9f-48a8-87ce-d57ab3678f88",
   "metadata": {},
   "source": [
    "## 脆弱点的两种选择方式"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "795bc62f-e5fd-4901-977b-953f386be6b5",
   "metadata": {},
   "source": [
    "### 风险指标计算"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "4a659f5e-de7f-490c-996e-2015ff6e2a05",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# One-time computation: load the loss, confidence, and score outputs on all reference models (results are cached below)\n",
    "# conf_data_all, label_data, score_all = load_score_data_all(X_data, Y_data, weight_dir, num_shadowsets, data_name, model, weight_part, model_transform, batch_size, device)\n",
    "# loss_fn = nn.CrossEntropyLoss(reduction='none')\n",
    "# loss_data_all, label_data = load_loss_data_all(X_data, Y_data, loss_fn, weight_dir, num_shadowsets, data_name, model, weight_part, model_transform, batch_size, device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "e81ba8d7-261b-474d-80a6-e59b22ca9202",
   "metadata": {},
   "outputs": [],
   "source": [
    "# np.save('../outputs_save/CIFAR100_resnet_loss.npy', loss_data_all)\n",
    "# np.save('../outputs_save/CIFAR100_resnet_score.npy', score_all)\n",
    "# np.save('../outputs_save/CIFAR100_resnet_conf.npy', conf_data_all)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "40fefc0a-b557-4cdb-a87f-d60a5f0e8728",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the cached per-reference-model outputs (produced once by the\n",
    "# commented-out cells above); first axis is presumably the model index.\n",
    "loss_data_all = np.load('../outputs_save/CIFAR100_resnet_loss.npy')\n",
    "score_all = np.load('../outputs_save/CIFAR100_resnet_score.npy')\n",
    "conf_data_all = np.load('../outputs_save/CIFAR100_resnet_conf.npy')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "a9101bb7-4565-41b5-bacb-21ae5d341e68",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Compute a per-point privacy-risk (MPLR) score from the reference-model\n",
    "# losses and the membership mask: a vulnerability rating for each data point.\n",
    "pri_risk_all = get_risk_score(loss_data_all, train_keep)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "66866793-5432-4a1f-b45d-cbb5ba6a591f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Rank points by MPLR risk score, highest-risk first.\n",
    "pri_risk_rank = np.argsort(pri_risk_all)\n",
    "pri_risk_rank = np.flip(pri_risk_rank)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bf228304-af72-480e-87aa-59120896762d",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7907660c-3860-4538-8d50-7dbcde10f170",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "bbad7591-29c9-4fe4-84c2-5fe40cd39ab9",
   "metadata": {},
   "source": [
    "### 离群点"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "4d115aa9-f463-459c-bb07-e6943f25e44a",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# logits_data_all, label_data = load_logits_data_all(X_data, Y_data, weight_dir, num_shadowsets, data_name, model, weight_part, model_transform, batch_size, device)\n",
    "# np.save('../outputs_save/CIFAR100_resnet_logits.npy', logits_data_all)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "5dbec470-e52a-40d6-9bfe-355b4a3e003b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# logits_data_all = np.load('../outputs_save/CIFAR100_resnet_logits.npy')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "1cd40a36-6dbb-4857-88c7-be9de283710b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# # 按照k个模型进行拼接\n",
    "# k = 10\n",
    "# for i in range(k):\n",
    "#     if i == 0:\n",
    "#         combine_features = logits_data_all[i]\n",
    "#     else:\n",
    "#         combine_features = np.concatenate((combine_features, logits_data_all[i]),axis=1)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "d998f3b1-5eca-43ad-a33b-05c69ddf9e58",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# # 数据量太大，不能保存所有的余弦相似度，只能需要时计算\n",
    "# alpha_list = [0.05, 0.1, 0.12, 0.15, 0.2, 0.3]\n",
    "# n_num_list = []\n",
    "# # for i in range(combine_features.shape[0]):\n",
    "# for i in range(40000, 50000):\n",
    "#     n_count = [0 for _ in alpha_list]\n",
    "#     if i%50 == 0:\n",
    "#         print(f\"compute to: {i}\")\n",
    "#     for j in range(combine_features.shape[0]):\n",
    "#         # 余弦距离的计算\n",
    "#         vec1 = combine_features[i]\n",
    "#         vec2 = combine_features[j]        \n",
    "#         cos_sim = vec1.dot(vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))\n",
    "#         cos_dis = 0.5 - 0.5 * cos_sim\n",
    "#         for m in range(len(alpha_list)):\n",
    "#             if (cos_dis < alpha_list[m]):\n",
    "#                 n_count[m] += 1\n",
    "#     n_num_list.append(n_count)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "ea9607e4-c230-44da-8032-1143fb85dd9b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# neigh_data_all = np.array(n_num_list)\n",
    "# np.save('../outputs_save/CIFAR100_resnet_neigh3.npy', neigh_data_all)\n",
    "# NOTE(review): the save above writes ..._neigh3.npy but the load below reads\n",
    "# ..._neigh.npy -- confirm the cached file is the intended one.\n",
    "neigh_data_all = np.load('../outputs_save/CIFAR100_resnet_neigh.npy')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "7efb9263-8115-40e2-8657-38e01340395c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Column 1 is the neighbor count for the second distance threshold\n",
    "# (presumably alpha = 0.1 per the commented alpha_list above -- confirm).\n",
    "# Fewer neighbors = stronger outlier, so ascending argsort ranks outliers first.\n",
    "neigh_num = neigh_data_all[:,1]\n",
    "risk_rank = np.argsort(neigh_num)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "c32784a6-e30b-46e7-826f-f11e5f26b55c",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([    0, 27758, 27756, ..., 45117, 13540, 37013])"
      ]
     },
     "execution_count": 23,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "risk_rank"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "95f40913-99d1-469a-b25d-26077be88f07",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "daf800d2-4aad-4610-9a68-5ab5aa082c1e",
   "metadata": {},
   "source": [
    "## 针对脆弱点展开攻击"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "92a1da9a-1234-45c6-9376-44bff899fbc4",
   "metadata": {},
   "source": [
    "### 基线攻击"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "id": "9b4ec0a5-5c10-4f62-aaed-24f811408e5e",
   "metadata": {},
   "outputs": [],
   "source": [
    "x = 5000  # number of top-ranked (most vulnerable) points to evaluate"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "id": "2336a5a8-327e-411e-8ff8-4abd0b04b5f6",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Baseline (gap) attack: predict member iff the model's top-1 prediction is correct.\n",
    "tmp = conf_data_all.argmax(2)\n",
    "pred_result = (tmp == Y_data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "id": "d9ddb0fd-0f31-474b-b57e-29396762edeb",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "outlier 0.785454\n",
      "MPLR 0.985776\n",
      "base 0.6800698\n"
     ]
    }
   ],
   "source": [
    "# Attack accuracy on three subsets: top-x outliers, top-x MPLR points, all points.\n",
    "# NOTE(review): this three-way evaluation block is copy-pasted in the threshold\n",
    "# and LiRA sections too; a shared helper would remove the duplication.\n",
    "pred_clip = pred_result[:, risk_rank[:x]]\n",
    "mem_clip = train_keep[:, risk_rank[:x]]\n",
    "pred_clip = pred_clip.flatten()\n",
    "mem_clip = mem_clip.flatten()\n",
    "accuracy = metrics.accuracy_score(mem_clip, pred_clip)\n",
    "print(\"outlier\", accuracy)\n",
    "\n",
    "pred_clip = pred_result[:, pri_risk_rank[:x]]\n",
    "mem_clip = train_keep[:, pri_risk_rank[:x]]\n",
    "pred_clip = pred_clip.flatten()\n",
    "mem_clip = mem_clip.flatten()\n",
    "accuracy = metrics.accuracy_score(mem_clip, pred_clip)\n",
    "print(\"MPLR\", accuracy)\n",
    "\n",
    "\n",
    "pred_clip = pred_result\n",
    "mem_clip = train_keep\n",
    "pred_clip = pred_clip.flatten()\n",
    "mem_clip = mem_clip.flatten()\n",
    "accuracy = metrics.accuracy_score(mem_clip, pred_clip)\n",
    "print(\"base\", accuracy)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f542681a-ab1e-4635-a494-bc26049fca2d",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "790f0444-267d-411a-9558-1b73032cebd4",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c2ea24a8-6b52-4fdd-8b30-3db5b9273668",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "7f771ea6-f3ae-4528-83f2-7ebd63b3c5a2",
   "metadata": {},
   "source": [
    "### 阈值攻击"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "id": "afa7a7bc-107f-4bbe-93fc-da0b39330931",
   "metadata": {},
   "outputs": [],
   "source": [
    "x = 5000  # number of top-ranked points to evaluate for the threshold attack"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "id": "1e95651d-d60d-4b78-b317-e3ef57bcb448",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Derive the membership loss threshold from the reference losses and membership mask.\n",
    "loss_threshold = get_loss_threshold(loss_data_all, train_keep)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "id": "aa64484f-001f-4d11-b573-4617edde914c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Threshold attack: predict member wherever the loss falls below the threshold.\n",
    "pred_result = loss_data_all < loss_threshold"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "id": "a9277795-3b74-4c50-98c6-924d58331db5",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "outlier 0.854154\n",
      "MPLR 0.95209\n",
      "base 0.7720238\n"
     ]
    }
   ],
   "source": [
    "# Threshold-attack accuracy on: top-x outliers, top-x MPLR points, all points.\n",
    "# NOTE(review): same copy-pasted evaluation pattern as the baseline section.\n",
    "pred_clip = pred_result[:, risk_rank[:x]]\n",
    "mem_clip = train_keep[:, risk_rank[:x]]\n",
    "pred_clip = pred_clip.flatten()\n",
    "mem_clip = mem_clip.flatten()\n",
    "accuracy = metrics.accuracy_score(mem_clip, pred_clip)\n",
    "print(\"outlier\", accuracy)\n",
    "\n",
    "pred_clip = pred_result[:, pri_risk_rank[:x]]\n",
    "mem_clip = train_keep[:, pri_risk_rank[:x]]\n",
    "pred_clip = pred_clip.flatten()\n",
    "mem_clip = mem_clip.flatten()\n",
    "accuracy = metrics.accuracy_score(mem_clip, pred_clip)\n",
    "print(\"MPLR\", accuracy)\n",
    "\n",
    "\n",
    "pred_clip = pred_result\n",
    "mem_clip = train_keep\n",
    "pred_clip = pred_clip.flatten()\n",
    "mem_clip = mem_clip.flatten()\n",
    "accuracy = metrics.accuracy_score(mem_clip, pred_clip)\n",
    "print(\"base\", accuracy)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "14ade9c8-a042-4773-a79d-27e8b8847a3a",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "5ddcf613-869e-4320-a5c0-4bdb9b0cee3a",
   "metadata": {},
   "source": [
    "### 似然比攻击"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "id": "75534158-1bfc-419d-9228-2650902be504",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "AUC value is: 0.917857997029118\n",
      "Accuracy is: 0.81866\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "0.81866"
      ]
     },
     "execution_count": 38,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# LiRA attack with model 0 as the target: score_all[0] / train_keep[0] are the\n",
    "# target's scores and membership; the full score_all serves as the references.\n",
    "# NOTE(review): the target's own scores are also inside score_all -- confirm\n",
    "# LIRA_attack excludes the target from the reference set.\n",
    "pred_result = LIRA_attack(train_keep, score_all, score_all[0], train_keep[0])\n",
    "evaluate_ROC(pred_result, train_keep[0], threshold=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "id": "f41f286f-d152-4b22-9b2c-317e95556be4",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n",
      "1\n",
      "2\n",
      "3\n",
      "4\n",
      "5\n",
      "6\n",
      "7\n",
      "8\n",
      "9\n",
      "10\n",
      "11\n",
      "12\n",
      "13\n",
      "14\n",
      "15\n",
      "16\n",
      "17\n",
      "18\n",
      "19\n",
      "20\n",
      "21\n",
      "22\n",
      "23\n",
      "24\n",
      "25\n",
      "26\n",
      "27\n",
      "28\n",
      "29\n",
      "30\n",
      "31\n",
      "32\n",
      "33\n",
      "34\n",
      "35\n",
      "36\n",
      "37\n",
      "38\n",
      "39\n",
      "40\n",
      "41\n",
      "42\n",
      "43\n",
      "44\n",
      "45\n",
      "46\n",
      "47\n",
      "48\n",
      "49\n",
      "50\n",
      "51\n",
      "52\n",
      "53\n",
      "54\n",
      "55\n",
      "56\n",
      "57\n",
      "58\n",
      "59\n",
      "60\n",
      "61\n",
      "62\n",
      "63\n",
      "64\n",
      "65\n",
      "66\n",
      "67\n",
      "68\n",
      "69\n",
      "70\n",
      "71\n",
      "72\n",
      "73\n",
      "74\n",
      "75\n",
      "76\n",
      "77\n",
      "78\n",
      "79\n",
      "80\n",
      "81\n",
      "82\n",
      "83\n",
      "84\n",
      "85\n",
      "86\n",
      "87\n",
      "88\n",
      "89\n",
      "90\n",
      "91\n",
      "92\n",
      "93\n",
      "94\n",
      "95\n",
      "96\n",
      "97\n",
      "98\n",
      "99\n"
     ]
    }
   ],
   "source": [
    "# Run the LiRA attack once per model, treating each in turn as the target.\n",
    "# Rows are collected in a list and stacked once at the end: the original\n",
    "# np.concatenate inside the loop re-copied the array every iteration (quadratic).\n",
    "pred_rows = []\n",
    "for i in range(num_shadowsets):\n",
    "    pred_rows.append(LIRA_attack(train_keep, score_all, score_all[i], train_keep[i]))\n",
    "    print(i)  # progress indicator\n",
    "pred_result_all = np.vstack(pred_rows)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "id": "9eff0893-63ab-4397-93d6-350f01d9f8fa",
   "metadata": {},
   "outputs": [],
   "source": [
    "# X_axi = []\n",
    "# Y_axi = []\n",
    "\n",
    "# for i in range(10000):\n",
    "#     pred_t = pred_result_all[:,i]\n",
    "#     pred_t = pred_t > 0\n",
    "#     mem_t = train_keep[:,i]\n",
    "#     risk_t = pri_risk_all[i]\n",
    "#     acc = metrics.accuracy_score(mem_t, pred_t)\n",
    "#     X_axi.append(risk_t)\n",
    "#     Y_axi.append(acc)\n",
    "\n",
    "# df=pd.DataFrame({'risk': X_axi, 'attack_acc': Y_axi })\n",
    "# df.to_csv('CIFAR100_risk_att.csv', index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 60,
   "id": "f440be06-62d0-4f31-8194-aa04da658048",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Attack accuracy restricted to points whose MPLR score lies in [lower_bound, upper_bound].\n",
    "lower_bound, upper_bound = 1, 10\n",
    "# Index with the boolean mask directly: np.where(...) returns a tuple, and\n",
    "# using it as `[:, indices]` silently inserted a spurious singleton axis\n",
    "# (harmless only because of the later flatten). The mask form is exact.\n",
    "mask = (pri_risk_all >= lower_bound) & (pri_risk_all <= upper_bound)\n",
    "pred_clip = pred_result_all[:, mask]\n",
    "mem_clip = train_keep[:, mask]\n",
    "pred_clip = pred_clip.flatten()\n",
    "mem_clip = mem_clip.flatten()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "id": "e1a7cc38-441a-4a5e-9c85-084b5d83a7e1",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.9659538628447439\n"
     ]
    }
   ],
   "source": [
    "# Binarize the LiRA scores at 0 and report accuracy on the selected MPLR interval.\n",
    "pred_clip = pred_clip > 0\n",
    "accuracy = metrics.accuracy_score(mem_clip, pred_clip)\n",
    "print(accuracy)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 66,
   "id": "88735ac3-f298-4727-ac9b-a4f00d6d2a0a",
   "metadata": {},
   "outputs": [],
   "source": [
    "x = 2500  # number of top-ranked points to evaluate for the LiRA attack"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 67,
   "id": "58f30503-ffb7-4dd3-9f59-8afb3c0e4cac",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "outlier 0.921608\n",
      "MPLR 0.994052\n",
      "base 0.8186622\n"
     ]
    }
   ],
   "source": [
    "# LiRA attack accuracy on: top-x outliers, top-x MPLR points, and all points\n",
    "# (scores binarized at 0). Same copy-pasted pattern as the earlier sections.\n",
    "pred_clip = pred_result_all[:, risk_rank[:x]]\n",
    "mem_clip = train_keep[:, risk_rank[:x]]\n",
    "pred_clip = pred_clip.flatten()\n",
    "mem_clip = mem_clip.flatten()\n",
    "pred_clip = pred_clip > 0\n",
    "accuracy = metrics.accuracy_score(mem_clip, pred_clip)\n",
    "print(\"outlier\", accuracy)\n",
    "\n",
    "pred_clip = pred_result_all[:, pri_risk_rank[:x]]\n",
    "mem_clip = train_keep[:, pri_risk_rank[:x]]\n",
    "pred_clip = pred_clip.flatten()\n",
    "mem_clip = mem_clip.flatten()\n",
    "pred_clip = pred_clip > 0\n",
    "accuracy = metrics.accuracy_score(mem_clip, pred_clip)\n",
    "print(\"MPLR\", accuracy)\n",
    "\n",
    "\n",
    "pred_clip = pred_result_all\n",
    "mem_clip = train_keep\n",
    "pred_clip = pred_clip.flatten()\n",
    "mem_clip = mem_clip.flatten()\n",
    "pred_clip = pred_clip  > 0\n",
    "accuracy = metrics.accuracy_score(mem_clip, pred_clip)\n",
    "print(\"base\", accuracy)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "368831bd-a953-4115-af85-a33736917815",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "612cee81-b4b4-4a4d-ac23-a6b168948545",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "762b99ad-56c9-4bc1-a3cb-5d3d6f2a8b6c",
   "metadata": {},
   "source": [
    "### 影子模型攻击"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "cdde52d1-ae87-40f0-b61f-1bcca90d6de6",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Files already downloaded and verified\n",
      "Files already downloaded and verified\n",
      "(24884, 32, 32, 3) (24884,) (25116, 32, 32, 3) (25116,)\n",
      " Error: \n",
      " Accuracy: 99.8%  \n",
      "\n",
      " Error: \n",
      " Accuracy: 63.7%  \n",
      "\n",
      "(50000, 100) (50000,) (50000,)\n",
      "Files already downloaded and verified\n",
      "Files already downloaded and verified\n",
      "(24869, 32, 32, 3) (24869,) (25131, 32, 32, 3) (25131,)\n",
      " Error: \n",
      " Accuracy: 99.9%  \n",
      "\n",
      " Error: \n",
      " Accuracy: 64.3%  \n",
      "\n",
      "(50000, 100) (50000,) (50000,)\n",
      "Files already downloaded and verified\n",
      "Files already downloaded and verified\n",
      "(25028, 32, 32, 3) (25028,) (24972, 32, 32, 3) (24972,)\n",
      " Error: \n",
      " Accuracy: 99.8%  \n",
      "\n",
      " Error: \n",
      " Accuracy: 63.3%  \n",
      "\n",
      "(50000, 100) (50000,) (50000,)\n",
      "Files already downloaded and verified\n",
      "Files already downloaded and verified\n",
      "(25020, 32, 32, 3) (25020,) (24980, 32, 32, 3) (24980,)\n",
      " Error: \n",
      " Accuracy: 99.8%  \n",
      "\n",
      " Error: \n",
      " Accuracy: 63.6%  \n",
      "\n",
      "test data: (50000, 100) (50000,) (50000,)\n",
      "(150000, 100) (150000,)\n",
      "Attack_NN(\n",
      "  (linear_relu_stack): Sequential(\n",
      "    (0): Linear(in_features=4, out_features=128, bias=True)\n",
      "    (1): ReLU()\n",
      "    (2): Linear(in_features=128, out_features=64, bias=True)\n",
      "    (3): ReLU()\n",
      "    (4): Linear(in_features=64, out_features=1, bias=True)\n",
      "  )\n",
      ")\n",
      "Epoch 1\n",
      "-------------------------------\n",
      "loss: 0.688671  [  128/150000]\n",
      "loss: 0.541771  [12928/150000]\n",
      "loss: 0.518256  [25728/150000]\n",
      "loss: 0.436538  [38528/150000]\n",
      "loss: 0.411424  [51328/150000]\n",
      "loss: 0.470087  [64128/150000]\n",
      "loss: 0.517066  [76928/150000]\n",
      "loss: 0.469832  [89728/150000]\n",
      "loss: 0.409436  [102528/150000]\n",
      "loss: 0.460690  [115328/150000]\n",
      "loss: 0.490169  [128128/150000]\n",
      "loss: 0.424337  [140928/150000]\n",
      "Epoch 2\n",
      "-------------------------------\n",
      "loss: 0.500391  [  128/150000]\n",
      "loss: 0.514420  [12928/150000]\n",
      "loss: 0.510802  [25728/150000]\n",
      "loss: 0.430911  [38528/150000]\n",
      "loss: 0.452384  [51328/150000]\n",
      "loss: 0.429746  [64128/150000]\n",
      "loss: 0.410272  [76928/150000]\n",
      "loss: 0.422382  [89728/150000]\n",
      "loss: 0.460792  [102528/150000]\n",
      "loss: 0.536378  [115328/150000]\n",
      "loss: 0.485976  [128128/150000]\n",
      "loss: 0.416766  [140928/150000]\n",
      "Epoch 3\n",
      "-------------------------------\n",
      "loss: 0.511824  [  128/150000]\n",
      "loss: 0.406195  [12928/150000]\n",
      "loss: 0.428918  [25728/150000]\n",
      "loss: 0.530953  [38528/150000]\n",
      "loss: 0.470478  [51328/150000]\n",
      "loss: 0.437217  [64128/150000]\n",
      "loss: 0.477185  [76928/150000]\n",
      "loss: 0.507743  [89728/150000]\n",
      "loss: 0.385715  [102528/150000]\n",
      "loss: 0.452730  [115328/150000]\n",
      "loss: 0.482623  [128128/150000]\n",
      "loss: 0.542572  [140928/150000]\n",
      "Epoch 4\n",
      "-------------------------------\n",
      "loss: 0.519654  [  128/150000]\n",
      "loss: 0.587431  [12928/150000]\n",
      "loss: 0.487198  [25728/150000]\n",
      "loss: 0.494686  [38528/150000]\n",
      "loss: 0.470279  [51328/150000]\n",
      "loss: 0.552718  [64128/150000]\n",
      "loss: 0.531254  [76928/150000]\n",
      "loss: 0.450136  [89728/150000]\n",
      "loss: 0.399913  [102528/150000]\n",
      "loss: 0.486934  [115328/150000]\n",
      "loss: 0.488494  [128128/150000]\n",
      "loss: 0.434309  [140928/150000]\n",
      "Epoch 5\n",
      "-------------------------------\n",
      "loss: 0.486898  [  128/150000]\n",
      "loss: 0.472630  [12928/150000]\n",
      "loss: 0.431870  [25728/150000]\n",
      "loss: 0.517487  [38528/150000]\n",
      "loss: 0.430791  [51328/150000]\n",
      "loss: 0.447596  [64128/150000]\n",
      "loss: 0.492934  [76928/150000]\n",
      "loss: 0.475954  [89728/150000]\n",
      "loss: 0.438354  [102528/150000]\n",
      "loss: 0.494849  [115328/150000]\n",
      "loss: 0.508139  [128128/150000]\n",
      "loss: 0.462412  [140928/150000]\n",
      "Epoch 6\n",
      "-------------------------------\n",
      "loss: 0.484913  [  128/150000]\n",
      "loss: 0.457339  [12928/150000]\n",
      "loss: 0.463282  [25728/150000]\n",
      "loss: 0.463753  [38528/150000]\n",
      "loss: 0.492279  [51328/150000]\n",
      "loss: 0.533367  [64128/150000]\n",
      "loss: 0.458813  [76928/150000]\n",
      "loss: 0.471120  [89728/150000]\n",
      "loss: 0.583066  [102528/150000]\n",
      "loss: 0.371642  [115328/150000]\n",
      "loss: 0.523938  [128128/150000]\n",
      "loss: 0.455555  [140928/150000]\n",
      "Epoch 7\n",
      "-------------------------------\n",
      "loss: 0.502198  [  128/150000]\n",
      "loss: 0.472316  [12928/150000]\n",
      "loss: 0.449022  [25728/150000]\n",
      "loss: 0.528757  [38528/150000]\n",
      "loss: 0.421773  [51328/150000]\n",
      "loss: 0.494679  [64128/150000]\n",
      "loss: 0.607330  [76928/150000]\n",
      "loss: 0.466867  [89728/150000]\n",
      "loss: 0.556232  [102528/150000]\n",
      "loss: 0.448097  [115328/150000]\n",
      "loss: 0.480148  [128128/150000]\n",
      "loss: 0.460835  [140928/150000]\n",
      "Epoch 8\n",
      "-------------------------------\n",
      "loss: 0.474593  [  128/150000]\n",
      "loss: 0.468605  [12928/150000]\n",
      "loss: 0.536080  [25728/150000]\n",
      "loss: 0.502652  [38528/150000]\n",
      "loss: 0.471196  [51328/150000]\n",
      "loss: 0.542862  [64128/150000]\n",
      "loss: 0.447016  [76928/150000]\n",
      "loss: 0.522290  [89728/150000]\n",
      "loss: 0.487711  [102528/150000]\n",
      "loss: 0.457069  [115328/150000]\n",
      "loss: 0.495965  [128128/150000]\n",
      "loss: 0.451522  [140928/150000]\n",
      "Epoch 9\n",
      "-------------------------------\n",
      "loss: 0.478108  [  128/150000]\n",
      "loss: 0.526282  [12928/150000]\n",
      "loss: 0.472679  [25728/150000]\n",
      "loss: 0.472293  [38528/150000]\n",
      "loss: 0.448237  [51328/150000]\n",
      "loss: 0.446722  [64128/150000]\n",
      "loss: 0.434085  [76928/150000]\n",
      "loss: 0.460252  [89728/150000]\n",
      "loss: 0.489034  [102528/150000]\n",
      "loss: 0.445333  [115328/150000]\n",
      "loss: 0.475870  [128128/150000]\n",
      "loss: 0.482080  [140928/150000]\n",
      "Epoch 10\n",
      "-------------------------------\n",
      "loss: 0.474506  [  128/150000]\n",
      "loss: 0.469902  [12928/150000]\n",
      "loss: 0.472247  [25728/150000]\n",
      "loss: 0.453068  [38528/150000]\n",
      "loss: 0.428629  [51328/150000]\n",
      "loss: 0.486334  [64128/150000]\n",
      "loss: 0.450542  [76928/150000]\n",
      "loss: 0.484860  [89728/150000]\n",
      "loss: 0.459524  [102528/150000]\n",
      "loss: 0.506259  [115328/150000]\n",
      "loss: 0.500683  [128128/150000]\n",
      "loss: 0.516497  [140928/150000]\n",
      "Epoch 11\n",
      "-------------------------------\n",
      "loss: 0.431387  [  128/150000]\n",
      "loss: 0.458868  [12928/150000]\n",
      "loss: 0.448678  [25728/150000]\n",
      "loss: 0.494965  [38528/150000]\n",
      "loss: 0.445557  [51328/150000]\n",
      "loss: 0.483953  [64128/150000]\n",
      "loss: 0.479591  [76928/150000]\n",
      "loss: 0.485381  [89728/150000]\n",
      "loss: 0.459531  [102528/150000]\n",
      "loss: 0.462129  [115328/150000]\n",
      "loss: 0.410761  [128128/150000]\n",
      "loss: 0.489433  [140928/150000]\n",
      "Epoch 12\n",
      "-------------------------------\n",
      "loss: 0.589830  [  128/150000]\n",
      "loss: 0.480953  [12928/150000]\n",
      "loss: 0.422341  [25728/150000]\n",
      "loss: 0.434857  [38528/150000]\n",
      "loss: 0.485595  [51328/150000]\n",
      "loss: 0.453435  [64128/150000]\n",
      "loss: 0.443002  [76928/150000]\n",
      "loss: 0.446981  [89728/150000]\n",
      "loss: 0.482722  [102528/150000]\n",
      "loss: 0.521833  [115328/150000]\n",
      "loss: 0.491671  [128128/150000]\n",
      "loss: 0.435996  [140928/150000]\n",
      "Epoch 13\n",
      "-------------------------------\n",
      "loss: 0.497212  [  128/150000]\n",
      "loss: 0.423837  [12928/150000]\n",
      "loss: 0.463433  [25728/150000]\n",
      "loss: 0.528281  [38528/150000]\n",
      "loss: 0.465710  [51328/150000]\n",
      "loss: 0.464108  [64128/150000]\n",
      "loss: 0.497473  [76928/150000]\n",
      "loss: 0.428275  [89728/150000]\n",
      "loss: 0.480645  [102528/150000]\n",
      "loss: 0.493156  [115328/150000]\n",
      "loss: 0.502576  [128128/150000]\n",
      "loss: 0.445755  [140928/150000]\n",
      "Epoch 14\n",
      "-------------------------------\n",
      "loss: 0.521441  [  128/150000]\n",
      "loss: 0.452088  [12928/150000]\n",
      "loss: 0.513353  [25728/150000]\n",
      "loss: 0.428134  [38528/150000]\n",
      "loss: 0.458712  [51328/150000]\n",
      "loss: 0.411184  [64128/150000]\n",
      "loss: 0.429515  [76928/150000]\n",
      "loss: 0.506249  [89728/150000]\n",
      "loss: 0.475030  [102528/150000]\n",
      "loss: 0.491042  [115328/150000]\n",
      "loss: 0.538414  [128128/150000]\n",
      "loss: 0.455398  [140928/150000]\n",
      "Epoch 15\n",
      "-------------------------------\n",
      "loss: 0.443315  [  128/150000]\n",
      "loss: 0.454775  [12928/150000]\n",
      "loss: 0.536217  [25728/150000]\n",
      "loss: 0.540308  [38528/150000]\n",
      "loss: 0.556393  [51328/150000]\n",
      "loss: 0.534885  [64128/150000]\n",
      "loss: 0.423546  [76928/150000]\n",
      "loss: 0.503169  [89728/150000]\n",
      "loss: 0.539349  [102528/150000]\n",
      "loss: 0.436563  [115328/150000]\n",
      "loss: 0.502997  [128128/150000]\n",
      "loss: 0.569967  [140928/150000]\n",
      "Epoch 16\n",
      "-------------------------------\n",
      "loss: 0.484812  [  128/150000]\n",
      "loss: 0.502302  [12928/150000]\n",
      "loss: 0.534469  [25728/150000]\n",
      "loss: 0.445268  [38528/150000]\n",
      "loss: 0.536710  [51328/150000]\n",
      "loss: 0.451717  [64128/150000]\n",
      "loss: 0.431485  [76928/150000]\n",
      "loss: 0.416351  [89728/150000]\n",
      "loss: 0.498449  [102528/150000]\n",
      "loss: 0.530402  [115328/150000]\n",
      "loss: 0.469863  [128128/150000]\n",
      "loss: 0.500587  [140928/150000]\n",
      "Epoch 17\n",
      "-------------------------------\n",
      "loss: 0.461820  [  128/150000]\n",
      "loss: 0.425429  [12928/150000]\n",
      "loss: 0.453099  [25728/150000]\n",
      "loss: 0.525233  [38528/150000]\n",
      "loss: 0.448087  [51328/150000]\n",
      "loss: 0.489600  [64128/150000]\n",
      "loss: 0.480786  [76928/150000]\n",
      "loss: 0.461886  [89728/150000]\n",
      "loss: 0.481979  [102528/150000]\n",
      "loss: 0.435356  [115328/150000]\n",
      "loss: 0.449368  [128128/150000]\n",
      "loss: 0.484868  [140928/150000]\n",
      "Epoch 18\n",
      "-------------------------------\n",
      "loss: 0.512751  [  128/150000]\n",
      "loss: 0.433313  [12928/150000]\n",
      "loss: 0.529759  [25728/150000]\n",
      "loss: 0.486855  [38528/150000]\n",
      "loss: 0.548874  [51328/150000]\n",
      "loss: 0.432046  [64128/150000]\n",
      "loss: 0.442237  [76928/150000]\n",
      "loss: 0.471729  [89728/150000]\n",
      "loss: 0.423991  [102528/150000]\n",
      "loss: 0.487299  [115328/150000]\n",
      "loss: 0.487927  [128128/150000]\n",
      "loss: 0.399369  [140928/150000]\n",
      "Epoch 19\n",
      "-------------------------------\n",
      "loss: 0.430931  [  128/150000]\n",
      "loss: 0.531895  [12928/150000]\n",
      "loss: 0.458235  [25728/150000]\n",
      "loss: 0.519645  [38528/150000]\n",
      "loss: 0.523219  [51328/150000]\n",
      "loss: 0.456623  [64128/150000]\n",
      "loss: 0.446879  [76928/150000]\n",
      "loss: 0.453297  [89728/150000]\n",
      "loss: 0.409423  [102528/150000]\n",
      "loss: 0.515446  [115328/150000]\n",
      "loss: 0.500335  [128128/150000]\n",
      "loss: 0.456044  [140928/150000]\n",
      "Epoch 20\n",
      "-------------------------------\n",
      "loss: 0.487681  [  128/150000]\n",
      "loss: 0.480514  [12928/150000]\n",
      "loss: 0.445099  [25728/150000]\n",
      "loss: 0.485470  [38528/150000]\n",
      "loss: 0.514402  [51328/150000]\n",
      "loss: 0.464910  [64128/150000]\n",
      "loss: 0.468055  [76928/150000]\n",
      "loss: 0.444841  [89728/150000]\n",
      "loss: 0.485587  [102528/150000]\n",
      "loss: 0.462508  [115328/150000]\n",
      "loss: 0.460013  [128128/150000]\n",
      "loss: 0.452164  [140928/150000]\n",
      "Epoch 21\n",
      "-------------------------------\n",
      "loss: 0.478159  [  128/150000]\n",
      "loss: 0.419260  [12928/150000]\n",
      "loss: 0.429270  [25728/150000]\n",
      "loss: 0.489622  [38528/150000]\n",
      "loss: 0.474391  [51328/150000]\n",
      "loss: 0.507415  [64128/150000]\n",
      "loss: 0.496554  [76928/150000]\n",
      "loss: 0.445720  [89728/150000]\n",
      "loss: 0.426923  [102528/150000]\n",
      "loss: 0.474181  [115328/150000]\n",
      "loss: 0.551305  [128128/150000]\n",
      "loss: 0.483582  [140928/150000]\n",
      "Epoch 22\n",
      "-------------------------------\n",
      "loss: 0.529822  [  128/150000]\n",
      "loss: 0.542521  [12928/150000]\n",
      "loss: 0.418151  [25728/150000]\n",
      "loss: 0.472932  [38528/150000]\n",
      "loss: 0.453489  [51328/150000]\n",
      "loss: 0.481100  [64128/150000]\n",
      "loss: 0.519034  [76928/150000]\n",
      "loss: 0.496672  [89728/150000]\n",
      "loss: 0.563665  [102528/150000]\n",
      "loss: 0.500524  [115328/150000]\n",
      "loss: 0.480223  [128128/150000]\n",
      "loss: 0.548473  [140928/150000]\n",
      "Epoch 23\n",
      "-------------------------------\n",
      "loss: 0.501051  [  128/150000]\n",
      "loss: 0.526817  [12928/150000]\n",
      "loss: 0.505968  [25728/150000]\n",
      "loss: 0.549920  [38528/150000]\n",
      "loss: 0.461793  [51328/150000]\n",
      "loss: 0.450499  [64128/150000]\n",
      "loss: 0.529754  [76928/150000]\n",
      "loss: 0.506645  [89728/150000]\n",
      "loss: 0.481320  [102528/150000]\n",
      "loss: 0.499270  [115328/150000]\n",
      "loss: 0.509225  [128128/150000]\n",
      "loss: 0.529541  [140928/150000]\n",
      "Epoch 24\n",
      "-------------------------------\n",
      "loss: 0.507152  [  128/150000]\n",
      "loss: 0.516344  [12928/150000]\n",
      "loss: 0.443700  [25728/150000]\n",
      "loss: 0.468028  [38528/150000]\n",
      "loss: 0.467251  [51328/150000]\n",
      "loss: 0.528149  [64128/150000]\n",
      "loss: 0.515519  [76928/150000]\n",
      "loss: 0.498221  [89728/150000]\n",
      "loss: 0.475778  [102528/150000]\n",
      "loss: 0.465846  [115328/150000]\n",
      "loss: 0.505544  [128128/150000]\n",
      "loss: 0.458262  [140928/150000]\n",
      "Epoch 25\n",
      "-------------------------------\n",
      "loss: 0.516092  [  128/150000]\n",
      "loss: 0.524714  [12928/150000]\n",
      "loss: 0.452292  [25728/150000]\n",
      "loss: 0.560459  [38528/150000]\n",
      "loss: 0.492094  [51328/150000]\n",
      "loss: 0.448937  [64128/150000]\n",
      "loss: 0.495495  [76928/150000]\n",
      "loss: 0.470372  [89728/150000]\n",
      "loss: 0.523712  [102528/150000]\n",
      "loss: 0.453010  [115328/150000]\n",
      "loss: 0.469409  [128128/150000]\n",
      "loss: 0.490938  [140928/150000]\n",
      "Epoch 26\n",
      "-------------------------------\n",
      "loss: 0.518151  [  128/150000]\n",
      "loss: 0.379002  [12928/150000]\n",
      "loss: 0.511558  [25728/150000]\n",
      "loss: 0.535880  [38528/150000]\n",
      "loss: 0.484606  [51328/150000]\n",
      "loss: 0.457641  [64128/150000]\n",
      "loss: 0.460046  [76928/150000]\n",
      "loss: 0.512791  [89728/150000]\n",
      "loss: 0.480655  [102528/150000]\n",
      "loss: 0.474040  [115328/150000]\n",
      "loss: 0.543583  [128128/150000]\n",
      "loss: 0.476546  [140928/150000]\n",
      "Epoch 27\n",
      "-------------------------------\n",
      "loss: 0.456283  [  128/150000]\n",
      "loss: 0.502488  [12928/150000]\n",
      "loss: 0.482802  [25728/150000]\n",
      "loss: 0.568032  [38528/150000]\n",
      "loss: 0.468486  [51328/150000]\n",
      "loss: 0.522224  [64128/150000]\n",
      "loss: 0.493761  [76928/150000]\n",
      "loss: 0.504974  [89728/150000]\n",
      "loss: 0.478531  [102528/150000]\n",
      "loss: 0.480794  [115328/150000]\n",
      "loss: 0.496344  [128128/150000]\n",
      "loss: 0.511915  [140928/150000]\n",
      "Epoch 28\n",
      "-------------------------------\n",
      "loss: 0.479708  [  128/150000]\n",
      "loss: 0.492517  [12928/150000]\n",
      "loss: 0.466947  [25728/150000]\n",
      "loss: 0.462209  [38528/150000]\n",
      "loss: 0.465366  [51328/150000]\n",
      "loss: 0.468537  [64128/150000]\n",
      "loss: 0.497514  [76928/150000]\n",
      "loss: 0.475379  [89728/150000]\n",
      "loss: 0.522147  [102528/150000]\n",
      "loss: 0.472420  [115328/150000]\n",
      "loss: 0.446128  [128128/150000]\n",
      "loss: 0.490331  [140928/150000]\n",
      "Epoch 29\n",
      "-------------------------------\n",
      "loss: 0.509350  [  128/150000]\n",
      "loss: 0.500773  [12928/150000]\n",
      "loss: 0.435345  [25728/150000]\n",
      "loss: 0.437667  [38528/150000]\n",
      "loss: 0.450269  [51328/150000]\n",
      "loss: 0.486226  [64128/150000]\n",
      "loss: 0.529108  [76928/150000]\n",
      "loss: 0.506791  [89728/150000]\n",
      "loss: 0.447954  [102528/150000]\n",
      "loss: 0.504874  [115328/150000]\n",
      "loss: 0.515304  [128128/150000]\n",
      "loss: 0.460871  [140928/150000]\n",
      "Epoch 30\n",
      "-------------------------------\n",
      "loss: 0.452108  [  128/150000]\n",
      "loss: 0.545369  [12928/150000]\n",
      "loss: 0.429660  [25728/150000]\n",
      "loss: 0.462678  [38528/150000]\n",
      "loss: 0.434861  [51328/150000]\n",
      "loss: 0.456312  [64128/150000]\n",
      "loss: 0.443411  [76928/150000]\n",
      "loss: 0.501497  [89728/150000]\n",
      "loss: 0.462589  [102528/150000]\n",
      "loss: 0.491645  [115328/150000]\n",
      "loss: 0.456806  [128128/150000]\n",
      "loss: 0.490828  [140928/150000]\n",
      "Done!\n",
      "Train data:\n",
      "AUC value is: 0.7782855627295776\n",
      "Accuracy is: 0.7620666666666667\n",
      "Test data:\n",
      "AUC value is: 0.7765197817726602\n",
      "Accuracy is: 0.7638\n"
     ]
    }
   ],
   "source": [
    "# Run the shadow-model membership attack once over all data; keep the trained\n",
    "# attack classifier (`attack_model`) for reuse in the evaluation cells below.\n",
    "# NOTE(review): prop_keep=0.5, top_k=3 and attack_epochs=30 are hard-coded here -\n",
    "# confirm they match the experiment parameters listed at the top of the notebook.\n",
    "attack_model = shadow_attack(sha_models=sha_models, tar_model=tar_model, model_num=num_shadowsets, weight_dir=weight_dir, data_name=DATA_NAME, model=MODEL, model_transform=model_transform, \n",
    "                  model_epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=attack_lr, attack_epochs=30, attack_transform=attack_transform, \n",
    "                  device=device, prop_keep=0.5, top_k=3, attack_class=attack_class)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "bbed93ed-2fe5-4f6f-a3c0-87b7ccdc54ea",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Number of top-risk samples to evaluate on.\n",
    "# NOTE(review): this value is overwritten to 500 two cells below before it is\n",
    "# ever used - this cell may be dead and a candidate for removal.\n",
    "x = 5000"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "id": "fb9a11c9-0655-42b0-9198-ab70bb14a2c7",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Evaluate the trained attack model against target models 50-99 and stack the\n",
    "# per-model attack scores / membership labels into (n_models, n_samples) arrays.\n",
    "# NOTE(review): the loop variable shadows the scalar `tar_model` used earlier to\n",
    "# select the target model; after this cell `tar_model == 99` - confirm later\n",
    "# cells that index with `tar_model` expect that.\n",
    "for tar_model in range(50,100):\n",
    "    targetX = conf_data_all[tar_model].astype(np.float32)\n",
    "    # 1 where this target model's top-1 prediction matches the label, else 0\n",
    "    pred_cor = (targetX.argmax(1) == Y_data).astype(int)\n",
    "    targetY = train_keep[tar_model]\n",
    "    top_k = 3\n",
    "    if top_k:\n",
    "        # Use only the top-3 values of each probability vector\n",
    "        targetX, _ = get_top_k_conf(top_k, targetX, targetX)\n",
    "\n",
    "    # Append the correctness indicator as an extra feature column\n",
    "    targetX = np.concatenate((targetX, pred_cor.reshape(pred_cor.shape[0],1)), 1)\n",
    "    targetX = targetX.astype(np.float32)\n",
    "    \n",
    "    shadow_attack_data = CustomDataset(targetX, targetY, attack_transform)\n",
    "    shadow_attack_dataloader = DataLoader(shadow_attack_data, batch_size=batch_size, shuffle=False)\n",
    "    attack_test_scores, attack_test_mem = get_attack_pred(shadow_attack_dataloader, attack_model, device)\n",
    "    attack_test_scores, attack_test_mem = attack_test_scores.detach().cpu().numpy(), attack_test_mem.detach().cpu().numpy()\n",
    "    # Reshape to row vectors so results can be stacked model-by-model below\n",
    "    attack_test_scores = attack_test_scores.reshape(1, attack_test_scores.shape[0])\n",
    "    attack_test_mem = attack_test_mem.reshape(1, attack_test_mem.shape[0])\n",
    "    if tar_model == 50:\n",
    "        attack_test_scores_all = attack_test_scores\n",
    "        attack_test_mem_all = attack_test_mem\n",
    "    else:\n",
    "        attack_test_scores_all = np.concatenate((attack_test_scores_all, attack_test_scores), axis=0)\n",
    "        attack_test_mem_all = np.concatenate((attack_test_mem_all, attack_test_mem), axis=0)\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "id": "b41cb57c-fa45-4c06-b852-f126066c8d66",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Evaluate on the 500 highest-risk samples in the next cell.\n",
    "x = 500"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "id": "c99599b0-79c7-432f-81cf-c4b42d83a900",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "outlier 0.88192\n",
      "MPLR 0.87636\n",
      "base 0.7611156\n"
     ]
    }
   ],
   "source": [
    "# Compare attack accuracy on the x highest-risk samples (outlier ranking vs\n",
    "# MPLR ranking) against the all-sample baseline.\n",
    "# Binarize membership scores at 0.5 (presumably probabilities - TODO confirm\n",
    "# get_attack_pred returns values in [0, 1]).\n",
    "pred_result_all = attack_test_scores_all > 0.5\n",
    "# Outlier-based risk ranking: keep only the top-x riskiest sample columns\n",
    "pred_clip = pred_result_all[:, risk_rank[:x]]\n",
    "mem_clip = attack_test_mem_all[:, risk_rank[:x]]\n",
    "pred_clip = pred_clip.flatten()\n",
    "mem_clip = mem_clip.flatten()\n",
    "accuracy = metrics.accuracy_score(mem_clip, pred_clip)\n",
    "print(\"outlier\", accuracy)\n",
    "\n",
    "# MPLR-based risk ranking over the same number of samples\n",
    "pred_clip = pred_result_all[:, pri_risk_rank[:x]]\n",
    "mem_clip = attack_test_mem_all[:, pri_risk_rank[:x]]\n",
    "pred_clip = pred_clip.flatten()\n",
    "mem_clip = mem_clip.flatten()\n",
    "accuracy = metrics.accuracy_score(mem_clip, pred_clip)\n",
    "print(\"MPLR\", accuracy)\n",
    "\n",
    "\n",
    "# Baseline: attack accuracy over all samples, no risk selection\n",
    "pred_clip = pred_result_all\n",
    "mem_clip = attack_test_mem_all\n",
    "pred_clip = pred_clip.flatten()\n",
    "mem_clip = mem_clip.flatten()\n",
    "accuracy = metrics.accuracy_score(mem_clip, pred_clip)\n",
    "print(\"base\", accuracy)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "54cc6279-724e-4b0d-9832-b8a229d97baa",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a9fc7363-4124-48cc-ba6e-f94fa785b664",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6c680810-e9ff-421e-ae8f-bc9894ebe31d",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "8242311e-d469-4552-8a6a-edd4f8ae6b77",
   "metadata": {},
   "source": [
    "### 扰动攻击 (Perturbation attack)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 75,
   "id": "d16877a3-3573-40cc-bbcf-04b56ea5ab58",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Number of top-risk samples to evaluate the perturbation attack on.\n",
    "x = 500"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 76,
   "id": "7d00e41f-4d8e-43c6-9179-5e1f0fd410ee",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Perturbation-attack settings: `nums` noisy copies per sample and the noise\n",
    "# scale(s) in `sigma_list` (alternative value kept for reference).\n",
    "nums = 30\n",
    "# sigma_list = [0.15]\n",
    "sigma_list = [0.05]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 77,
   "id": "7d73692c-d173-4d57-9629-3d9bfc49576a",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Test Error: \n",
      " Accuracy: 81.7%, Avg loss: 0.800956 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 79.2%, Avg loss: 0.911947 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 79.3%, Avg loss: 0.913652 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 79.2%, Avg loss: 0.912096 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 79.1%, Avg loss: 0.912788 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 79.2%, Avg loss: 0.911369 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 79.1%, Avg loss: 0.914890 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 79.2%, Avg loss: 0.913062 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 79.3%, Avg loss: 0.912872 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 79.3%, Avg loss: 0.910408 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 79.2%, Avg loss: 0.911039 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 79.2%, Avg loss: 0.912157 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 79.2%, Avg loss: 0.913061 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 79.2%, Avg loss: 0.911500 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 79.2%, Avg loss: 0.911704 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 79.2%, Avg loss: 0.911660 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 79.2%, Avg loss: 0.913402 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 79.2%, Avg loss: 0.911563 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 79.2%, Avg loss: 0.914076 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 79.2%, Avg loss: 0.914273 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 79.2%, Avg loss: 0.911721 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 79.2%, Avg loss: 0.913356 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 79.2%, Avg loss: 0.912765 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 79.2%, Avg loss: 0.912062 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 79.1%, Avg loss: 0.913411 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 79.2%, Avg loss: 0.913118 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 79.1%, Avg loss: 0.912670 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 79.3%, Avg loss: 0.911023 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 79.2%, Avg loss: 0.912922 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 79.1%, Avg loss: 0.914098 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 79.2%, Avg loss: 0.912723 \n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Attack target model 0 with the label-only perturbation attack.\n",
    "tar_model = 0\n",
    "# Build the matching target-model architecture (constructor signature depends\n",
    "# on the model family)\n",
    "if model in ['NN', 'NN_4layer']:\n",
    "    Target_Model = globals()['create_{}_model'.format(model)](X_data.shape[1], Y_data.max()+1)\n",
    "elif model == 'CNN':\n",
    "    Target_Model = globals()['create_{}_model'.format(model)](Y_data.max()+1, data_name)\n",
    "else:\n",
    "    Target_Model = globals()['create_{}_model'.format(model)](Y_data.max()+1)\n",
    "# Load the trained weights of target model `tar_model`\n",
    "weight_path = os.path.join(weight_dir, weight_part + \"{}.pth\".format(tar_model))\n",
    "# print(Reference_Model)\n",
    "Target_Model.load_state_dict(torch.load(weight_path))\n",
    "Target_Model.to(device)\n",
    "loss_fn = nn.CrossEntropyLoss()\n",
    "# Run the perturbation attack: `nums` noisy queries per sample at each sigma\n",
    "pred_result, _ = Label_attack(all_dataloader, Target_Model, loss_fn, device, sigma_list, nums)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 80,
   "id": "69612d0a-da65-4cf4-bd8e-a4cd48cf7db4",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Number of top-risk samples used in the comparison below.\n",
    "x = 500"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 81,
   "id": "e7b127ba-ad90-4914-96f0-23824ae5710a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "outlier 0.85\n",
      "MPLR 0.926\n",
      "base 0.71916\n"
     ]
    }
   ],
   "source": [
    "# Perturbation-attack accuracy on the x highest-risk samples (outlier vs MPLR\n",
    "# ranking) against the all-sample baseline, for target model `tar_model`.\n",
    "pred_clip = pred_result[risk_rank[:x]]\n",
    "mem_clip = train_keep[tar_model][risk_rank[:x]]\n",
    "accuracy = metrics.accuracy_score(mem_clip, pred_clip)\n",
    "print(\"outlier\", accuracy)\n",
    "\n",
    "pred_clip = pred_result[pri_risk_rank[:x]]\n",
    "mem_clip = train_keep[tar_model][pri_risk_rank[:x]]\n",
    "accuracy = metrics.accuracy_score(mem_clip, pred_clip)\n",
    "print(\"MPLR\", accuracy)\n",
    "\n",
    "\n",
    "# Baseline: all samples, no risk selection\n",
    "pred_clip = pred_result\n",
    "mem_clip = train_keep[tar_model]\n",
    "accuracy = metrics.accuracy_score(mem_clip, pred_clip)\n",
    "print(\"base\", accuracy)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b85904e0-9148-473d-a555-5105d1329a3c",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4d1c9a8a-b2eb-4fda-9e65-a211854978bd",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2558d86f-fd65-48dc-9d0e-95b1911767f4",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "b7c02f7c-f20f-4130-9981-11290ba523e5",
   "metadata": {},
   "source": [
    "### 绘制训练轮次的影响 (Plot the effect of training epochs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "id": "a391985c-2402-429a-9c33-af04c3606f5f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Data-augmentation pipeline for training images.\n",
    "# NOTE(review): mean/std look like CIFAR-100 channel statistics - confirm.\n",
    "train_transform = transforms.Compose([\n",
    "    transforms.ToPILImage(),\n",
    "    transforms.RandomCrop(32, padding=4),  # zero-pad 4 px on each side, then randomly crop back to 32x32\n",
    "    transforms.RandomHorizontalFlip(),\n",
    "    transforms.RandomRotation(15),\n",
    "    transforms.ToTensor(),\n",
    "    transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])\n",
    "    ])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "id": "6e4df795-b777-45fc-93b8-3ebe0132522a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Accumulators for the training-epochs experiment: one entry per checkpoint.\n",
    "x = 500\n",
    "shadow_result = []\n",
    "LIRA_result = []\n",
    "gene_distance = []\n",
    "risk_base_result = []"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "id": "79ce3d55-5bf1-4638-aa8d-2002b59cefd8",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Files already downloaded and verified\n"
     ]
    }
   ],
   "source": [
    "# Load the CIFAR-100 test split and wrap it in a DataLoader using the\n",
    "# deterministic model_transform (no augmentation) for evaluation.\n",
    "test_dataset = datasets.cifar.CIFAR100(root='../datasets/cifar100', train=False, transform=None, download=True)\n",
    "x_test_data = test_dataset.data\n",
    "y_test_data = np.array(test_dataset.targets)\n",
    "test_data = CustomDataset(x_test_data, y_test_data, model_transform)\n",
    "test_dataloader = DataLoader(test_data, batch_size=batch_size)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "id": "235995fa-07e3-4f1f-84fb-bf42a7532d5c",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Files already downloaded and verified\n",
      "Files already downloaded and verified\n",
      "Epoch 1\n",
      "-------------------------------\n",
      "loss: 4.771416  [  128/25020]\n",
      "loss: 3.954310  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 6.1%\n",
      "Test Error: \n",
      " Accuracy: 6.8%, Avg loss: 4.150562 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 7.6%, Avg loss: 4.009405 \n",
      "\n",
      " Error: \n",
      " Accuracy: 6.7%  \n",
      "\n",
      "AUC value is: 0.5033314205321091\n",
      "Accuracy is: 0.49968\n",
      "AUC value is: 0.5060868534955862\n",
      "Accuracy is: 0.50068\n",
      "Test Error: \n",
      " Accuracy: 6.7%, Avg loss: 4.142799 \n",
      "\n",
      "Epoch 2\n",
      "-------------------------------\n",
      "loss: 3.886953  [  128/25020]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/ecpkn/.conda/envs/opacus/lib/python3.8/site-packages/torch/optim/lr_scheduler.py:156: UserWarning: The epoch parameter in `scheduler.step()` was not necessary and is being deprecated where possible. Please use `scheduler.step()` to step the scheduler. During the deprecation, if epoch is different from None, the closed form is used instead of the new chainable form, where available. Please open an issue if you are unable to replicate your use case: https://github.com/pytorch/pytorch/issues/new/choose.\n",
      "  warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loss: 3.676574  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 11.1%\n",
      "Epoch 3\n",
      "-------------------------------\n",
      "loss: 3.782609  [  128/25020]\n",
      "loss: 3.276825  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 15.5%\n",
      "Epoch 4\n",
      "-------------------------------\n",
      "loss: 3.565711  [  128/25020]\n",
      "loss: 3.111509  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 19.8%\n",
      "Epoch 5\n",
      "-------------------------------\n",
      "loss: 3.339386  [  128/25020]\n",
      "loss: 2.786531  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 23.9%\n",
      "Epoch 6\n",
      "-------------------------------\n",
      "loss: 3.126855  [  128/25020]\n",
      "loss: 2.728769  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 28.1%\n",
      "Epoch 7\n",
      "-------------------------------\n",
      "loss: 2.905062  [  128/25020]\n",
      "loss: 2.429550  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 32.1%\n",
      "Epoch 8\n",
      "-------------------------------\n",
      "loss: 2.718537  [  128/25020]\n",
      "loss: 2.361345  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 35.8%\n",
      "Epoch 9\n",
      "-------------------------------\n",
      "loss: 2.604837  [  128/25020]\n",
      "loss: 2.178747  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 38.8%\n",
      "Epoch 10\n",
      "-------------------------------\n",
      "loss: 2.305986  [  128/25020]\n",
      "loss: 2.066473  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 42.2%\n",
      "Epoch 11\n",
      "-------------------------------\n",
      "loss: 2.191412  [  128/25020]\n",
      "loss: 1.901207  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 44.7%\n",
      "Test Error: \n",
      " Accuracy: 38.6%, Avg loss: 2.432739 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 41.5%, Avg loss: 2.193768 \n",
      "\n",
      " Error: \n",
      " Accuracy: 41.1%  \n",
      "\n",
      "AUC value is: 0.5301001192640763\n",
      "Accuracy is: 0.5043\n",
      "AUC value is: 0.5384563158120421\n",
      "Accuracy is: 0.5034\n",
      "Test Error: \n",
      " Accuracy: 41.1%, Avg loss: 2.269484 \n",
      "\n",
      "Epoch 12\n",
      "-------------------------------\n",
      "loss: 2.145893  [  128/25020]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/ecpkn/.conda/envs/opacus/lib/python3.8/site-packages/torch/optim/lr_scheduler.py:156: UserWarning: The epoch parameter in `scheduler.step()` was not necessary and is being deprecated where possible. Please use `scheduler.step()` to step the scheduler. During the deprecation, if epoch is different from None, the closed form is used instead of the new chainable form, where available. Please open an issue if you are unable to replicate your use case: https://github.com/pytorch/pytorch/issues/new/choose.\n",
      "  warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loss: 1.935952  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 47.2%\n",
      "Epoch 13\n",
      "-------------------------------\n",
      "loss: 2.011904  [  128/25020]\n",
      "loss: 1.798716  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 49.8%\n",
      "Epoch 14\n",
      "-------------------------------\n",
      "loss: 1.752548  [  128/25020]\n",
      "loss: 1.632884  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 51.2%\n",
      "Epoch 15\n",
      "-------------------------------\n",
      "loss: 1.798790  [  128/25020]\n",
      "loss: 1.499895  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 53.0%\n",
      "Epoch 16\n",
      "-------------------------------\n",
      "loss: 1.691642  [  128/25020]\n",
      "loss: 1.448162  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 54.5%\n",
      "Epoch 17\n",
      "-------------------------------\n",
      "loss: 1.594075  [  128/25020]\n",
      "loss: 1.435549  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 56.5%\n",
      "Epoch 18\n",
      "-------------------------------\n",
      "loss: 1.700196  [  128/25020]\n",
      "loss: 1.443819  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 57.8%\n",
      "Epoch 19\n",
      "-------------------------------\n",
      "loss: 1.504344  [  128/25020]\n",
      "loss: 1.348934  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 58.7%\n",
      "Epoch 20\n",
      "-------------------------------\n",
      "loss: 1.538950  [  128/25020]\n",
      "loss: 1.224054  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 60.7%\n",
      "Epoch 21\n",
      "-------------------------------\n",
      "loss: 1.165907  [  128/25020]\n",
      "loss: 1.265193  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 60.8%\n",
      "Test Error: \n",
      " Accuracy: 45.0%, Avg loss: 2.264578 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 52.7%, Avg loss: 1.692529 \n",
      "\n",
      " Error: \n",
      " Accuracy: 50.6%  \n",
      "\n",
      "AUC value is: 0.5615784994102396\n",
      "Accuracy is: 0.52254\n",
      "AUC value is: 0.584673345390941\n",
      "Accuracy is: 0.5189\n",
      "Test Error: \n",
      " Accuracy: 50.6%, Avg loss: 1.931541 \n",
      "\n",
      "Epoch 22\n",
      "-------------------------------\n",
      "loss: 1.372882  [  128/25020]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/ecpkn/.conda/envs/opacus/lib/python3.8/site-packages/torch/optim/lr_scheduler.py:156: UserWarning: The epoch parameter in `scheduler.step()` was not necessary and is being deprecated where possible. Please use `scheduler.step()` to step the scheduler. During the deprecation, if epoch is different from None, the closed form is used instead of the new chainable form, where available. Please open an issue if you are unable to replicate your use case: https://github.com/pytorch/pytorch/issues/new/choose.\n",
      "  warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loss: 1.130891  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 62.6%\n",
      "Epoch 23\n",
      "-------------------------------\n",
      "loss: 1.235183  [  128/25020]\n",
      "loss: 1.069243  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 62.7%\n",
      "Epoch 24\n",
      "-------------------------------\n",
      "loss: 1.229324  [  128/25020]\n",
      "loss: 0.958006  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 64.0%\n",
      "Epoch 25\n",
      "-------------------------------\n",
      "loss: 1.305106  [  128/25020]\n",
      "loss: 1.053162  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 65.3%\n",
      "Epoch 26\n",
      "-------------------------------\n",
      "loss: 1.158257  [  128/25020]\n",
      "loss: 1.164046  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 65.4%\n",
      "Epoch 27\n",
      "-------------------------------\n",
      "loss: 1.227389  [  128/25020]\n",
      "loss: 0.998943  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 66.6%\n",
      "Epoch 28\n",
      "-------------------------------\n",
      "loss: 1.028513  [  128/25020]\n",
      "loss: 1.018726  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 66.7%\n",
      "Epoch 29\n",
      "-------------------------------\n",
      "loss: 1.186972  [  128/25020]\n",
      "loss: 0.898667  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 67.4%\n",
      "Epoch 30\n",
      "-------------------------------\n",
      "loss: 1.153079  [  128/25020]\n",
      "loss: 0.671200  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 78.2%\n",
      "Epoch 31\n",
      "-------------------------------\n",
      "loss: 0.611190  [  128/25020]\n",
      "loss: 0.463580  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 84.4%\n",
      "Test Error: \n",
      " Accuracy: 61.5%, Avg loss: 1.529334 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 85.8%, Avg loss: 0.483374 \n",
      "\n",
      " Error: \n",
      " Accuracy: 74.5%  \n",
      "\n",
      "AUC value is: 0.658100087584056\n",
      "Accuracy is: 0.6\n",
      "AUC value is: 0.7573026510736967\n",
      "Accuracy is: 0.63436\n",
      "Test Error: \n",
      " Accuracy: 74.5%, Avg loss: 0.961964 \n",
      "\n",
      "Epoch 32\n",
      "-------------------------------\n",
      "loss: 0.446440  [  128/25020]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/ecpkn/.conda/envs/opacus/lib/python3.8/site-packages/torch/optim/lr_scheduler.py:156: UserWarning: The epoch parameter in `scheduler.step()` was not necessary and is being deprecated where possible. Please use `scheduler.step()` to step the scheduler. During the deprecation, if epoch is different from None, the closed form is used instead of the new chainable form, where available. Please open an issue if you are unable to replicate your use case: https://github.com/pytorch/pytorch/issues/new/choose.\n",
      "  warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loss: 0.419309  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 86.9%\n",
      "Epoch 33\n",
      "-------------------------------\n",
      "loss: 0.434545  [  128/25020]\n",
      "loss: 0.338667  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 89.2%\n",
      "Epoch 34\n",
      "-------------------------------\n",
      "loss: 0.333508  [  128/25020]\n",
      "loss: 0.285945  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 90.9%\n",
      "Epoch 35\n",
      "-------------------------------\n",
      "loss: 0.275846  [  128/25020]\n",
      "loss: 0.182914  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 92.6%\n",
      "Epoch 36\n",
      "-------------------------------\n",
      "loss: 0.274127  [  128/25020]\n",
      "loss: 0.252994  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 93.5%\n",
      "Epoch 37\n",
      "-------------------------------\n",
      "loss: 0.201299  [  128/25020]\n",
      "loss: 0.135762  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 94.7%\n",
      "Epoch 38\n",
      "-------------------------------\n",
      "loss: 0.157103  [  128/25020]\n",
      "loss: 0.192693  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 95.4%\n",
      "Epoch 39\n",
      "-------------------------------\n",
      "loss: 0.166265  [  128/25020]\n",
      "loss: 0.125299  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 95.8%\n",
      "Epoch 40\n",
      "-------------------------------\n",
      "loss: 0.131194  [  128/25020]\n",
      "loss: 0.123746  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 96.3%\n",
      "Epoch 41\n",
      "-------------------------------\n",
      "loss: 0.115234  [  128/25020]\n",
      "loss: 0.134780  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 96.7%\n",
      "Test Error: \n",
      " Accuracy: 61.1%, Avg loss: 1.741962 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 95.2%, Avg loss: 0.176208 \n",
      "\n",
      " Error: \n",
      " Accuracy: 79.0%  \n",
      "\n",
      "AUC value is: 0.7272788158584422\n",
      "Accuracy is: 0.68802\n",
      "AUC value is: 0.8469788108664389\n",
      "Accuracy is: 0.73982\n",
      "Test Error: \n",
      " Accuracy: 79.0%, Avg loss: 0.925036 \n",
      "\n",
      "Epoch 42\n",
      "-------------------------------\n",
      "loss: 0.106521  [  128/25020]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/ecpkn/.conda/envs/opacus/lib/python3.8/site-packages/torch/optim/lr_scheduler.py:156: UserWarning: The epoch parameter in `scheduler.step()` was not necessary and is being deprecated where possible. Please use `scheduler.step()` to step the scheduler. During the deprecation, if epoch is different from None, the closed form is used instead of the new chainable form, where available. Please open an issue if you are unable to replicate your use case: https://github.com/pytorch/pytorch/issues/new/choose.\n",
      "  warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loss: 0.108239  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 97.1%\n",
      "Epoch 43\n",
      "-------------------------------\n",
      "loss: 0.128478  [  128/25020]\n",
      "loss: 0.090319  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 97.3%\n",
      "Epoch 44\n",
      "-------------------------------\n",
      "loss: 0.119584  [  128/25020]\n",
      "loss: 0.112891  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 97.5%\n",
      "Epoch 45\n",
      "-------------------------------\n",
      "loss: 0.093392  [  128/25020]\n",
      "loss: 0.113006  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 97.5%\n",
      "Epoch 46\n",
      "-------------------------------\n",
      "loss: 0.120809  [  128/25020]\n",
      "loss: 0.079275  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 97.8%\n",
      "Epoch 47\n",
      "-------------------------------\n",
      "loss: 0.080399  [  128/25020]\n",
      "loss: 0.093225  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 98.1%\n",
      "Epoch 48\n",
      "-------------------------------\n",
      "loss: 0.125963  [  128/25020]\n",
      "loss: 0.052177  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 98.1%\n",
      "Epoch 49\n",
      "-------------------------------\n",
      "loss: 0.065544  [  128/25020]\n",
      "loss: 0.094535  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 98.0%\n",
      "Epoch 50\n",
      "-------------------------------\n",
      "loss: 0.107095  [  128/25020]\n",
      "loss: 0.059723  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 98.7%\n",
      "Epoch 51\n",
      "-------------------------------\n",
      "loss: 0.090623  [  128/25020]\n",
      "loss: 0.026139  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.3%\n",
      "Test Error: \n",
      " Accuracy: 63.1%, Avg loss: 1.633211 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.2%, Avg loss: 0.051499 \n",
      "\n",
      " Error: \n",
      " Accuracy: 81.4%  \n",
      "\n",
      "AUC value is: 0.7665037985624312\n",
      "Accuracy is: 0.74826\n",
      "AUC value is: 0.8984355573987567\n",
      "Accuracy is: 0.79482\n",
      "Test Error: \n",
      " Accuracy: 81.4%, Avg loss: 0.834100 \n",
      "\n",
      "Epoch 52\n",
      "-------------------------------\n",
      "loss: 0.056885  [  128/25020]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/ecpkn/.conda/envs/opacus/lib/python3.8/site-packages/torch/optim/lr_scheduler.py:156: UserWarning: The epoch parameter in `scheduler.step()` was not necessary and is being deprecated where possible. Please use `scheduler.step()` to step the scheduler. During the deprecation, if epoch is different from None, the closed form is used instead of the new chainable form, where available. Please open an issue if you are unable to replicate your use case: https://github.com/pytorch/pytorch/issues/new/choose.\n",
      "  warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loss: 0.065773  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.5%\n",
      "Epoch 53\n",
      "-------------------------------\n",
      "loss: 0.032345  [  128/25020]\n",
      "loss: 0.029069  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.6%\n",
      "Epoch 54\n",
      "-------------------------------\n",
      "loss: 0.030543  [  128/25020]\n",
      "loss: 0.022037  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.7%\n",
      "Epoch 55\n",
      "-------------------------------\n",
      "loss: 0.034921  [  128/25020]\n",
      "loss: 0.035086  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.7%\n",
      "Epoch 56\n",
      "-------------------------------\n",
      "loss: 0.045153  [  128/25020]\n",
      "loss: 0.030825  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.7%\n",
      "Epoch 57\n",
      "-------------------------------\n",
      "loss: 0.050135  [  128/25020]\n",
      "loss: 0.026524  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.8%\n",
      "Epoch 58\n",
      "-------------------------------\n",
      "loss: 0.031348  [  128/25020]\n",
      "loss: 0.018146  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.8%\n",
      "Epoch 59\n",
      "-------------------------------\n",
      "loss: 0.035440  [  128/25020]\n",
      "loss: 0.022401  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.8%\n",
      "Epoch 60\n",
      "-------------------------------\n",
      "loss: 0.028117  [  128/25020]\n",
      "loss: 0.018011  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.8%\n",
      "Epoch 61\n",
      "-------------------------------\n",
      "loss: 0.039924  [  128/25020]\n",
      "loss: 0.021259  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.8%\n",
      "Test Error: \n",
      " Accuracy: 63.9%, Avg loss: 1.600058 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.7%, Avg loss: 0.032706 \n",
      "\n",
      " Error: \n",
      " Accuracy: 81.7%  \n",
      "\n",
      "AUC value is: 0.777274571855726\n",
      "Accuracy is: 0.76428\n",
      "AUC value is: 0.9080756131683925\n",
      "Accuracy is: 0.80528\n",
      "Test Error: \n",
      " Accuracy: 81.7%, Avg loss: 0.811760 \n",
      "\n",
      "Epoch 62\n",
      "-------------------------------\n",
      "loss: 0.025206  [  128/25020]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/ecpkn/.conda/envs/opacus/lib/python3.8/site-packages/torch/optim/lr_scheduler.py:156: UserWarning: The epoch parameter in `scheduler.step()` was not necessary and is being deprecated where possible. Please use `scheduler.step()` to step the scheduler. During the deprecation, if epoch is different from None, the closed form is used instead of the new chainable form, where available. Please open an issue if you are unable to replicate your use case: https://github.com/pytorch/pytorch/issues/new/choose.\n",
      "  warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loss: 0.022597  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.8%\n",
      "Epoch 63\n",
      "-------------------------------\n",
      "loss: 0.025564  [  128/25020]\n",
      "loss: 0.017931  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Epoch 64\n",
      "-------------------------------\n",
      "loss: 0.027061  [  128/25020]\n",
      "loss: 0.028570  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.8%\n",
      "Epoch 65\n",
      "-------------------------------\n",
      "loss: 0.025618  [  128/25020]\n",
      "loss: 0.026233  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.8%\n",
      "Epoch 66\n",
      "-------------------------------\n",
      "loss: 0.020975  [  128/25020]\n",
      "loss: 0.030238  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Epoch 67\n",
      "-------------------------------\n",
      "loss: 0.030541  [  128/25020]\n",
      "loss: 0.013952  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Epoch 68\n",
      "-------------------------------\n",
      "loss: 0.020938  [  128/25020]\n",
      "loss: 0.015592  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Epoch 69\n",
      "-------------------------------\n",
      "loss: 0.030356  [  128/25020]\n",
      "loss: 0.017515  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Epoch 70\n",
      "-------------------------------\n",
      "loss: 0.023006  [  128/25020]\n",
      "loss: 0.017680  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Epoch 71\n",
      "-------------------------------\n",
      "loss: 0.021222  [  128/25020]\n",
      "loss: 0.015342  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Test Error: \n",
      " Accuracy: 64.2%, Avg loss: 1.569831 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.8%, Avg loss: 0.026507 \n",
      "\n",
      " Error: \n",
      " Accuracy: 81.9%  \n",
      "\n",
      "AUC value is: 0.7822876606641029\n",
      "Accuracy is: 0.77072\n",
      "AUC value is: 0.912498462399016\n",
      "Accuracy is: 0.8095\n",
      "Test Error: \n",
      " Accuracy: 81.9%, Avg loss: 0.796233 \n",
      "\n",
      "Epoch 72\n",
      "-------------------------------\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/ecpkn/.conda/envs/opacus/lib/python3.8/site-packages/torch/optim/lr_scheduler.py:156: UserWarning: The epoch parameter in `scheduler.step()` was not necessary and is being deprecated where possible. Please use `scheduler.step()` to step the scheduler. During the deprecation, if epoch is different from None, the closed form is used instead of the new chainable form, where available. Please open an issue if you are unable to replicate your use case: https://github.com/pytorch/pytorch/issues/new/choose.\n",
      "  warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loss: 0.020129  [  128/25020]\n",
      "loss: 0.010982  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Epoch 73\n",
      "-------------------------------\n",
      "loss: 0.016266  [  128/25020]\n",
      "loss: 0.011264  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Epoch 74\n",
      "-------------------------------\n",
      "loss: 0.018171  [  128/25020]\n",
      "loss: 0.015767  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Epoch 75\n",
      "-------------------------------\n",
      "loss: 0.018337  [  128/25020]\n",
      "loss: 0.015944  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Epoch 76\n",
      "-------------------------------\n",
      "loss: 0.016566  [  128/25020]\n",
      "loss: 0.014045  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Epoch 77\n",
      "-------------------------------\n",
      "loss: 0.020182  [  128/25020]\n",
      "loss: 0.010898  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Epoch 78\n",
      "-------------------------------\n",
      "loss: 0.023304  [  128/25020]\n",
      "loss: 0.010965  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Epoch 79\n",
      "-------------------------------\n",
      "loss: 0.022272  [  128/25020]\n",
      "loss: 0.015483  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Epoch 80\n",
      "-------------------------------\n",
      "loss: 0.020809  [  128/25020]\n",
      "loss: 0.021417  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Epoch 81\n",
      "-------------------------------\n",
      "loss: 0.027949  [  128/25020]\n",
      "loss: 0.012315  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Test Error: \n",
      " Accuracy: 64.3%, Avg loss: 1.561767 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.9%, Avg loss: 0.024417 \n",
      "\n",
      " Error: \n",
      " Accuracy: 81.9%  \n",
      "\n",
      "AUC value is: 0.7834367085994935\n",
      "Accuracy is: 0.77212\n",
      "AUC value is: 0.9138929192914683\n",
      "Accuracy is: 0.81004\n",
      "Test Error: \n",
      " Accuracy: 81.9%, Avg loss: 0.792493 \n",
      "\n",
      "Epoch 82\n",
      "-------------------------------\n",
      "loss: 0.023850  [  128/25020]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/ecpkn/.conda/envs/opacus/lib/python3.8/site-packages/torch/optim/lr_scheduler.py:156: UserWarning: The epoch parameter in `scheduler.step()` was not necessary and is being deprecated where possible. Please use `scheduler.step()` to step the scheduler. During the deprecation, if epoch is different from None, the closed form is used instead of the new chainable form, where available. Please open an issue if you are unable to replicate your use case: https://github.com/pytorch/pytorch/issues/new/choose.\n",
      "  warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loss: 0.011319  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Epoch 83\n",
      "-------------------------------\n",
      "loss: 0.017684  [  128/25020]\n",
      "loss: 0.016970  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Epoch 84\n",
      "-------------------------------\n",
      "loss: 0.018059  [  128/25020]\n",
      "loss: 0.016385  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Epoch 85\n",
      "-------------------------------\n",
      "loss: 0.018042  [  128/25020]\n",
      "loss: 0.016678  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Epoch 86\n",
      "-------------------------------\n",
      "loss: 0.016074  [  128/25020]\n",
      "loss: 0.011525  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Epoch 87\n",
      "-------------------------------\n",
      "loss: 0.016934  [  128/25020]\n",
      "loss: 0.018423  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 100.0%\n",
      "Epoch 88\n",
      "-------------------------------\n",
      "loss: 0.013151  [  128/25020]\n",
      "loss: 0.015470  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Epoch 89\n",
      "-------------------------------\n",
      "loss: 0.018945  [  128/25020]\n",
      "loss: 0.016532  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Epoch 90\n",
      "-------------------------------\n",
      "loss: 0.025180  [  128/25020]\n",
      "loss: 0.017686  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Epoch 91\n",
      "-------------------------------\n",
      "loss: 0.025375  [  128/25020]\n",
      "loss: 0.015982  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Test Error: \n",
      " Accuracy: 64.3%, Avg loss: 1.556100 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.8%, Avg loss: 0.024169 \n",
      "\n",
      " Error: \n",
      " Accuracy: 82.0%  \n",
      "\n",
      "AUC value is: 0.7844686476599345\n",
      "Accuracy is: 0.77368\n",
      "AUC value is: 0.9148068062763559\n",
      "Accuracy is: 0.81184\n",
      "Test Error: \n",
      " Accuracy: 82.0%, Avg loss: 0.788907 \n",
      "\n",
      "Epoch 92\n",
      "-------------------------------\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/ecpkn/.conda/envs/opacus/lib/python3.8/site-packages/torch/optim/lr_scheduler.py:156: UserWarning: The epoch parameter in `scheduler.step()` was not necessary and is being deprecated where possible. Please use `scheduler.step()` to step the scheduler. During the deprecation, if epoch is different from None, the closed form is used instead of the new chainable form, where available. Please open an issue if you are unable to replicate your use case: https://github.com/pytorch/pytorch/issues/new/choose.\n",
      "  warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loss: 0.036077  [  128/25020]\n",
      "loss: 0.013699  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Epoch 93\n",
      "-------------------------------\n",
      "loss: 0.018358  [  128/25020]\n",
      "loss: 0.014158  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Epoch 94\n",
      "-------------------------------\n",
      "loss: 0.019679  [  128/25020]\n",
      "loss: 0.018492  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 100.0%\n",
      "Epoch 95\n",
      "-------------------------------\n",
      "loss: 0.016462  [  128/25020]\n",
      "loss: 0.016524  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 100.0%\n",
      "Epoch 96\n",
      "-------------------------------\n",
      "loss: 0.021494  [  128/25020]\n",
      "loss: 0.008748  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Epoch 97\n",
      "-------------------------------\n",
      "loss: 0.019676  [  128/25020]\n",
      "loss: 0.012327  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Epoch 98\n",
      "-------------------------------\n",
      "loss: 0.019592  [  128/25020]\n",
      "loss: 0.011481  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Epoch 99\n",
      "-------------------------------\n",
      "loss: 0.014682  [  128/25020]\n",
      "loss: 0.014643  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 100.0%\n",
      "Epoch 100\n",
      "-------------------------------\n",
      "loss: 0.015102  [  128/25020]\n",
      "loss: 0.016236  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 100.0%\n"
     ]
    }
   ],
   "source": [
    "# Train the target model on CIFAR-100 and, every 10 epochs, evaluate three\n",
    "# membership-inference attacks (shadow model, LiRA, loss-threshold baseline)\n",
    "# on the highest-privacy-risk samples.\n",
    "(x_train, y_train), (x_test, y_test), train_keep_exp, test_keep_exp = load_CIFAR100(0, 100, prop_keep=0.5, seed=0)\n",
    "training_data = CustomDataset(x_train, y_train, train_transform)\n",
    "train_dataloader = DataLoader(training_data, batch_size=batch_size)\n",
    "# Build the target model; the constructor signature depends on the architecture.\n",
    "if model in ['NN', 'NN_4layer']:\n",
    "    TargetModel = globals()['create_{}_model'.format(model)](x_train.shape[1], y_train.max()+1)\n",
    "elif model == 'CNN':\n",
    "    TargetModel = globals()['create_{}_model'.format(model)](y_train.max()+1, data_name)\n",
    "else:\n",
    "    TargetModel = globals()['create_{}_model'.format(model)](y_train.max()+1)\n",
    "TargetModel.to(device)\n",
    "loss_fn = nn.CrossEntropyLoss()\n",
    "optimizer = torch.optim.SGD(TargetModel.parameters(), lr=LEARNING_RATE, momentum=0.9, weight_decay=5e-4)\n",
    "train_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[30, 50, 70, 90], gamma=0.2)\n",
    "for t in range(100):\n",
    "    print(f\"Epoch {t+1}\\n-------------------------------\")\n",
    "    train(train_dataloader, TargetModel, loss_fn, optimizer, device)\n",
    "    # Step once per epoch after training; the old `step(t+1)` form is deprecated\n",
    "    # and caused the UserWarning visible in this cell's stderr output.\n",
    "    # (Schedule shifts by at most one epoch relative to the old closed form.)\n",
    "    train_scheduler.step()\n",
    "\n",
    "    if t % 10 == 0:\n",
    "        # Generalization gap between train and test accuracy.\n",
    "        test_acc = evaluate(test_dataloader, TargetModel, loss_fn, device)\n",
    "        train_acc = evaluate(train_dataloader, TargetModel, loss_fn, device)\n",
    "        gene_distance.append(train_acc - test_acc)\n",
    "\n",
    "        # NOTE(review): `train_keep`, `score_all`, `pri_risk_all`, `Y_data`,\n",
    "        # `all_dataloader` and `attack_model` come from earlier cells; this cell\n",
    "        # unpacks `train_keep_exp`, so confirm the globals match this data split.\n",
    "        mem_label = train_keep[0]\n",
    "\n",
    "        # Target-model confidences and per-sample membership scores.\n",
    "        conf_data, label_data = get_model_pred(all_dataloader, TargetModel, device)\n",
    "        conf_data = conf_data.detach().cpu().numpy().astype(np.float64)\n",
    "        label_data = label_data.detach().cpu().numpy()\n",
    "        score_tar = cal_score(conf_data.copy(), label_data)\n",
    "\n",
    "        # Shadow-model attack: features are the top-3 confidences plus a\n",
    "        # prediction-correctness bit.\n",
    "        targetX = conf_data\n",
    "        pred_cor = (targetX.argmax(1) == Y_data).astype(int)\n",
    "        targetY = mem_label\n",
    "        targetX, _ = get_top_k_conf(3, targetX, targetX)\n",
    "        targetX = np.concatenate((targetX, pred_cor.reshape(pred_cor.shape[0], 1)), 1)\n",
    "        targetX = targetX.astype(np.float32)\n",
    "        shadow_attack_data = CustomDataset(targetX, targetY, attack_transform)\n",
    "        shadow_attack_dataloader = DataLoader(shadow_attack_data, batch_size=batch_size, shuffle=False)\n",
    "        attack_test_scores, attack_test_mem = get_attack_pred(shadow_attack_dataloader, attack_model, device)\n",
    "        attack_test_scores = attack_test_scores.detach().cpu().numpy()\n",
    "        attack_test_mem = attack_test_mem.detach().cpu().numpy()\n",
    "        shadow_result.append(evaluate_ROC(attack_test_scores, attack_test_mem))\n",
    "\n",
    "        # LiRA attack, scored on the `x` highest-risk samples. (`score_tar`\n",
    "        # was previously recomputed here redundantly; the value above is reused.)\n",
    "        pri_risk_rank_t = np.flip(np.argsort(pri_risk_all))\n",
    "\n",
    "        pred_result = LIRA_attack(train_keep, score_all, score_tar, mem_label)\n",
    "        evaluate_ROC(pred_result, mem_label, threshold=0)\n",
    "        pred_clip = pred_result[pri_risk_rank_t[:x]] > 0\n",
    "        mem_clip = mem_label[pri_risk_rank_t[:x]]\n",
    "        LIRA_result.append(metrics.accuracy_score(mem_clip, pred_clip))\n",
    "\n",
    "        # Baseline (loss-threshold) attack on the same high-risk samples.\n",
    "        # Previously this indexed with the stale global `pri_risk_rank`; use\n",
    "        # `pri_risk_rank_t` so all attacks are compared on identical samples.\n",
    "        pred_result = base_attack(all_dataloader, TargetModel, loss_fn, device)\n",
    "        pred_clip = pred_result[pri_risk_rank_t[:x]]\n",
    "        mem_clip = mem_label[pri_risk_rank_t[:x]]\n",
    "        risk_base_result.append(metrics.accuracy_score(mem_clip, pred_clip))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "id": "f6d4f6f4-6a7e-4a0e-b609-b6d02ee3f65b",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[0.49968,\n",
       " 0.5043,\n",
       " 0.52254,\n",
       " 0.6,\n",
       " 0.68802,\n",
       " 0.74826,\n",
       " 0.76428,\n",
       " 0.77072,\n",
       " 0.77212,\n",
       " 0.77368]"
      ]
     },
     "execution_count": 32,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "shadow_result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "id": "e3f9cf29-cee0-4568-a082-9cb2dcaa2cb7",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[0.496, 0.522, 0.556, 0.776, 0.968, 0.992, 0.99, 0.992, 0.992, 0.992]"
      ]
     },
     "execution_count": 33,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "LIRA_result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7584d45e-92da-4cef-bcc8-1d272754fc24",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e4c0a6bb-cd50-496a-9047-ac31727ec269",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3c6aa02b-4bf2-4550-bfe6-28b7d0ea13c3",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "id": "d3ffe038-93e0-4bc2-898c-01fcfce55ae1",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Test Error: \n",
      " Accuracy: 46.4%, Avg loss: 2.179250 \n",
      "\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "0.4637"
      ]
     },
     "execution_count": 32,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "evaluate(test_dataloader, TargetModel, loss_fn, device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "id": "dea16262-1e94-4e69-9b4c-d38c04aad371",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Test Error: \n",
      " Accuracy: 64.5%, Avg loss: 1.584795 \n",
      "\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "0.6445"
      ]
     },
     "execution_count": 33,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "evaluate(test_dataloader, Model, loss_fn, device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1e01724b-9357-45f7-b96d-706247a38027",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "36aa4f36-f262-4aa8-af57-61ce3a607b11",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "d9a36bfa-3abb-4979-acfe-a35c193d5218",
   "metadata": {},
   "source": [
    "### 正则化手段的影响"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "id": "eb371594-e764-4948-bb89-f5a25713a911",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Training-time data augmentation; Normalize uses CIFAR-100 channel statistics.\n",
    "train_transform = transforms.Compose([\n",
    "    transforms.ToPILImage(),\n",
    "    transforms.RandomCrop(32, padding=4),  # zero-pad 4px on each side, then randomly crop back to 32x32\n",
    "    transforms.RandomHorizontalFlip(),\n",
    "    transforms.RandomRotation(15),\n",
    "    transforms.ToTensor(),\n",
    "    transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])\n",
    "    ])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "id": "f3c9918f-2b19-4cc1-a1cf-47e5f3e77f24",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Result accumulators for the L2 weight-decay (regularization) sweep below.\n",
     "x = 500  # top-k cutoff: number of highest-risk samples kept for the clipped attack accuracy\n",
     "shadow_result = []      # shadow-model attack accuracy per run\n",
     "LIRA_result = []        # LiRA attack accuracy on the x highest-risk samples\n",
     "gene_distance = []      # generalization gap: train_acc - test_acc per run\n",
     "base_result = []        # baseline attack accuracy on all samples\n",
     "risk_base_result = []   # baseline attack accuracy on the x highest-risk samples\n",
     "test_acc_list = []\n",
     "train_acc_list = []\n",
     "# l2_norm_list = [1e-4, 5e-4, 1e-3, 2e-3, 3e-3, 4e-3, 5e-3, 6e-3, 7e-3, 8e-3, 9e-3, 1e-2]\n",
     "# l2_norm_list = [0, 5e-4, 1e-3, 5e-3, 8e-3, 1e-2]\n",
     "l2_norm_list = [3e-3]  # weight-decay values to sweep (single value for this run)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "id": "f3402ad2-3a2e-4edd-a7b2-384f720ccb90",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Files already downloaded and verified\n"
     ]
    }
   ],
   "source": [
     "# Held-out CIFAR-100 test split. transform=None here because normalization is\n",
     "# applied by CustomDataset via model_transform (defined in an earlier cell) —\n",
     "# the test set deliberately gets no augmentation.\n",
     "test_dataset = datasets.cifar.CIFAR100(root='../datasets/cifar100', train=False, transform=None, download=True)\n",
     "x_test_data = test_dataset.data\n",
     "y_test_data = np.array(test_dataset.targets)\n",
     "test_data = CustomDataset(x_test_data, y_test_data, model_transform)\n",
     "test_dataloader = DataLoader(test_data, batch_size=batch_size)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "id": "4483dd10-bf97-4752-91ed-444d4c2abe7c",
   "metadata": {},
   "outputs": [],
   "source": [
     "class ResidualBlock(nn.Module):\n",
     "    # Two 3x3 conv + BN residual block. The shortcut is the identity by default;\n",
     "    # when the stride or channel count changes, it becomes a 1x1 conv + BN\n",
     "    # projection so the addition in forward() is shape-compatible.\n",
     "    def __init__(self, inchannel, outchannel, stride=1):\n",
     "        super(ResidualBlock, self).__init__()\n",
     "        self.left = nn.Sequential(\n",
     "            nn.Conv2d(inchannel, outchannel, kernel_size=3, stride=stride, padding=1, bias=False),\n",
     "            nn.BatchNorm2d(outchannel),\n",
     "            nn.ReLU(inplace=True),\n",
     "            nn.Conv2d(outchannel, outchannel, kernel_size=3, stride=1, padding=1, bias=False),\n",
     "            nn.BatchNorm2d(outchannel)\n",
     "        )\n",
     "        # Identity shortcut unless the output shape differs from the input shape.\n",
     "        self.shortcut = nn.Sequential()\n",
     "        if stride != 1 or inchannel != outchannel:\n",
     "            self.shortcut = nn.Sequential(\n",
     "                nn.Conv2d(inchannel, outchannel, kernel_size=1, stride=stride, bias=False),\n",
     "                nn.BatchNorm2d(outchannel)\n",
     "            )\n",
     "\n",
     "    def forward(self, x):\n",
     "        out = self.left(x)\n",
     "        out += self.shortcut(x)\n",
     "        # NOTE(review): F is not imported by name in this notebook's visible import\n",
     "        # cell — presumably it enters the namespace via the `from frame... import *`\n",
     "        # lines; confirm torch.nn.functional is exported there.\n",
     "        out = F.relu(out)\n",
     "        return out\n",
     "\n",
     "class ResNet(nn.Module):\n",
     "    # ResNet-18-style network for 32x32 inputs (4 stages of 2 residual blocks),\n",
     "    # with an extra Dropout(p=0.5) applied after every stage as an additional\n",
     "    # regularizer for this experiment.\n",
     "    def __init__(self, ResidualBlock, num_classes=10):\n",
     "        # num_classes defaults to 10, but the cell below instantiates with 100 (CIFAR-100).\n",
     "        super(ResNet, self).__init__()\n",
     "        self.inchannel = 64\n",
     "        self.conv1 = nn.Sequential(\n",
     "            nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False),\n",
     "            nn.BatchNorm2d(64),\n",
     "            nn.ReLU(),\n",
     "        )\n",
     "        self.layer1 = self.make_layer(ResidualBlock, 64,  2, stride=1)\n",
     "        self.layer2 = self.make_layer(ResidualBlock, 128, 2, stride=2)\n",
     "        self.layer3 = self.make_layer(ResidualBlock, 256, 2, stride=2)\n",
     "        self.layer4 = self.make_layer(ResidualBlock, 512, 2, stride=2)\n",
     "        self.fc = nn.Linear(512, num_classes)\n",
     "        self.dropout = nn.Dropout(p=0.5)\n",
     "\n",
     "    def make_layer(self, block, channels, num_blocks, stride):\n",
     "        # Only the first block of a stage may downsample; the rest use stride 1.\n",
     "        strides = [stride] + [1] * (num_blocks - 1)   #strides=[1,1]\n",
     "        layers = []\n",
     "        for stride in strides:\n",
     "            layers.append(block(self.inchannel, channels, stride))\n",
     "            self.inchannel = channels\n",
     "        return nn.Sequential(*layers)\n",
     "\n",
     "    def forward(self, x):\n",
     "        out = self.conv1(x)\n",
     "        out = self.layer1(out)\n",
     "        out = self.dropout(out)\n",
     "        out = self.layer2(out)\n",
     "        out = self.dropout(out)\n",
     "        out = self.layer3(out)\n",
     "        out = self.dropout(out)\n",
     "        out = self.layer4(out)\n",
     "        out = self.dropout(out)\n",
     "        out = F.avg_pool2d(out, 4)  # average-pool the final feature map with a 4x4 window\n",
     "        out = out.view(out.size(0), -1)\n",
     "        out = self.fc(out)\n",
     "        return out"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "id": "49350276-e4e5-46a6-b9ec-e996a7d63550",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Files already downloaded and verified\n",
      "Files already downloaded and verified\n",
      "Epoch 1\n",
      "-------------------------------\n",
      "loss: 4.604994  [  128/25020]\n",
      "loss: 4.352406  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 3.1%\n",
      "Epoch 2\n",
      "-------------------------------\n",
      "loss: 4.130325  [  128/25020]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/ecpkn/.conda/envs/opacus/lib/python3.8/site-packages/torch/optim/lr_scheduler.py:156: UserWarning: The epoch parameter in `scheduler.step()` was not necessary and is being deprecated where possible. Please use `scheduler.step()` to step the scheduler. During the deprecation, if epoch is different from None, the closed form is used instead of the new chainable form, where available. Please open an issue if you are unable to replicate your use case: https://github.com/pytorch/pytorch/issues/new/choose.\n",
      "  warnings.warn(EPOCH_DEPRECATION_WARNING, UserWarning)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loss: 4.044290  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 6.4%\n",
      "Epoch 3\n",
      "-------------------------------\n",
      "loss: 4.074186  [  128/25020]\n",
      "loss: 3.803587  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 8.9%\n",
      "Epoch 4\n",
      "-------------------------------\n",
      "loss: 3.887410  [  128/25020]\n",
      "loss: 3.717208  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 10.8%\n",
      "Epoch 5\n",
      "-------------------------------\n",
      "loss: 3.854593  [  128/25020]\n",
      "loss: 3.573496  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 12.6%\n",
      "Epoch 6\n",
      "-------------------------------\n",
      "loss: 3.770935  [  128/25020]\n",
      "loss: 3.431850  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 13.8%\n",
      "Epoch 7\n",
      "-------------------------------\n",
      "loss: 3.734405  [  128/25020]\n",
      "loss: 3.337080  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 15.3%\n",
      "Epoch 8\n",
      "-------------------------------\n",
      "loss: 3.554188  [  128/25020]\n",
      "loss: 3.317826  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 16.9%\n",
      "Epoch 9\n",
      "-------------------------------\n",
      "loss: 3.500574  [  128/25020]\n",
      "loss: 3.170292  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 17.7%\n",
      "Epoch 10\n",
      "-------------------------------\n",
      "loss: 3.453521  [  128/25020]\n",
      "loss: 3.082377  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 19.0%\n",
      "Epoch 11\n",
      "-------------------------------\n",
      "loss: 3.381395  [  128/25020]\n",
      "loss: 3.100338  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 20.2%\n",
      "Epoch 12\n",
      "-------------------------------\n",
      "loss: 3.304533  [  128/25020]\n",
      "loss: 3.073067  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 20.9%\n",
      "Epoch 13\n",
      "-------------------------------\n",
      "loss: 3.227792  [  128/25020]\n",
      "loss: 2.983116  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 22.1%\n",
      "Epoch 14\n",
      "-------------------------------\n",
      "loss: 3.337789  [  128/25020]\n",
      "loss: 3.019282  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 22.6%\n",
      "Epoch 15\n",
      "-------------------------------\n",
      "loss: 3.352387  [  128/25020]\n",
      "loss: 2.967821  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 23.1%\n",
      "Epoch 16\n",
      "-------------------------------\n",
      "loss: 3.119564  [  128/25020]\n",
      "loss: 2.803198  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 23.9%\n",
      "Epoch 17\n",
      "-------------------------------\n",
      "loss: 3.122679  [  128/25020]\n",
      "loss: 2.970608  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 24.6%\n",
      "Epoch 18\n",
      "-------------------------------\n",
      "loss: 3.148401  [  128/25020]\n",
      "loss: 2.820848  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 25.0%\n",
      "Epoch 19\n",
      "-------------------------------\n",
      "loss: 3.143784  [  128/25020]\n",
      "loss: 2.813088  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 25.5%\n",
      "Epoch 20\n",
      "-------------------------------\n",
      "loss: 3.001078  [  128/25020]\n",
      "loss: 2.819343  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 26.1%\n",
      "Epoch 21\n",
      "-------------------------------\n",
      "loss: 3.069802  [  128/25020]\n",
      "loss: 2.830078  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 26.0%\n",
      "Epoch 22\n",
      "-------------------------------\n",
      "loss: 2.965660  [  128/25020]\n",
      "loss: 2.871024  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 26.4%\n",
      "Epoch 23\n",
      "-------------------------------\n",
      "loss: 3.012481  [  128/25020]\n",
      "loss: 2.790265  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 26.7%\n",
      "Epoch 24\n",
      "-------------------------------\n",
      "loss: 3.188552  [  128/25020]\n",
      "loss: 2.785414  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 26.7%\n",
      "Epoch 25\n",
      "-------------------------------\n",
      "loss: 3.033582  [  128/25020]\n",
      "loss: 2.822706  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 27.0%\n",
      "Epoch 26\n",
      "-------------------------------\n",
      "loss: 2.898170  [  128/25020]\n",
      "loss: 2.708287  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 27.3%\n",
      "Epoch 27\n",
      "-------------------------------\n",
      "loss: 2.943702  [  128/25020]\n",
      "loss: 2.672133  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 27.3%\n",
      "Epoch 28\n",
      "-------------------------------\n",
      "loss: 2.944303  [  128/25020]\n",
      "loss: 2.722376  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 28.0%\n",
      "Epoch 29\n",
      "-------------------------------\n",
      "loss: 2.960257  [  128/25020]\n",
      "loss: 2.735361  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 27.7%\n",
      "Epoch 30\n",
      "-------------------------------\n",
      "loss: 3.015043  [  128/25020]\n",
      "loss: 2.160978  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 38.4%\n",
      "Epoch 31\n",
      "-------------------------------\n",
      "loss: 2.239458  [  128/25020]\n",
      "loss: 2.086738  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 41.8%\n",
      "Epoch 32\n",
      "-------------------------------\n",
      "loss: 2.226853  [  128/25020]\n",
      "loss: 1.967218  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 42.8%\n",
      "Epoch 33\n",
      "-------------------------------\n",
      "loss: 2.187947  [  128/25020]\n",
      "loss: 1.946413  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 42.9%\n",
      "Epoch 34\n",
      "-------------------------------\n",
      "loss: 2.451430  [  128/25020]\n",
      "loss: 1.911671  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 43.8%\n",
      "Epoch 35\n",
      "-------------------------------\n",
      "loss: 2.157666  [  128/25020]\n",
      "loss: 1.934374  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 44.5%\n",
      "Epoch 36\n",
      "-------------------------------\n",
      "loss: 2.242328  [  128/25020]\n",
      "loss: 1.943076  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 44.8%\n",
      "Epoch 37\n",
      "-------------------------------\n",
      "loss: 2.172808  [  128/25020]\n",
      "loss: 1.956049  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 44.4%\n",
      "Epoch 38\n",
      "-------------------------------\n",
      "loss: 2.146455  [  128/25020]\n",
      "loss: 1.885245  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 45.5%\n",
      "Epoch 39\n",
      "-------------------------------\n",
      "loss: 2.120597  [  128/25020]\n",
      "loss: 1.957095  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 45.7%\n",
      "Epoch 40\n",
      "-------------------------------\n",
      "loss: 2.102189  [  128/25020]\n",
      "loss: 1.915435  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 46.0%\n",
      "Epoch 41\n",
      "-------------------------------\n",
      "loss: 2.104128  [  128/25020]\n",
      "loss: 1.934398  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 47.1%\n",
      "Epoch 42\n",
      "-------------------------------\n",
      "loss: 2.080261  [  128/25020]\n",
      "loss: 1.853593  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 47.4%\n",
      "Epoch 43\n",
      "-------------------------------\n",
      "loss: 1.992854  [  128/25020]\n",
      "loss: 1.854296  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 47.3%\n",
      "Epoch 44\n",
      "-------------------------------\n",
      "loss: 1.966002  [  128/25020]\n",
      "loss: 1.849767  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 47.7%\n",
      "Epoch 45\n",
      "-------------------------------\n",
      "loss: 2.131210  [  128/25020]\n",
      "loss: 1.799142  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 48.2%\n",
      "Epoch 46\n",
      "-------------------------------\n",
      "loss: 1.990415  [  128/25020]\n",
      "loss: 1.826046  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 48.8%\n",
      "Epoch 47\n",
      "-------------------------------\n",
      "loss: 1.994051  [  128/25020]\n",
      "loss: 1.752375  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 49.2%\n",
      "Epoch 48\n",
      "-------------------------------\n",
      "loss: 1.890146  [  128/25020]\n",
      "loss: 1.749340  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 49.4%\n",
      "Epoch 49\n",
      "-------------------------------\n",
      "loss: 2.002339  [  128/25020]\n",
      "loss: 1.805377  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 49.5%\n",
      "Epoch 50\n",
      "-------------------------------\n",
      "loss: 1.911263  [  128/25020]\n",
      "loss: 1.429528  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 57.4%\n",
      "Epoch 51\n",
      "-------------------------------\n",
      "loss: 1.499612  [  128/25020]\n",
      "loss: 1.396815  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 59.8%\n",
      "Epoch 52\n",
      "-------------------------------\n",
      "loss: 1.440286  [  128/25020]\n",
      "loss: 1.271578  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 61.5%\n",
      "Epoch 53\n",
      "-------------------------------\n",
      "loss: 1.453528  [  128/25020]\n",
      "loss: 1.295609  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 62.6%\n",
      "Epoch 54\n",
      "-------------------------------\n",
      "loss: 1.534582  [  128/25020]\n",
      "loss: 1.159503  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 63.3%\n",
      "Epoch 55\n",
      "-------------------------------\n",
      "loss: 1.388270  [  128/25020]\n",
      "loss: 1.229545  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 64.6%\n",
      "Epoch 56\n",
      "-------------------------------\n",
      "loss: 1.285595  [  128/25020]\n",
      "loss: 1.141109  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 64.9%\n",
      "Epoch 57\n",
      "-------------------------------\n",
      "loss: 1.316507  [  128/25020]\n",
      "loss: 1.034570  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 65.4%\n",
      "Epoch 58\n",
      "-------------------------------\n",
      "loss: 1.279075  [  128/25020]\n",
      "loss: 1.241687  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 65.9%\n",
      "Epoch 59\n",
      "-------------------------------\n",
      "loss: 1.261572  [  128/25020]\n",
      "loss: 1.107507  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 66.4%\n",
      "Epoch 60\n",
      "-------------------------------\n",
      "loss: 1.186118  [  128/25020]\n",
      "loss: 1.102006  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 66.7%\n",
      "Epoch 61\n",
      "-------------------------------\n",
      "loss: 1.128819  [  128/25020]\n",
      "loss: 1.066055  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 66.9%\n",
      "Epoch 62\n",
      "-------------------------------\n",
      "loss: 1.120118  [  128/25020]\n",
      "loss: 1.089655  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 67.7%\n",
      "Epoch 63\n",
      "-------------------------------\n",
      "loss: 1.178368  [  128/25020]\n",
      "loss: 1.059101  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 67.8%\n",
      "Epoch 64\n",
      "-------------------------------\n",
      "loss: 1.164634  [  128/25020]\n",
      "loss: 1.050771  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 68.2%\n",
      "Epoch 65\n",
      "-------------------------------\n",
      "loss: 1.066362  [  128/25020]\n",
      "loss: 0.989133  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 68.5%\n",
      "Epoch 66\n",
      "-------------------------------\n",
      "loss: 1.122492  [  128/25020]\n",
      "loss: 1.018094  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 68.7%\n",
      "Epoch 67\n",
      "-------------------------------\n",
      "loss: 1.150347  [  128/25020]\n",
      "loss: 0.982388  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 69.2%\n",
      "Epoch 68\n",
      "-------------------------------\n",
      "loss: 1.124883  [  128/25020]\n",
      "loss: 1.036924  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 69.7%\n",
      "Epoch 69\n",
      "-------------------------------\n",
      "loss: 1.180901  [  128/25020]\n",
      "loss: 0.926419  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 69.9%\n",
      "Epoch 70\n",
      "-------------------------------\n",
      "loss: 1.161383  [  128/25020]\n",
      "loss: 0.751788  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 75.1%\n",
      "Epoch 71\n",
      "-------------------------------\n",
      "loss: 0.874222  [  128/25020]\n",
      "loss: 0.702787  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 77.7%\n",
      "Epoch 72\n",
      "-------------------------------\n",
      "loss: 0.861013  [  128/25020]\n",
      "loss: 0.690481  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 78.4%\n",
      "Epoch 73\n",
      "-------------------------------\n",
      "loss: 0.731230  [  128/25020]\n",
      "loss: 0.684598  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 79.2%\n",
      "Epoch 74\n",
      "-------------------------------\n",
      "loss: 0.817295  [  128/25020]\n",
      "loss: 0.661478  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 80.3%\n",
      "Epoch 75\n",
      "-------------------------------\n",
      "loss: 0.796628  [  128/25020]\n",
      "loss: 0.679130  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 80.2%\n",
      "Epoch 76\n",
      "-------------------------------\n",
      "loss: 0.653405  [  128/25020]\n",
      "loss: 0.575572  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 81.0%\n",
      "Epoch 77\n",
      "-------------------------------\n",
      "loss: 0.684972  [  128/25020]\n",
      "loss: 0.595116  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 81.4%\n",
      "Epoch 78\n",
      "-------------------------------\n",
      "loss: 0.672100  [  128/25020]\n",
      "loss: 0.687543  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 81.4%\n",
      "Epoch 79\n",
      "-------------------------------\n",
      "loss: 0.794127  [  128/25020]\n",
      "loss: 0.541542  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 82.3%\n",
      "Epoch 80\n",
      "-------------------------------\n",
      "loss: 0.629451  [  128/25020]\n",
      "loss: 0.611001  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 83.0%\n",
      "Epoch 81\n",
      "-------------------------------\n",
      "loss: 0.672536  [  128/25020]\n",
      "loss: 0.600596  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 83.2%\n",
      "Epoch 82\n",
      "-------------------------------\n",
      "loss: 0.634225  [  128/25020]\n",
      "loss: 0.569232  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 83.5%\n",
      "Epoch 83\n",
      "-------------------------------\n",
      "loss: 0.604679  [  128/25020]\n",
      "loss: 0.520270  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 84.0%\n",
      "Epoch 84\n",
      "-------------------------------\n",
      "loss: 0.599761  [  128/25020]\n",
      "loss: 0.533747  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 84.3%\n",
      "Epoch 85\n",
      "-------------------------------\n",
      "loss: 0.569616  [  128/25020]\n",
      "loss: 0.544543  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 84.8%\n",
      "Epoch 86\n",
      "-------------------------------\n",
      "loss: 0.579371  [  128/25020]\n",
      "loss: 0.546742  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 85.2%\n",
      "Epoch 87\n",
      "-------------------------------\n",
      "loss: 0.517643  [  128/25020]\n",
      "loss: 0.495133  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 85.3%\n",
      "Epoch 88\n",
      "-------------------------------\n",
      "loss: 0.489958  [  128/25020]\n",
      "loss: 0.461480  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 85.4%\n",
      "Epoch 89\n",
      "-------------------------------\n",
      "loss: 0.576239  [  128/25020]\n",
      "loss: 0.509658  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 86.3%\n",
      "Epoch 90\n",
      "-------------------------------\n",
      "loss: 0.552706  [  128/25020]\n",
      "loss: 0.465107  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 87.4%\n",
      "Epoch 91\n",
      "-------------------------------\n",
      "loss: 0.503370  [  128/25020]\n",
      "loss: 0.364819  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 88.3%\n",
      "Epoch 92\n",
      "-------------------------------\n",
      "loss: 0.569067  [  128/25020]\n",
      "loss: 0.446479  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 88.5%\n",
      "Epoch 93\n",
      "-------------------------------\n",
      "loss: 0.465584  [  128/25020]\n",
      "loss: 0.341593  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 88.7%\n",
      "Epoch 94\n",
      "-------------------------------\n",
      "loss: 0.450444  [  128/25020]\n",
      "loss: 0.413852  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 88.9%\n",
      "Epoch 95\n",
      "-------------------------------\n",
      "loss: 0.394046  [  128/25020]\n",
      "loss: 0.378319  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 89.1%\n",
      "Epoch 96\n",
      "-------------------------------\n",
      "loss: 0.459312  [  128/25020]\n",
      "loss: 0.406006  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 89.2%\n",
      "Epoch 97\n",
      "-------------------------------\n",
      "loss: 0.522928  [  128/25020]\n",
      "loss: 0.393383  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 89.4%\n",
      "Epoch 98\n",
      "-------------------------------\n",
      "loss: 0.388904  [  128/25020]\n",
      "loss: 0.377261  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 89.4%\n",
      "Epoch 99\n",
      "-------------------------------\n",
      "loss: 0.433306  [  128/25020]\n",
      "loss: 0.342300  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 89.7%\n",
      "Epoch 100\n",
      "-------------------------------\n",
      "loss: 0.458107  [  128/25020]\n",
      "loss: 0.348600  [12928/25020]\n",
      "Train Error: \n",
      " Accuracy: 89.8%\n",
      "Test Error: \n",
      " Accuracy: 63.6%, Avg loss: 1.380482 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 91.8%, Avg loss: 0.334846 \n",
      "\n",
      "train: 0.9177857713828936 test: 0.6361\n",
      " Error: \n",
      " Accuracy: 78.5%  \n",
      "\n",
      "AUC value is: 0.6957481404788098\n",
      "Accuracy is: 0.61398\n",
      "AUC value is: 0.8020986813431561\n",
      "Accuracy is: 0.63492\n",
      "Test Error: \n",
      " Accuracy: 78.5%, Avg loss: 0.825477 \n",
      "\n"
     ]
    }
   ],
   "source": [
     "# For each L2 weight-decay value: train a target ResNet on CIFAR-100, record the\n",
     "# generalization gap, then run three membership-inference attacks against it\n",
     "# (shadow-model, LiRA likelihood-ratio, and loss-threshold baseline).\n",
     "# NOTE(review): this cell relies on hidden state from earlier cells: X_data, Y_data,\n",
     "# train_keep, score_all, pri_risk_all, pri_risk_rank, tar_model, attack_model,\n",
     "# attack_transform, model_transform, batch_size, device, LEARNING_RATE — it will\n",
     "# not run on a fresh kernel.\n",
     "(x_train, y_train), (x_test, y_test), train_keep_exp, test_keep_exp = load_CIFAR100(0, 100, prop_keep=0.5, seed=0)\n",
     "training_data = CustomDataset(x_train, y_train, train_transform)\n",
     "train_dataloader = DataLoader(training_data, batch_size=batch_size)\n",
     "\n",
     "\n",
     "for l2_norm in l2_norm_list:\n",
     "    \n",
     "    TargetModel = ResNet(ResidualBlock, 100)\n",
     "    # print(TargetModel)\n",
     "    TargetModel.to(device)\n",
     "\n",
     "    loss_fn = nn.CrossEntropyLoss()\n",
     "    optimizer = torch.optim.SGD(TargetModel.parameters(), lr=LEARNING_RATE, momentum=0.9, weight_decay=l2_norm)\n",
     "    train_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[30, 50, 70, 90], gamma=0.2)\n",
     "    for t in range(100):\n",
     "        print(f\"Epoch {t+1}\\n-------------------------------\")\n",
     "        if t > 0:\n",
     "            # NOTE(review): passing an epoch to scheduler.step() is deprecated (see the\n",
     "            # UserWarning in this cell's output); a plain scheduler.step() once per\n",
     "            # epoch is the supported form.\n",
     "            train_scheduler.step(t+1)\n",
     "        train(train_dataloader, TargetModel, loss_fn, optimizer, device)\n",
     "    \n",
     "    # Record train/test accuracy and the generalization gap for this weight decay.\n",
     "    test_acc = evaluate(test_dataloader, TargetModel, loss_fn, device)\n",
     "    train_acc = evaluate(train_dataloader, TargetModel, loss_fn, device)\n",
     "    test_acc_list.append(test_acc)\n",
     "    train_acc_list.append(train_acc)\n",
     "    print(\"train:\",train_acc,\"test:\",test_acc)\n",
     "    distance = train_acc - test_acc\n",
     "    gene_distance.append(distance)\n",
     "    \n",
     "    \n",
     "    # Evaluate attacks on the full candidate pool (X_data/Y_data from earlier cells);\n",
     "    # this deliberately shadows the x_test/y_test returned by load_CIFAR100 above.\n",
     "    x_test = X_data\n",
     "    y_test = Y_data\n",
     "    mem_label = train_keep[0]\n",
     "    # Query the target model for per-sample prediction confidences.\n",
     "    \n",
     "    all_data = CustomDataset(x_test, y_test, model_transform)\n",
     "    all_dataloader = DataLoader(all_data, batch_size=batch_size)\n",
     "    \n",
     "    conf_data, label_data = get_model_pred(all_dataloader, TargetModel, device)\n",
     "    conf_data = conf_data.detach().cpu().numpy()\n",
     "    label_data = label_data.detach().cpu().numpy()\n",
     "    conf_data = conf_data.astype(np.float64)\n",
     "    score_tar = cal_score(conf_data.copy(), label_data)\n",
     "    \n",
     "    # Run the shadow-model attack: features are the top-3 confidences plus a\n",
     "    # correct-prediction indicator.\n",
     "    targetX = conf_data\n",
     "    pred_cor = (targetX.argmax(1) == Y_data).astype(int)\n",
     "    targetY = mem_label\n",
     "    targetX, _ = get_top_k_conf(3, targetX, targetX)\n",
     "    targetX = np.concatenate((targetX, pred_cor.reshape(pred_cor.shape[0],1)), 1)\n",
     "    targetX = targetX.astype(np.float32)\n",
     "    shadow_attack_data = CustomDataset(targetX, targetY, attack_transform)\n",
     "    shadow_attack_dataloader = DataLoader(shadow_attack_data, batch_size=batch_size, shuffle=False)\n",
     "    attack_test_scores, attack_test_mem = get_attack_pred(shadow_attack_dataloader, attack_model, device)\n",
     "    attack_test_scores, attack_test_mem = attack_test_scores.detach().cpu().numpy(), attack_test_mem.detach().cpu().numpy()\n",
     "    accuracy = evaluate_ROC(attack_test_scores, attack_test_mem)\n",
     "    shadow_result.append(accuracy)\n",
     "    \n",
     "    # Run the risk-ranked LiRA attack: rank samples by privacy risk (descending)\n",
     "    # and score the attack on the x highest-risk samples only.\n",
     "    score_tar = cal_score(conf_data.copy(), label_data)\n",
     "    pri_risk_t = pri_risk_all\n",
     "    pri_risk_rank_t = np.argsort(pri_risk_t)\n",
     "    pri_risk_rank_t = np.flip(pri_risk_rank_t)\n",
     "    \n",
     "    pred_result = LIRA_attack(train_keep, score_all, score_tar, mem_label)\n",
     "    evaluate_ROC(pred_result, mem_label, threshold=0)\n",
     "    pred_clip = pred_result[pri_risk_rank_t[:x]]\n",
     "    mem_clip = mem_label[pri_risk_rank_t[:x]]\n",
     "    pred_clip = pred_clip > 0\n",
     "    accuracy = metrics.accuracy_score(mem_clip, pred_clip)\n",
     "    LIRA_result.append(accuracy)\n",
     "    \n",
     "    \n",
     "    # Run the loss-threshold baseline attack.\n",
     "    loss_fn = nn.CrossEntropyLoss()\n",
     "    pred_result = base_attack(all_dataloader, TargetModel, loss_fn, device)\n",
     "    # NOTE(review): train_keep[tar_model] is used here, but mem_label = train_keep[0]\n",
     "    # above — confirm tar_model == 0; otherwise the baseline is scored against\n",
     "    # different membership labels than the other two attacks.\n",
     "    accuracy = metrics.accuracy_score(train_keep[tar_model], pred_result)\n",
     "    base_result.append(accuracy)\n",
     "    \n",
     "    # NOTE(review): pri_risk_rank (from an earlier cell) is used here rather than\n",
     "    # the pri_risk_rank_t computed just above — confirm this is intentional and\n",
     "    # that both rankings agree.\n",
     "    pred_clip = pred_result[pri_risk_rank[:x]]\n",
     "    mem_clip = train_keep[tar_model][pri_risk_rank[:x]]\n",
     "    accuracy = metrics.accuracy_score(mem_clip, pred_clip)\n",
     "    risk_base_result.append(accuracy)\n",
     "\n",
     "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "id": "31c0aff5-7bc0-468b-9bb8-6a743d77ff22",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[0.6361]"
      ]
     },
     "execution_count": 31,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "test_acc_list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "id": "cd0412ba-4c5c-490e-a15d-df906d975349",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[0.9177857713828936]"
      ]
     },
     "execution_count": 32,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "train_acc_list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "id": "50e2fbb2-7e48-400f-8fb8-f6cbe84c9b57",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[0.28168577138289363]"
      ]
     },
     "execution_count": 33,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "gene_distance"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "id": "876334dc-f6ae-4563-afe8-aaa21a752d20",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[0.61398]"
      ]
     },
     "execution_count": 34,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "shadow_result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "id": "8f36bb33-e8be-4c83-896b-8e3da3f7d185",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[0.908]"
      ]
     },
     "execution_count": 35,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "LIRA_result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "id": "a6f1bc1a-f8fd-4bd7-a6f3-62e190e1872e",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[0.65538]"
      ]
     },
     "execution_count": 36,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "base_result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "id": "9042cd64-f60c-4449-acbe-cb110d277486",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[0.726]"
      ]
     },
     "execution_count": 37,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "risk_base_result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c6824712-defc-4d77-b0aa-70b1b5b9e68f",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "id": "203abbb2-d2d4-403d-b705-91306f84e016",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Persist the per-weight-decay sweep results (one row per l2_norm value) to CSV.\n",
     "df=pd.DataFrame({'norm': l2_norm_list, 'train_acc_list': train_acc_list, 'test_acc_list':test_acc_list, 'gene_distance':gene_distance, \n",
     "                 'shadow_result':shadow_result, 'LIRA_result':LIRA_result,  'base_result':base_result, 'risk_base_result':risk_base_result,})\n",
     "df.to_csv('CIFAR100_norm_att.csv', index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4dc8205f-a31d-4f2b-ade6-32417b88f65f",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "opacus",
   "language": "python",
   "name": "opacus"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
