{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "c7d72eed",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "from torch import nn\n",
    "from torch.utils.data import DataLoader\n",
    "from torch.utils.data import Dataset\n",
    "from torchvision import datasets\n",
    "from torchvision import transforms\n",
    "from torchvision.transforms import ToTensor\n",
    "import torchvision.transforms as tt\n",
    "import torch.nn.functional as F\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn import metrics\n",
    "\n",
    "import os"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "47f1bfb4-83af-47ee-a278-009584015dcf",
   "metadata": {},
   "outputs": [],
   "source": [
    "import time"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "376749b0",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Import helper modules created in this project\n",
    "import sys\n",
    "sys.path.append(\"..\") # Adds higher directory to python modules path.\n",
    "# NOTE(review): wildcard imports make it hard to tell where names such as\n",
    "# CustomDataset / load_CIFAR10_keep / InferenceAttack_HZ / train / test\n",
    "# come from -- consider explicit imports.\n",
    "from frame.DataProcess import *\n",
    "# from frame.ModelUtil import *\n",
    "# from frame.LIRAAttack import *\n",
    "# from frame.AttackUtil import *\n",
    "# from frame.ShadowAttack import *\n",
    "# from frame.ThresholdAttack import *\n",
    "# from frame.LabelAttack import *\n",
    "\n",
    "from Purchase100_util_Adv import *"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "8f55d813",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Experiment configuration\n",
    "LEARNING_RATE = 1e-2\n",
    "BATCH_SIZE = 128\n",
    "MODEL = 'ResNet18'\n",
    "EPOCHS = 100\n",
    "# NOTE(review): DATA_NAME is CIFAR10 while helpers are imported from\n",
    "# Purchase100_util_Adv -- confirm this combination is intended.\n",
    "DATA_NAME = 'CIFAR10' \n",
    "weight_dir = os.path.join('..', 'weights_for_exp', DATA_NAME)\n",
    "num_shadowsets = 100\n",
    "seed = 0\n",
    "prop_keep = 0.5\n",
    "\n",
    "# Per-channel normalization statistics (CIFAR-10)\n",
    "model_transform = transforms.Compose([\n",
    "    transforms.ToTensor(),\n",
    "    transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])\n",
    "    ])\n",
    "attack_transform = transforms.Compose([])\n",
    "device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n",
    "\n",
    "# Shadow-model attack parameters\n",
    "sha_models = [1,2,3] #[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]\n",
    "tar_model = 0\n",
    "attack_class = False # whether to attack each class separately\n",
    "attack_lr = 5e-4\n",
    "\n",
    "at_lr = 1e-4\n",
    "target = 0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "1678c1d3-ff0a-4e3b-a499-03e31e092f24",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Files already downloaded and verified\n",
      "Files already downloaded and verified\n"
     ]
    }
   ],
   "source": [
    "# Load the full training dataset (with per-shadow-set membership masks)\n",
    "X_data, Y_data, train_keep = load_CIFAR10_keep(num_shadowsets, prop_keep, seed)\n",
    "all_data = CustomDataset(X_data, Y_data, model_transform)\n",
    "# batch_size defaults to 1, so each iteration yields a single (image, label)\n",
    "all_dataloader = DataLoader(all_data, shuffle=False)  #batch_size=1, "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "d5a01fbb-39e0-41b6-9505-b134d90720c2",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Files already downloaded and verified\n"
     ]
    }
   ],
   "source": [
    "# CIFAR-10 test set; transform=None keeps the raw uint8 HWC arrays so that\n",
    "# normalization is presumably applied by CustomDataset via model_transform\n",
    "test_dataset = datasets.cifar.CIFAR10(root='../datasets/cifar10', train=False, transform=None, download=True)\n",
    "x_test_data = test_dataset.data\n",
    "y_test_data = np.array(test_dataset.targets)\n",
    "test_data = CustomDataset(x_test_data, y_test_data, model_transform)\n",
    "test_dataloader = DataLoader(test_data, shuffle=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "c47ecfe1-29ab-47a1-8307-75036e3b0a00",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Materialize the (transformed) datasets as numpy arrays.\n",
    "# Each dataloader yields batches of size 1, so every appended array carries a\n",
    "# leading batch axis of length 1; np.concatenate along axis 0 stitches them\n",
    "# into (N, 3, 32, 32) / (N,) directly -- equivalent to the previous\n",
    "# np.asarray + np.squeeze(axis=1) pair, but in a single step.\n",
    "X_all = []\n",
    "Y_all = []\n",
    "X_test = []\n",
    "Y_test = []\n",
    "for item in all_dataloader:\n",
    "    X_all.append(item[0].numpy())\n",
    "    Y_all.append(item[1].numpy())\n",
    "for item in test_dataloader:\n",
    "    X_test.append(item[0].numpy())\n",
    "    Y_test.append(item[1].numpy())\n",
    "X_all = np.concatenate(X_all, axis=0)\n",
    "Y_all = np.concatenate(Y_all, axis=0)\n",
    "X_test = np.concatenate(X_test, axis=0)\n",
    "Y_test = np.concatenate(Y_test, axis=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "90a9258e-4781-49c0-9448-dfa74ea87d06",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(50000, 3, 32, 32)"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "X_all.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "e1dcb415-2df0-4c14-b9cd-b29b6f376463",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(50000,)"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "Y_all.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "9fdd3325",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Members: samples inside the target model's training mask\n",
    "tr_cls_data = X_all[train_keep[target]]                      \n",
    "tr_cls_label = Y_all[train_keep[target]]\n",
    "tr_cls_data_tensor = torch.from_numpy(tr_cls_data).type(torch.FloatTensor)                        \n",
    "tr_cls_label_tensor = torch.from_numpy(tr_cls_label).type(torch.LongTensor)\n",
    "# Non-members: held-out reference samples (complement of the member mask)\n",
    "ref_data = X_all[~train_keep[target]]\n",
    "ref_label = Y_all[~train_keep[target]]\n",
    "ref_data_tensor = torch.from_numpy(ref_data).type(torch.FloatTensor)                           \n",
    "ref_label_tensor = torch.from_numpy(ref_label).type(torch.LongTensor)\n",
    "\n",
    "# First half of the CIFAR-10 test set -> validation split\n",
    "val_data = X_test[:5000]\n",
    "val_label = Y_test[:5000]\n",
    "val_data_tensor = torch.from_numpy(val_data).type(torch.FloatTensor)\n",
    "val_label_tensor = torch.from_numpy(val_label).type(torch.LongTensor)\n",
    "\n",
    "# Second half -> held-out test split\n",
    "te_data = X_test[5000:10000]\n",
    "te_label = Y_test[5000:10000]\n",
    "te_data_tensor = torch.from_numpy(te_data).type(torch.FloatTensor)\n",
    "te_label_tensor = torch.from_numpy(te_label).type(torch.LongTensor)\n",
    "\n",
    "\n",
    "# Member subset used when evaluating the attack (capped at 10000 samples)\n",
    "tr_cls_te_at_data_tensor = tr_cls_data_tensor[:10000]\n",
    "tr_cls_te_at_label_tensor = tr_cls_label_tensor[:10000]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "5e605d79",
   "metadata": {},
   "outputs": [],
   "source": [
    "class ResidualBlock(nn.Module):\n",
    "    \"\"\"Basic two-conv residual block (ResNet-18/34 style).\n",
    "\n",
    "    Uses a projection shortcut (1x1 conv + BN) whenever the stride or the\n",
    "    channel count changes; otherwise the identity shortcut is used.\n",
    "    \"\"\"\n",
    "    def __init__(self, inchannel, outchannel, stride=1):\n",
    "        super(ResidualBlock, self).__init__()\n",
    "        self.left = nn.Sequential(\n",
    "            nn.Conv2d(inchannel, outchannel, kernel_size=3, stride=stride, padding=1, bias=False),\n",
    "            nn.BatchNorm2d(outchannel),\n",
    "            nn.ReLU(inplace=True),\n",
    "            nn.Conv2d(outchannel, outchannel, kernel_size=3, stride=1, padding=1, bias=False),\n",
    "            nn.BatchNorm2d(outchannel)\n",
    "        )\n",
    "        self.shortcut = nn.Sequential()\n",
    "        if stride != 1 or inchannel != outchannel:\n",
    "            # Projection shortcut to match spatial size / channel count\n",
    "            self.shortcut = nn.Sequential(\n",
    "                nn.Conv2d(inchannel, outchannel, kernel_size=1, stride=stride, bias=False),\n",
    "                nn.BatchNorm2d(outchannel)\n",
    "            )\n",
    "\n",
    "    def forward(self, x):\n",
    "        out = self.left(x)\n",
    "        out += self.shortcut(x)\n",
    "        out = F.relu(out)\n",
    "        return out\n",
    "\n",
    "class ResNet_Adv(nn.Module):\n",
    "    \"\"\"ResNet-18-style classifier for 32x32 inputs.\n",
    "\n",
    "    forward() returns (logits, hidden_out), where hidden_out is the 512-d\n",
    "    penultimate feature vector -- presumably exposed for the inference\n",
    "    attack / adversarial regularization; confirm with callers.\n",
    "    \"\"\"\n",
    "    def __init__(self, ResidualBlock, num_classes=10):\n",
    "        super(ResNet_Adv, self).__init__()\n",
    "        self.inchannel = 64\n",
    "        self.conv1 = nn.Sequential(\n",
    "            nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False),\n",
    "            nn.BatchNorm2d(64),\n",
    "            nn.ReLU(),\n",
    "        )\n",
    "        self.layer1 = self.make_layer(ResidualBlock, 64,  2, stride=1)\n",
    "        self.layer2 = self.make_layer(ResidualBlock, 128, 2, stride=2)\n",
    "        self.layer3 = self.make_layer(ResidualBlock, 256, 2, stride=2)\n",
    "        self.layer4 = self.make_layer(ResidualBlock, 512, 2, stride=2)\n",
    "        self.fc = nn.Linear(512, num_classes)\n",
    "\n",
    "    def make_layer(self, block, channels, num_blocks, stride):\n",
    "        # First block may downsample; the rest keep stride 1.\n",
    "        strides = [stride] + [1] * (num_blocks - 1)   #strides=[1,1]\n",
    "        layers = []\n",
    "        for stride in strides:\n",
    "            layers.append(block(self.inchannel, channels, stride))\n",
    "            self.inchannel = channels\n",
    "        return nn.Sequential(*layers)\n",
    "\n",
    "    def forward(self, x):\n",
    "        out = self.conv1(x)\n",
    "        out = self.layer1(out)\n",
    "        out = self.layer2(out)\n",
    "        out = self.layer3(out)\n",
    "        out = self.layer4(out)\n",
    "        # 4x4 average pool collapses the spatial dims to 1x1 for 32x32 inputs\n",
    "        out = F.avg_pool2d(out, 4)\n",
    "        hidden_out = out.view(out.size(0), -1)\n",
    "        logits = self.fc(hidden_out)\n",
    "        return logits, hidden_out\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "cf2a1024-cfab-41e2-91fa-85ea4bec3afa",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hyper-parameters for the adversarial-regularization training loop below\n",
    "schedule=[20,35]  # epochs at which the classifier lr is decayed\n",
    "batch_size=64  # local batch size for this loop (distinct from BATCH_SIZE)\n",
    "alpha=3  # passed to train_privatly -- presumably the privacy trade-off weight; confirm\n",
    "lr=0.1  # NOTE(review): 0.1 is very high for Adam (used in the next cell) -- confirm\n",
    "gamma=0.1  # lr decay factor\n",
    "tr_epochs=50\n",
    "at_lr=0.0001  # attack lr (same value as at_lr in the config cell above)\n",
    "at_schedule=[100]\n",
    "at_gamma=0.5\n",
    "at_epochs=200\n",
    "n_classes=10\n",
    "use_cuda=True"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "1160b110",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "ResNet_Adv(\n",
       "  (conv1): Sequential(\n",
       "    (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "    (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "    (2): ReLU()\n",
       "  )\n",
       "  (layer1): Sequential(\n",
       "    (0): ResidualBlock(\n",
       "      (left): Sequential(\n",
       "        (0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (2): ReLU(inplace=True)\n",
       "        (3): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (4): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "      (shortcut): Sequential()\n",
       "    )\n",
       "    (1): ResidualBlock(\n",
       "      (left): Sequential(\n",
       "        (0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (2): ReLU(inplace=True)\n",
       "        (3): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (4): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "      (shortcut): Sequential()\n",
       "    )\n",
       "  )\n",
       "  (layer2): Sequential(\n",
       "    (0): ResidualBlock(\n",
       "      (left): Sequential(\n",
       "        (0): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "        (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (2): ReLU(inplace=True)\n",
       "        (3): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (4): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "      (shortcut): Sequential(\n",
       "        (0): Conv2d(64, 128, kernel_size=(1, 1), stride=(2, 2), bias=False)\n",
       "        (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "    )\n",
       "    (1): ResidualBlock(\n",
       "      (left): Sequential(\n",
       "        (0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (2): ReLU(inplace=True)\n",
       "        (3): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (4): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "      (shortcut): Sequential()\n",
       "    )\n",
       "  )\n",
       "  (layer3): Sequential(\n",
       "    (0): ResidualBlock(\n",
       "      (left): Sequential(\n",
       "        (0): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "        (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (2): ReLU(inplace=True)\n",
       "        (3): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (4): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "      (shortcut): Sequential(\n",
       "        (0): Conv2d(128, 256, kernel_size=(1, 1), stride=(2, 2), bias=False)\n",
       "        (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "    )\n",
       "    (1): ResidualBlock(\n",
       "      (left): Sequential(\n",
       "        (0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (2): ReLU(inplace=True)\n",
       "        (3): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (4): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "      (shortcut): Sequential()\n",
       "    )\n",
       "  )\n",
       "  (layer4): Sequential(\n",
       "    (0): ResidualBlock(\n",
       "      (left): Sequential(\n",
       "        (0): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (2): ReLU(inplace=True)\n",
       "        (3): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (4): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "      (shortcut): Sequential(\n",
       "        (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)\n",
       "        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "    )\n",
       "    (1): ResidualBlock(\n",
       "      (left): Sequential(\n",
       "        (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (2): ReLU(inplace=True)\n",
       "        (3): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (4): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "      (shortcut): Sequential()\n",
       "    )\n",
       "  )\n",
       "  (fc): Linear(in_features=512, out_features=10, bias=True)\n",
       ")"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Target model (the classifier being protected and attacked)\n",
    "model = ResNet_Adv(ResidualBlock, 10)\n",
    "# NOTE(review): lr here is 0.1 from the previous cell, which is unusually\n",
    "# high for Adam -- confirm it was not meant to be LEARNING_RATE (1e-2).\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=lr)\n",
    "criterion = nn.CrossEntropyLoss()\n",
    "model.to(device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "55a9d3f0",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "InferenceAttack_HZ(\n",
       "  (features): Sequential(\n",
       "    (0): Linear(in_features=10, out_features=1024, bias=True)\n",
       "    (1): ReLU()\n",
       "    (2): Linear(in_features=1024, out_features=512, bias=True)\n",
       "    (3): ReLU()\n",
       "    (4): Linear(in_features=512, out_features=64, bias=True)\n",
       "    (5): ReLU()\n",
       "  )\n",
       "  (labels): Sequential(\n",
       "    (0): Linear(in_features=10, out_features=128, bias=True)\n",
       "    (1): ReLU()\n",
       "    (2): Linear(in_features=128, out_features=64, bias=True)\n",
       "    (3): ReLU()\n",
       "  )\n",
       "  (combine): Sequential(\n",
       "    (0): Linear(in_features=128, out_features=512, bias=True)\n",
       "    (1): ReLU()\n",
       "    (2): Linear(in_features=512, out_features=256, bias=True)\n",
       "    (3): ReLU()\n",
       "    (4): Linear(in_features=256, out_features=128, bias=True)\n",
       "    (5): ReLU()\n",
       "    (6): Linear(in_features=128, out_features=64, bias=True)\n",
       "    (7): ReLU()\n",
       "    (8): Linear(in_features=64, out_features=1, bias=True)\n",
       "  )\n",
       "  (output): Sigmoid()\n",
       ")"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Membership-inference attack model: consumes the target's 10-d output and\n",
    "# a 10-d label encoding (presumably one-hot, given in_features=10), and\n",
    "# emits a membership score in [0, 1] via the final Sigmoid.\n",
    "attack_model = InferenceAttack_HZ(n_classes)\n",
    "attack_optimizer=torch.optim.Adam(attack_model.parameters(),lr=at_lr)\n",
    "# NOTE(review): MSE loss on a sigmoid membership score -- confirm intended (vs. BCE)\n",
    "attack_criterion=nn.MSELoss()\n",
    "attack_model.to(device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "6c0a4ed7-5f1d-4cd5-affc-c65a374d371e",
   "metadata": {},
   "outputs": [],
   "source": [
    "weight_path = os.path.join(weight_dir, \"{}_AdvReg_protected_model.pth\".format(DATA_NAME))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "4f502e94",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "----> NORMAL TRAINING MODE: c_batches 390 \n",
      "Initial test acc 34.42 train att acc 0.5004806151874399\n",
      "epoch 1 | tr_acc 38.09 | val acc 38.14 | best val acc 38.14 | best te acc 37.18 | attack avg acc 0.50 | attack val acc 0.51\n",
      "epoch 2 | tr_acc 44.57 | val acc 43.46 | best val acc 43.46 | best te acc 42.18 | attack avg acc 0.51 | attack val acc 0.51\n",
      "epoch 3 | tr_acc 53.23 | val acc 52.14 | best val acc 52.14 | best te acc 50.14 | attack avg acc 0.51 | attack val acc 0.51\n",
      "epoch 4 | tr_acc 58.92 | val acc 56.18 | best val acc 56.18 | best te acc 54.02 | attack avg acc 0.51 | attack val acc 0.52\n",
      "epoch 5 | tr_acc 64.04 | val acc 59.08 | best val acc 59.08 | best te acc 58.40 | attack avg acc 0.51 | attack val acc 0.52\n",
      "epoch 6 | tr_acc 65.60 | val acc 60.74 | best val acc 60.74 | best te acc 58.48 | attack avg acc 0.52 | attack val acc 0.52\n",
      "epoch 7 | tr_acc 65.16 | val acc 58.50 | best val acc 60.74 | best te acc 58.48 | attack avg acc 0.52 | attack val acc 0.53\n",
      "epoch 8 | tr_acc 72.09 | val acc 62.76 | best val acc 62.76 | best te acc 61.74 | attack avg acc 0.53 | attack val acc 0.54\n",
      "epoch 9 | tr_acc 75.42 | val acc 65.00 | best val acc 65.00 | best te acc 63.58 | attack avg acc 0.54 | attack val acc 0.55\n",
      "epoch 10 | tr_acc 74.86 | val acc 63.00 | best val acc 65.00 | best te acc 63.58 | attack avg acc 0.55 | attack val acc 0.55\n",
      "epoch 11 | tr_acc 76.72 | val acc 62.56 | best val acc 65.00 | best te acc 63.58 | attack avg acc 0.56 | attack val acc 0.57\n",
      "epoch 12 | tr_acc 79.93 | val acc 63.88 | best val acc 65.00 | best te acc 63.58 | attack avg acc 0.57 | attack val acc 0.58\n",
      "epoch 13 | tr_acc 82.40 | val acc 65.52 | best val acc 65.52 | best te acc 64.92 | attack avg acc 0.58 | attack val acc 0.59\n",
      "epoch 14 | tr_acc 84.26 | val acc 65.18 | best val acc 65.52 | best te acc 64.92 | attack avg acc 0.59 | attack val acc 0.59\n",
      "epoch 15 | tr_acc 83.96 | val acc 66.46 | best val acc 66.46 | best te acc 65.00 | attack avg acc 0.59 | attack val acc 0.59\n",
      "epoch 16 | tr_acc 80.25 | val acc 63.08 | best val acc 66.46 | best te acc 65.00 | attack avg acc 0.59 | attack val acc 0.59\n",
      "epoch 17 | tr_acc 80.24 | val acc 63.60 | best val acc 66.46 | best te acc 65.00 | attack avg acc 0.59 | attack val acc 0.58\n",
      "epoch 18 | tr_acc 85.70 | val acc 65.96 | best val acc 66.46 | best te acc 65.00 | attack avg acc 0.60 | attack val acc 0.60\n",
      "epoch 19 | tr_acc 88.15 | val acc 67.40 | best val acc 67.40 | best te acc 66.68 | attack avg acc 0.60 | attack val acc 0.61\n",
      "Epoch 20 Local lr 0.010000\n",
      "Epoch 20 lr 0.001000\n",
      "epoch 20 | tr_acc 91.33 | val acc 69.12 | best val acc 69.12 | best te acc 68.48 | attack avg acc 0.61 | attack val acc 0.62\n",
      "epoch 21 | tr_acc 91.87 | val acc 69.30 | best val acc 69.30 | best te acc 68.54 | attack avg acc 0.62 | attack val acc 0.63\n",
      "epoch 22 | tr_acc 92.41 | val acc 69.02 | best val acc 69.30 | best te acc 68.54 | attack avg acc 0.63 | attack val acc 0.63\n",
      "epoch 23 | tr_acc 92.72 | val acc 69.04 | best val acc 69.30 | best te acc 68.54 | attack avg acc 0.64 | attack val acc 0.64\n",
      "epoch 24 | tr_acc 92.86 | val acc 68.80 | best val acc 69.30 | best te acc 68.54 | attack avg acc 0.64 | attack val acc 0.63\n",
      "epoch 25 | tr_acc 92.99 | val acc 68.98 | best val acc 69.30 | best te acc 68.54 | attack avg acc 0.65 | attack val acc 0.64\n",
      "epoch 26 | tr_acc 93.07 | val acc 68.72 | best val acc 69.30 | best te acc 68.54 | attack avg acc 0.65 | attack val acc 0.64\n",
      "epoch 27 | tr_acc 93.11 | val acc 68.68 | best val acc 69.30 | best te acc 68.54 | attack avg acc 0.66 | attack val acc 0.64\n",
      "epoch 28 | tr_acc 93.15 | val acc 68.56 | best val acc 69.30 | best te acc 68.54 | attack avg acc 0.66 | attack val acc 0.64\n",
      "epoch 29 | tr_acc 93.33 | val acc 68.54 | best val acc 69.30 | best te acc 68.54 | attack avg acc 0.67 | attack val acc 0.65\n",
      "epoch 30 | tr_acc 93.14 | val acc 68.36 | best val acc 69.30 | best te acc 68.54 | attack avg acc 0.68 | attack val acc 0.65\n",
      "epoch 31 | tr_acc 93.19 | val acc 68.26 | best val acc 69.30 | best te acc 68.54 | attack avg acc 0.68 | attack val acc 0.65\n",
      "epoch 32 | tr_acc 93.12 | val acc 68.32 | best val acc 69.30 | best te acc 68.54 | attack avg acc 0.69 | attack val acc 0.65\n",
      "epoch 33 | tr_acc 93.17 | val acc 68.34 | best val acc 69.30 | best te acc 68.54 | attack avg acc 0.69 | attack val acc 0.65\n",
      "epoch 34 | tr_acc 93.05 | val acc 68.10 | best val acc 69.30 | best te acc 68.54 | attack avg acc 0.70 | attack val acc 0.66\n",
      "Epoch 35 Local lr 0.000100\n",
      "Epoch 35 lr 0.000010\n",
      "epoch 35 | tr_acc 93.07 | val acc 68.14 | best val acc 69.30 | best te acc 68.54 | attack avg acc 0.71 | attack val acc 0.67\n",
      "epoch 36 | tr_acc 93.07 | val acc 68.14 | best val acc 69.30 | best te acc 68.54 | attack avg acc 0.72 | attack val acc 0.67\n",
      "epoch 37 | tr_acc 93.07 | val acc 68.18 | best val acc 69.30 | best te acc 68.54 | attack avg acc 0.73 | attack val acc 0.68\n",
      "epoch 38 | tr_acc 93.07 | val acc 68.18 | best val acc 69.30 | best te acc 68.54 | attack avg acc 0.73 | attack val acc 0.67\n",
      "epoch 39 | tr_acc 93.07 | val acc 68.12 | best val acc 69.30 | best te acc 68.54 | attack avg acc 0.74 | attack val acc 0.67\n",
      "epoch 40 | tr_acc 93.07 | val acc 68.16 | best val acc 69.30 | best te acc 68.54 | attack avg acc 0.75 | attack val acc 0.68\n",
      "epoch 41 | tr_acc 93.09 | val acc 68.16 | best val acc 69.30 | best te acc 68.54 | attack avg acc 0.76 | attack val acc 0.69\n",
      "epoch 42 | tr_acc 93.07 | val acc 68.14 | best val acc 69.30 | best te acc 68.54 | attack avg acc 0.76 | attack val acc 0.69\n",
      "epoch 43 | tr_acc 93.06 | val acc 68.16 | best val acc 69.30 | best te acc 68.54 | attack avg acc 0.77 | attack val acc 0.69\n",
      "epoch 44 | tr_acc 93.05 | val acc 68.14 | best val acc 69.30 | best te acc 68.54 | attack avg acc 0.77 | attack val acc 0.69\n",
      "epoch 45 | tr_acc 93.06 | val acc 68.16 | best val acc 69.30 | best te acc 68.54 | attack avg acc 0.78 | attack val acc 0.70\n",
      "epoch 46 | tr_acc 93.05 | val acc 68.16 | best val acc 69.30 | best te acc 68.54 | attack avg acc 0.78 | attack val acc 0.69\n",
      "epoch 47 | tr_acc 93.05 | val acc 68.14 | best val acc 69.30 | best te acc 68.54 | attack avg acc 0.79 | attack val acc 0.71\n",
      "epoch 48 | tr_acc 93.05 | val acc 68.20 | best val acc 69.30 | best te acc 68.54 | attack avg acc 0.79 | attack val acc 0.71\n",
      "epoch 49 | tr_acc 93.06 | val acc 68.22 | best val acc 69.30 | best te acc 68.54 | attack avg acc 0.80 | attack val acc 0.71\n"
     ]
    }
   ],
   "source": [
    "# Adversarial-regularization training loop: alternate between updating the\n",
    "# membership-inference attack and training the classifier privately (the\n",
    "# attack's gain acts as a regularizer weighted by alpha).\n",
    "best_acc=0\n",
    "best_test_acc=0\n",
    "for epoch in range(50):\n",
    "    if epoch in schedule:\n",
    "        # Decay the classifier lr once at each scheduled epoch.\n",
    "        # Fix: the decay was previously applied a second time inside the\n",
    "        # else-branch below, so the lr actually dropped by gamma**2 per\n",
    "        # milestone (the recorded output shows 0.01 then 0.001 at epoch 20).\n",
    "        for param_group in optimizer.param_groups:\n",
    "            param_group['lr'] *= gamma\n",
    "            print('Epoch %d lr %f'%(epoch,param_group['lr']))\n",
    "\n",
    "    c_batches = len(tr_cls_data_tensor)//batch_size\n",
    "    if epoch == 0:\n",
    "        # Warm-up: one normal training epoch, then pre-train the attack.\n",
    "        print('----> NORMAL TRAINING MODE: c_batches %d '%(c_batches))\n",
    "\n",
    "        train_loss, train_acc = train(tr_cls_data_tensor,tr_cls_label_tensor,\n",
    "                                          model,criterion,optimizer,epoch,use_cuda,debug_='MEDIUM')\n",
    "        test_loss, test_acc = test(te_data_tensor,te_label_tensor,model,criterion,use_cuda)\n",
    "        for i in range(5):\n",
    "            at_loss, at_acc = train_attack(tr_cls_data_tensor,tr_cls_label_tensor,\n",
    "                                           ref_data_tensor,ref_label_tensor,model,attack_model,criterion,\n",
    "                                           attack_criterion,optimizer,attack_optimizer,epoch,use_cuda,debug_='MEDIUM')\n",
    "\n",
    "        print('Initial test acc {} train att acc {}'.format(test_acc, at_acc))\n",
    "\n",
    "    else:\n",
    "        att_accs = []\n",
    "\n",
    "        # Each round: 52 attack batches followed by 2 private-training batches.\n",
    "        rounds = (c_batches//2)\n",
    "\n",
    "        for i in range(rounds):\n",
    "            at_loss, at_acc = train_attack(tr_cls_data_tensor,tr_cls_label_tensor,\n",
    "                                           ref_data_tensor,ref_label_tensor,\n",
    "                                           model,attack_model,criterion,attack_criterion,optimizer,\n",
    "                                           attack_optimizer,epoch,use_cuda,52,(i*52)%c_batches,batch_size=batch_size)\n",
    "\n",
    "            att_accs.append(at_acc)\n",
    "\n",
    "            tr_loss, tr_acc = train_privatly(tr_cls_data_tensor,tr_cls_label_tensor,model,\n",
    "                                             attack_model,criterion,optimizer,epoch,use_cuda,\n",
    "                                             2,(2*i)%c_batches,alpha=alpha,batch_size=batch_size)\n",
    "\n",
    "        train_loss,train_acc = test(tr_cls_data_tensor,tr_cls_label_tensor,model,criterion,use_cuda)\n",
    "        val_loss, val_acc = test(val_data_tensor,val_label_tensor,model,criterion,use_cuda)\n",
    "        is_best = (val_acc > best_acc)\n",
    "\n",
    "        if is_best:\n",
    "            # Checkpoint on best validation accuracy.\n",
    "            _, best_test_acc = test(te_data_tensor,te_label_tensor,model,criterion,use_cuda)\n",
    "            torch.save(model.state_dict(), weight_path)\n",
    "\n",
    "        best_acc=max(val_acc, best_acc)\n",
    "\n",
    "        # Evaluate the attack on members vs. held-out test samples.\n",
    "        at_val_loss, at_val_acc = test_attack(tr_cls_te_at_data_tensor,tr_cls_te_at_label_tensor,\n",
    "                                                 te_data_tensor,te_label_tensor,\n",
    "                                                 model,attack_model,criterion,attack_criterion,\n",
    "                                                 optimizer,attack_optimizer,epoch,use_cuda,debug_='MEDIUM')\n",
    "\n",
    "        att_epoch_acc = np.mean(att_accs)\n",
    "\n",
    "        print('epoch %d | tr_acc %.2f | val acc %.2f | best val acc %.2f | best te acc %.2f | attack avg acc %.2f | attack val acc %.2f'%(epoch,train_acc,val_acc,best_acc,best_test_acc,att_epoch_acc,at_val_acc), flush=True)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "6c326b61-73d2-44b3-b00d-4cc891590a22",
   "metadata": {},
   "outputs": [],
   "source": [
    "torch.save(model.state_dict(), weight_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bc489b00-3278-4c10-a88f-bf7690f21945",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e35ca9f8-ab30-4973-a75c-2857d7da8953",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8e286bde-8423-43a4-8be2-c506eb97b7c1",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1e371213-facb-47b8-bc14-ffaeddd363bb",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "864adefd-0c83-41ba-8a26-d37ef30a19e4",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5cbaa5d3-46f7-4387-917e-f1abc9fbe07c",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "812cc442-b82b-4bac-a53a-5f258314a56e",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "09cb17f3-db42-4a0d-80cb-5b14ef153e87",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "85ca2309-488e-47d0-a4f3-354c45dc0739",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "92472ec7-2ab5-4689-9d93-1d74567962bd",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "00f163e7-cc10-4776-8cfb-ab0d82d465ec",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Shadow-model attack parameters\n",
    "# NOTE(review): duplicates the values already set in the configuration cell near the top\n",
    "sha_models = [1,2,3] #[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]\n",
    "tar_model = 0\n",
    "attack_class = False # whether to attack each class separately\n",
    "attack_lr = 5e-4"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "898fdd41-aa83-470f-9340-737c6ca5926e",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# 创建对应的dotaloader\n",
    "all_data = CustomDataset(X_data, Y_data, model_transform)\n",
    "all_dataloader = DataLoader(all_data, batch_size=BATCH_SIZE, shuffle=False)\n",
    "batch_size = BATCH_SIZE\n",
    "model = MODEL\n",
    "epochs = EPOCHS\n",
    "data_name = DATA_NAME \n",
    "weight_part = \"{}_{}_epoch{}_model\".format(data_name, model, epochs)\n",
    "loss_data_all = np.load('../outputs_save/Purchase100_limited_loss.npy')\n",
    "score_all = np.load('../outputs_save/Purchase100_limited_score.npy')\n",
    "conf_data_all = np.load('../outputs_save/Purchase100_limited_conf.npy')\n",
    "pri_risk_all = get_risk_score(loss_data_all, train_keep)\n",
    "pri_risk_rank = np.argsort(pri_risk_all)\n",
    "pri_risk_rank = np.flip(pri_risk_rank)\n",
    "# 训练影子攻击模型\n",
    "attack_model = shadow_attack(sha_models=sha_models, tar_model=tar_model, model_num=num_shadowsets, weight_dir=weight_dir, data_name=DATA_NAME, model=MODEL, model_transform=model_transform, \n",
    "                  model_epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=attack_lr, attack_epochs=30, attack_transform=attack_transform, \n",
    "                  device=device, prop_keep=0.5, top_k=3, attack_class=attack_class)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6808b796-8911-48e8-811d-3c138ffdd1e6",
   "metadata": {},
   "outputs": [],
   "source": [
    "top_risk = 2000"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8c18d295-8628-453d-b3ee-292c8ea0cebb",
   "metadata": {},
   "outputs": [],
   "source": [
    "TargetModel = PurchaseClassifier()\n",
    "weight_path = os.path.join(weight_dir, \"{}_AdvReg_protected_model.pth\".format(data_name))\n",
    "TargetModel.load_state_dict(torch.load(weight_path))\n",
    "TargetModel.to(device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a6625034-f82f-400c-a9e9-2e93a0b9b564",
   "metadata": {},
   "outputs": [],
   "source": [
    "loss_fn = nn.CrossEntropyLoss()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4c579718-2fa9-4feb-993c-56a39087f77c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 执行基线攻击\n",
    "pred_result = base_attack(all_dataloader, TargetModel, loss_fn, device)\n",
    "accuracy = metrics.accuracy_score(train_keep[0], pred_result)\n",
    "print('average_base_attack',accuracy)\n",
    "pred_clip = pred_result[pri_risk_rank[:top_risk]]\n",
    "mem_clip = train_keep[0][pri_risk_rank[:top_risk]]\n",
    "accuracy = metrics.accuracy_score(mem_clip, pred_clip)\n",
    "print('risk_base_attack', accuracy)\n",
    "# 执行似然比攻击\n",
    "_, score = get_score_from_model(all_dataloader, TargetModel, device)\n",
    "pred_result = LIRA_attack(train_keep, score_all, score, train_keep[0])\n",
    "accuracy = evaluate_ROC(pred_result, train_keep[0], threshold=0)\n",
    "print('average_lira_attack',accuracy)\n",
    "pred_clip = pred_result[pri_risk_rank[:top_risk]]\n",
    "mem_clip = train_keep[0][pri_risk_rank[:top_risk]]\n",
    "pred_clip = pred_clip > 0\n",
    "accuracy = metrics.accuracy_score(mem_clip, pred_clip)\n",
    "print('risk_lira_attack',accuracy)\n",
    "# 执行影子模型攻击\n",
    "\n",
    "# 提取数据集在模型上的置信度输出\n",
    "targetX, _ = get_model_pred(all_dataloader, TargetModel, device)\n",
    "targetX = targetX.detach().cpu().numpy()\n",
    "targetX = targetX.astype(np.float32)\n",
    "\n",
    "top_k = 3\n",
    "if top_k:\n",
    "    # 仅使用概率向量的前3个值\n",
    "    targetX, _ = get_top_k_conf(top_k, targetX, targetX)\n",
    "\n",
    "shadow_attack_data = CustomDataset(targetX, train_keep[0], attack_transform)\n",
    "shadow_attack_dataloader = DataLoader(shadow_attack_data, batch_size=batch_size, shuffle=False)\n",
    "attack_test_scores, attack_test_mem = get_attack_pred(shadow_attack_dataloader, attack_model, device)\n",
    "attack_test_scores, attack_test_mem = attack_test_scores.detach().cpu().numpy(), attack_test_mem.detach().cpu().numpy()\n",
    "\n",
    "pred_clip = attack_test_scores[pri_risk_rank[:top_risk]]\n",
    "mem_clip = train_keep[0][pri_risk_rank[:top_risk]]\n",
    "accuracy = evaluate_ROC(pred_clip, mem_clip)\n",
    "print('risk_shadow_attack',accuracy)\n",
    "\n",
    "accuracy = evaluate_ROC(attack_test_scores, attack_test_mem)\n",
    "print('average_shadow_attack',accuracy)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9870c0ca-4bb3-4c82-a5fb-d71f83918c09",
   "metadata": {},
   "outputs": [],
   "source": [
    "correct = evaluate(test_dataloader, TargetModel, loss_fn, device)\n",
    "print('model_test_correct',correct)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "opacus",
   "language": "python",
   "name": "opacus"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
