{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "c7d72eed",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "from torch import nn\n",
    "from torch.utils.data import DataLoader\n",
    "from torch.utils.data import Dataset\n",
    "from torchvision import datasets\n",
    "from torchvision import transforms\n",
    "from torchvision.transforms import ToTensor\n",
    "import torchvision.transforms as tt\n",
    "import torch.nn.functional as F\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn import metrics\n",
    "\n",
    "import os"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "47f1bfb4-83af-47ee-a278-009584015dcf",
   "metadata": {},
   "outputs": [],
   "source": [
    "import time"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "376749b0",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Import project-local helper modules (custom Python files)\n",
    "import sys\n",
    "sys.path.append(\"..\") # Adds higher directory to python modules path.\n",
    "from frame.DataProcess import *\n",
    "# from frame.ModelUtil import *\n",
    "# from frame.LIRAAttack import *\n",
    "# from frame.AttackUtil import *\n",
    "# from frame.ShadowAttack import *\n",
    "# from frame.ThresholdAttack import *\n",
    "# from frame.LabelAttack import *\n",
    "\n",
    "from Purchase100_util_Adv import *"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "8f55d813",
   "metadata": {},
   "outputs": [],
   "source": [
    "# --- Experiment configuration ---\n",
    "# NOTE(review): LEARNING_RATE, BATCH_SIZE, MODEL and EPOCHS do not appear to be\n",
    "# used by the cells below (a later cell defines lr/batch_size/tr_epochs) -- confirm.\n",
    "LEARNING_RATE = 1e-2\n",
    "BATCH_SIZE = 128\n",
    "MODEL = 'ResNet18'\n",
    "EPOCHS = 100\n",
    "DATA_NAME = 'CIFAR100' \n",
    "weight_dir = os.path.join('..', 'weights_for_exp', DATA_NAME)\n",
    "num_shadowsets = 100\n",
    "seed = 0\n",
    "prop_keep = 0.5\n",
    "\n",
    "# CIFAR-100 per-channel normalization statistics\n",
    "model_transform = transforms.Compose([\n",
    "    transforms.ToTensor(),\n",
    "    transforms.Normalize(mean=[0.507, 0.487, 0.441], std=[0.267, 0.256, 0.276])\n",
    "    ])\n",
    "attack_transform = transforms.Compose([])\n",
    "device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n",
    "\n",
    "# Shadow-model attack related parameters\n",
    "sha_models = [1,2,3] #[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]\n",
    "tar_model = 0\n",
    "attack_class = False # whether to run the attack separately for each class\n",
    "attack_lr = 5e-4\n",
    "\n",
    "# NOTE(review): at_lr is re-assigned (to 1e-4) by a later hyperparameter cell\n",
    "at_lr = 1e-4\n",
    "target = 0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "1678c1d3-ff0a-4e3b-a499-03e31e092f24",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Files already downloaded and verified\n",
      "Files already downloaded and verified\n"
     ]
    }
   ],
   "source": [
    "# Load the full training dataset plus the per-shadow-set membership masks\n",
    "X_data, Y_data, train_keep = load_CIFAR100_keep(num_shadowsets, prop_keep, seed)\n",
    "all_data = CustomDataset(X_data, Y_data, model_transform)\n",
    "all_dataloader = DataLoader(all_data, shuffle=False)  #batch_size=1, "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "d5a01fbb-39e0-41b6-9505-b134d90720c2",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Files already downloaded and verified\n"
     ]
    }
   ],
   "source": [
    "# CIFAR-100 official test split; normalization is applied via CustomDataset\n",
    "# (transform=None here so the raw arrays stay accessible).\n",
    "test_dataset = datasets.cifar.CIFAR100(root='../datasets/cifar100', train=False, transform=None, download=True)\n",
    "x_test_data = test_dataset.data\n",
    "y_test_data = np.array(test_dataset.targets)\n",
    "test_data = CustomDataset(x_test_data, y_test_data, model_transform)\n",
    "test_dataloader = DataLoader(test_data, shuffle=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "c47ecfe1-29ab-47a1-8307-75036e3b0a00",
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "# Materialize the normalized datasets as numpy arrays by iterating the\n",
    "# batch_size=1 dataloaders; the squeeze below drops the singleton batch axis.\n",
    "# NOTE(review): per-sample DataLoader iteration is slow; a larger batch_size\n",
    "# plus np.concatenate would produce the same arrays faster.\n",
    "X_all = []\n",
    "Y_all = []\n",
    "X_test = []\n",
    "Y_test = [] \n",
    "for item in all_dataloader: \n",
    "    X_all.append( item[0].numpy() )\n",
    "    Y_all.append( item[1].numpy()  )\n",
    "for item in test_dataloader:\n",
    "    X_test.append( item[0].numpy() )\n",
    "    Y_test.append( item[1].numpy()  )\n",
    "X_all = np.asarray(X_all)\n",
    "Y_all = np.asarray(Y_all)\n",
    "X_test = np.asarray(X_test)\n",
    "Y_test = np.asarray(Y_test)\n",
    "\n",
    "# Drop the batch dimension: (N, 1, ...) -> (N, ...)\n",
    "X_all = np.squeeze(X_all, axis=1)\n",
    "Y_all = np.squeeze(Y_all, axis=1)\n",
    "X_test = np.squeeze(X_test, axis=1)\n",
    "Y_test = np.squeeze(Y_test, axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "90a9258e-4781-49c0-9448-dfa74ea87d06",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(50000, 3, 32, 32)"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "X_all.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "e1dcb415-2df0-4c14-b9cd-b29b6f376463",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(50000,)"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "Y_all.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "9fdd3325",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Members: samples selected into the target model's training set (train_keep mask)\n",
    "tr_cls_data = X_all[train_keep[target]]                      \n",
    "tr_cls_label = Y_all[train_keep[target]]\n",
    "tr_cls_data_tensor = torch.from_numpy(tr_cls_data).type(torch.FloatTensor)                        \n",
    "tr_cls_label_tensor = torch.from_numpy(tr_cls_label).type(torch.LongTensor)\n",
    "# Non-members: reference samples excluded from the target model's training set\n",
    "ref_data = X_all[~train_keep[target]]\n",
    "ref_label = Y_all[~train_keep[target]]\n",
    "ref_data_tensor = torch.from_numpy(ref_data).type(torch.FloatTensor)                           \n",
    "ref_label_tensor = torch.from_numpy(ref_label).type(torch.LongTensor)\n",
    "\n",
    "# First 5000 test images for validation, next 5000 for final testing\n",
    "val_data = X_test[:5000]\n",
    "val_label = Y_test[:5000]\n",
    "val_data_tensor = torch.from_numpy(val_data).type(torch.FloatTensor)\n",
    "val_label_tensor = torch.from_numpy(val_label).type(torch.LongTensor)\n",
    "\n",
    "te_data = X_test[5000:10000]\n",
    "te_label = Y_test[5000:10000]\n",
    "te_data_tensor = torch.from_numpy(te_data).type(torch.FloatTensor)\n",
    "te_label_tensor = torch.from_numpy(te_label).type(torch.LongTensor)\n",
    "\n",
    "\n",
    "# Member subset used for attack evaluation (paired with te_data in test_attack)\n",
    "tr_cls_te_at_data_tensor = tr_cls_data_tensor[:10000]\n",
    "tr_cls_te_at_label_tensor = tr_cls_label_tensor[:10000]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "5e605d79",
   "metadata": {},
   "outputs": [],
   "source": [
    "class ResidualBlock(nn.Module):\n",
    "    \"\"\"Basic two-conv residual block with an optional 1x1-conv projection shortcut.\"\"\"\n",
    "    def __init__(self, inchannel, outchannel, stride=1):\n",
    "        super(ResidualBlock, self).__init__()\n",
    "        # Main path: conv3x3 -> BN -> ReLU -> conv3x3 -> BN\n",
    "        self.left = nn.Sequential(\n",
    "            nn.Conv2d(inchannel, outchannel, kernel_size=3, stride=stride, padding=1, bias=False),\n",
    "            nn.BatchNorm2d(outchannel),\n",
    "            nn.ReLU(inplace=True),\n",
    "            nn.Conv2d(outchannel, outchannel, kernel_size=3, stride=1, padding=1, bias=False),\n",
    "            nn.BatchNorm2d(outchannel)\n",
    "        )\n",
    "        # Identity shortcut, replaced by a 1x1 conv + BN projection whenever the\n",
    "        # spatial size (stride != 1) or channel count changes.\n",
    "        self.shortcut = nn.Sequential()\n",
    "        if stride != 1 or inchannel != outchannel:\n",
    "            self.shortcut = nn.Sequential(\n",
    "                nn.Conv2d(inchannel, outchannel, kernel_size=1, stride=stride, bias=False),\n",
    "                nn.BatchNorm2d(outchannel)\n",
    "            )\n",
    "\n",
    "    def forward(self, x):\n",
    "        # Residual addition followed by ReLU\n",
    "        out = self.left(x)\n",
    "        out += self.shortcut(x)\n",
    "        out = F.relu(out)\n",
    "        return out\n",
    "\n",
    "class ResNet_Adv(nn.Module):\n",
    "    \"\"\"ResNet-18-style classifier whose forward returns (logits, penultimate features).\n",
    "\n",
    "    The extra hidden_out return is consumed by the adversarial-regularization /\n",
    "    membership-inference code. Default num_classes is 10; pass 100 for CIFAR-100.\n",
    "    \"\"\"\n",
    "    def __init__(self, ResidualBlock, num_classes=10):\n",
    "        super(ResNet_Adv, self).__init__()\n",
    "        self.inchannel = 64\n",
    "        self.conv1 = nn.Sequential(\n",
    "            nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False),\n",
    "            nn.BatchNorm2d(64),\n",
    "            nn.ReLU(),\n",
    "        )\n",
    "        # Four stages of two residual blocks each (18-layer configuration)\n",
    "        self.layer1 = self.make_layer(ResidualBlock, 64,  2, stride=1)\n",
    "        self.layer2 = self.make_layer(ResidualBlock, 128, 2, stride=2)\n",
    "        self.layer3 = self.make_layer(ResidualBlock, 256, 2, stride=2)\n",
    "        self.layer4 = self.make_layer(ResidualBlock, 512, 2, stride=2)\n",
    "        self.fc = nn.Linear(512, num_classes)\n",
    "\n",
    "    def make_layer(self, block, channels, num_blocks, stride):\n",
    "        # Only the first block of a stage downsamples; the rest keep stride 1.\n",
    "        strides = [stride] + [1] * (num_blocks - 1)   #strides=[1,1]\n",
    "        layers = []\n",
    "        for stride in strides:\n",
    "            layers.append(block(self.inchannel, channels, stride))\n",
    "            self.inchannel = channels\n",
    "        return nn.Sequential(*layers)\n",
    "\n",
    "    def forward(self, x):\n",
    "        out = self.conv1(x)\n",
    "        out = self.layer1(out)\n",
    "        out = self.layer2(out)\n",
    "        out = self.layer3(out)\n",
    "        out = self.layer4(out)\n",
    "        # Global 4x4 average pooling, then flatten to the 512-dim feature vector\n",
    "        out = F.avg_pool2d(out, 4)\n",
    "        hidden_out = out.view(out.size(0), -1)\n",
    "        logits = self.fc(hidden_out)\n",
    "        return logits, hidden_out\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "cf2a1024-cfab-41e2-91fa-85ea4bec3afa",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hyperparameters for the adversarial-regularization training run\n",
    "schedule=[20,35]  # epochs at which the classifier lr is multiplied by gamma\n",
    "batch_size=64\n",
    "alpha=3  # passed to train_privatly (presumably the privacy-regularization weight -- confirm)\n",
    "lr=0.1\n",
    "gamma=0.1  # lr decay factor\n",
    "tr_epochs=50\n",
    "at_lr=0.0001  # NOTE(review): overrides the at_lr set in the earlier config cell\n",
    "at_schedule=[100]\n",
    "at_gamma=0.5\n",
    "at_epochs=200\n",
    "n_classes=100\n",
    "use_cuda=True"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "1160b110",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "ResNet_Adv(\n",
       "  (conv1): Sequential(\n",
       "    (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "    (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "    (2): ReLU()\n",
       "  )\n",
       "  (layer1): Sequential(\n",
       "    (0): ResidualBlock(\n",
       "      (left): Sequential(\n",
       "        (0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (2): ReLU(inplace=True)\n",
       "        (3): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (4): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "      (shortcut): Sequential()\n",
       "    )\n",
       "    (1): ResidualBlock(\n",
       "      (left): Sequential(\n",
       "        (0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (2): ReLU(inplace=True)\n",
       "        (3): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (4): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "      (shortcut): Sequential()\n",
       "    )\n",
       "  )\n",
       "  (layer2): Sequential(\n",
       "    (0): ResidualBlock(\n",
       "      (left): Sequential(\n",
       "        (0): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "        (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (2): ReLU(inplace=True)\n",
       "        (3): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (4): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "      (shortcut): Sequential(\n",
       "        (0): Conv2d(64, 128, kernel_size=(1, 1), stride=(2, 2), bias=False)\n",
       "        (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "    )\n",
       "    (1): ResidualBlock(\n",
       "      (left): Sequential(\n",
       "        (0): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (2): ReLU(inplace=True)\n",
       "        (3): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (4): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "      (shortcut): Sequential()\n",
       "    )\n",
       "  )\n",
       "  (layer3): Sequential(\n",
       "    (0): ResidualBlock(\n",
       "      (left): Sequential(\n",
       "        (0): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "        (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (2): ReLU(inplace=True)\n",
       "        (3): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (4): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "      (shortcut): Sequential(\n",
       "        (0): Conv2d(128, 256, kernel_size=(1, 1), stride=(2, 2), bias=False)\n",
       "        (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "    )\n",
       "    (1): ResidualBlock(\n",
       "      (left): Sequential(\n",
       "        (0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (2): ReLU(inplace=True)\n",
       "        (3): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (4): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "      (shortcut): Sequential()\n",
       "    )\n",
       "  )\n",
       "  (layer4): Sequential(\n",
       "    (0): ResidualBlock(\n",
       "      (left): Sequential(\n",
       "        (0): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (2): ReLU(inplace=True)\n",
       "        (3): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (4): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "      (shortcut): Sequential(\n",
       "        (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)\n",
       "        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "    )\n",
       "    (1): ResidualBlock(\n",
       "      (left): Sequential(\n",
       "        (0): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "        (2): ReLU(inplace=True)\n",
       "        (3): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "        (4): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "      (shortcut): Sequential()\n",
       "    )\n",
       "  )\n",
       "  (fc): Linear(in_features=512, out_features=100, bias=True)\n",
       ")"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Target model (ResNet-18 style) for CIFAR-100\n",
    "model = ResNet_Adv(ResidualBlock, 100)\n",
    "# NOTE(review): lr here is 0.1, which is very high for Adam (its conventional\n",
    "# default is 1e-3); the low accuracies in the training log below suggest this\n",
    "# may be hurting convergence -- confirm intended.\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=lr)\n",
    "criterion = nn.CrossEntropyLoss()\n",
    "model.to(device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "55a9d3f0",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "InferenceAttack_HZ(\n",
       "  (features): Sequential(\n",
       "    (0): Linear(in_features=100, out_features=1024, bias=True)\n",
       "    (1): ReLU()\n",
       "    (2): Linear(in_features=1024, out_features=512, bias=True)\n",
       "    (3): ReLU()\n",
       "    (4): Linear(in_features=512, out_features=64, bias=True)\n",
       "    (5): ReLU()\n",
       "  )\n",
       "  (labels): Sequential(\n",
       "    (0): Linear(in_features=100, out_features=128, bias=True)\n",
       "    (1): ReLU()\n",
       "    (2): Linear(in_features=128, out_features=64, bias=True)\n",
       "    (3): ReLU()\n",
       "  )\n",
       "  (combine): Sequential(\n",
       "    (0): Linear(in_features=128, out_features=512, bias=True)\n",
       "    (1): ReLU()\n",
       "    (2): Linear(in_features=512, out_features=256, bias=True)\n",
       "    (3): ReLU()\n",
       "    (4): Linear(in_features=256, out_features=128, bias=True)\n",
       "    (5): ReLU()\n",
       "    (6): Linear(in_features=128, out_features=64, bias=True)\n",
       "    (7): ReLU()\n",
       "    (8): Linear(in_features=64, out_features=1, bias=True)\n",
       "  )\n",
       "  (output): Sigmoid()\n",
       ")"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Attack model (membership inference); trained with MSE against member/non-member labels\n",
    "attack_model = InferenceAttack_HZ(n_classes)\n",
    "attack_optimizer=torch.optim.Adam(attack_model.parameters(),lr=at_lr)\n",
    "attack_criterion=nn.MSELoss()\n",
    "attack_model.to(device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "6c0a4ed7-5f1d-4cd5-affc-c65a374d371e",
   "metadata": {},
   "outputs": [],
   "source": [
    "weight_path = os.path.join(weight_dir, \"{}_AdvReg_protected_model.pth\".format(DATA_NAME))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "4f502e94",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "----> NORMAL TRAINING MODE: c_batches 390 \n",
      "Initial test acc 0.82 train att acc 0.506548381928869\n",
      "epoch 1 | tr_acc 1.00 | val acc 1.06 | best val acc 1.06 | best te acc 0.94 | attack avg acc 0.52 | attack val acc 0.51\n",
      "epoch 2 | tr_acc 1.65 | val acc 1.90 | best val acc 1.90 | best te acc 1.54 | attack avg acc 0.52 | attack val acc 0.51\n",
      "epoch 3 | tr_acc 2.29 | val acc 2.44 | best val acc 2.44 | best te acc 2.06 | attack avg acc 0.52 | attack val acc 0.51\n",
      "epoch 4 | tr_acc 2.53 | val acc 2.70 | best val acc 2.70 | best te acc 2.54 | attack avg acc 0.52 | attack val acc 0.51\n",
      "epoch 5 | tr_acc 2.64 | val acc 2.68 | best val acc 2.70 | best te acc 2.54 | attack avg acc 0.52 | attack val acc 0.51\n",
      "epoch 6 | tr_acc 4.06 | val acc 3.88 | best val acc 3.88 | best te acc 3.74 | attack avg acc 0.52 | attack val acc 0.51\n",
      "epoch 7 | tr_acc 5.16 | val acc 5.02 | best val acc 5.02 | best te acc 5.02 | attack avg acc 0.52 | attack val acc 0.52\n",
      "epoch 8 | tr_acc 5.50 | val acc 5.26 | best val acc 5.26 | best te acc 5.58 | attack avg acc 0.52 | attack val acc 0.52\n",
      "epoch 9 | tr_acc 5.55 | val acc 5.50 | best val acc 5.50 | best te acc 5.42 | attack avg acc 0.52 | attack val acc 0.52\n",
      "epoch 10 | tr_acc 5.53 | val acc 5.46 | best val acc 5.50 | best te acc 5.42 | attack avg acc 0.52 | attack val acc 0.51\n",
      "epoch 11 | tr_acc 5.78 | val acc 5.48 | best val acc 5.50 | best te acc 5.42 | attack avg acc 0.53 | attack val acc 0.51\n",
      "epoch 12 | tr_acc 6.02 | val acc 5.46 | best val acc 5.50 | best te acc 5.42 | attack avg acc 0.53 | attack val acc 0.51\n",
      "epoch 13 | tr_acc 5.99 | val acc 5.14 | best val acc 5.50 | best te acc 5.42 | attack avg acc 0.53 | attack val acc 0.52\n",
      "epoch 14 | tr_acc 5.60 | val acc 5.14 | best val acc 5.50 | best te acc 5.42 | attack avg acc 0.53 | attack val acc 0.52\n",
      "epoch 15 | tr_acc 7.37 | val acc 6.68 | best val acc 6.68 | best te acc 6.42 | attack avg acc 0.54 | attack val acc 0.53\n",
      "epoch 16 | tr_acc 7.71 | val acc 6.22 | best val acc 6.68 | best te acc 6.42 | attack avg acc 0.54 | attack val acc 0.53\n",
      "epoch 17 | tr_acc 7.57 | val acc 6.56 | best val acc 6.68 | best te acc 6.42 | attack avg acc 0.55 | attack val acc 0.54\n",
      "epoch 18 | tr_acc 7.98 | val acc 6.46 | best val acc 6.68 | best te acc 6.42 | attack avg acc 0.56 | attack val acc 0.56\n",
      "epoch 19 | tr_acc 8.25 | val acc 6.32 | best val acc 6.68 | best te acc 6.42 | attack avg acc 0.57 | attack val acc 0.56\n",
      "Epoch 20 Local lr 0.010000\n",
      "Epoch 20 lr 0.001000\n",
      "epoch 20 | tr_acc 11.61 | val acc 7.12 | best val acc 7.12 | best te acc 7.76 | attack avg acc 0.58 | attack val acc 0.58\n",
      "epoch 21 | tr_acc 12.28 | val acc 7.36 | best val acc 7.36 | best te acc 8.08 | attack avg acc 0.59 | attack val acc 0.59\n",
      "epoch 22 | tr_acc 12.67 | val acc 7.16 | best val acc 7.36 | best te acc 8.08 | attack avg acc 0.60 | attack val acc 0.59\n",
      "epoch 23 | tr_acc 12.96 | val acc 7.52 | best val acc 7.52 | best te acc 8.16 | attack avg acc 0.60 | attack val acc 0.59\n",
      "epoch 24 | tr_acc 13.34 | val acc 7.56 | best val acc 7.56 | best te acc 8.42 | attack avg acc 0.61 | attack val acc 0.60\n",
      "epoch 25 | tr_acc 13.66 | val acc 7.56 | best val acc 7.56 | best te acc 8.42 | attack avg acc 0.61 | attack val acc 0.60\n",
      "epoch 26 | tr_acc 13.96 | val acc 7.86 | best val acc 7.86 | best te acc 8.52 | attack avg acc 0.62 | attack val acc 0.61\n",
      "epoch 27 | tr_acc 14.18 | val acc 8.00 | best val acc 8.00 | best te acc 8.76 | attack avg acc 0.62 | attack val acc 0.61\n",
      "epoch 28 | tr_acc 14.45 | val acc 8.00 | best val acc 8.00 | best te acc 8.76 | attack avg acc 0.63 | attack val acc 0.61\n",
      "epoch 29 | tr_acc 14.65 | val acc 7.76 | best val acc 8.00 | best te acc 8.76 | attack avg acc 0.63 | attack val acc 0.61\n",
      "epoch 30 | tr_acc 14.70 | val acc 7.90 | best val acc 8.00 | best te acc 8.76 | attack avg acc 0.64 | attack val acc 0.61\n",
      "epoch 31 | tr_acc 14.78 | val acc 8.08 | best val acc 8.08 | best te acc 8.94 | attack avg acc 0.64 | attack val acc 0.62\n",
      "epoch 32 | tr_acc 15.15 | val acc 8.08 | best val acc 8.08 | best te acc 8.94 | attack avg acc 0.64 | attack val acc 0.62\n",
      "epoch 33 | tr_acc 15.28 | val acc 8.26 | best val acc 8.26 | best te acc 9.12 | attack avg acc 0.65 | attack val acc 0.62\n",
      "epoch 34 | tr_acc 15.43 | val acc 8.36 | best val acc 8.36 | best te acc 9.16 | attack avg acc 0.65 | attack val acc 0.62\n",
      "Epoch 35 Local lr 0.000100\n",
      "Epoch 35 lr 0.000010\n",
      "epoch 35 | tr_acc 15.47 | val acc 8.34 | best val acc 8.36 | best te acc 9.16 | attack avg acc 0.66 | attack val acc 0.62\n",
      "epoch 36 | tr_acc 15.48 | val acc 8.36 | best val acc 8.36 | best te acc 9.16 | attack avg acc 0.66 | attack val acc 0.62\n",
      "epoch 37 | tr_acc 15.44 | val acc 8.34 | best val acc 8.36 | best te acc 9.16 | attack avg acc 0.66 | attack val acc 0.62\n",
      "epoch 38 | tr_acc 15.45 | val acc 8.28 | best val acc 8.36 | best te acc 9.16 | attack avg acc 0.67 | attack val acc 0.62\n",
      "epoch 39 | tr_acc 15.47 | val acc 8.28 | best val acc 8.36 | best te acc 9.16 | attack avg acc 0.67 | attack val acc 0.62\n",
      "epoch 40 | tr_acc 15.46 | val acc 8.26 | best val acc 8.36 | best te acc 9.16 | attack avg acc 0.68 | attack val acc 0.63\n",
      "epoch 41 | tr_acc 15.47 | val acc 8.26 | best val acc 8.36 | best te acc 9.16 | attack avg acc 0.68 | attack val acc 0.63\n",
      "epoch 42 | tr_acc 15.48 | val acc 8.32 | best val acc 8.36 | best te acc 9.16 | attack avg acc 0.68 | attack val acc 0.63\n",
      "epoch 43 | tr_acc 15.46 | val acc 8.30 | best val acc 8.36 | best te acc 9.16 | attack avg acc 0.69 | attack val acc 0.64\n",
      "epoch 44 | tr_acc 15.46 | val acc 8.28 | best val acc 8.36 | best te acc 9.16 | attack avg acc 0.69 | attack val acc 0.63\n",
      "epoch 45 | tr_acc 15.46 | val acc 8.26 | best val acc 8.36 | best te acc 9.16 | attack avg acc 0.70 | attack val acc 0.64\n",
      "epoch 46 | tr_acc 15.47 | val acc 8.26 | best val acc 8.36 | best te acc 9.16 | attack avg acc 0.70 | attack val acc 0.64\n",
      "epoch 47 | tr_acc 15.46 | val acc 8.26 | best val acc 8.36 | best te acc 9.16 | attack avg acc 0.70 | attack val acc 0.64\n",
      "epoch 48 | tr_acc 15.48 | val acc 8.28 | best val acc 8.36 | best te acc 9.16 | attack avg acc 0.71 | attack val acc 0.64\n",
      "epoch 49 | tr_acc 15.44 | val acc 8.28 | best val acc 8.36 | best te acc 9.16 | attack avg acc 0.71 | attack val acc 0.65\n"
     ]
    }
   ],
   "source": [
    "# Adversarial-regularization training: alternate attack-model updates with\n",
    "# privacy-regularized classifier updates, checkpointing on best validation accuracy.\n",
    "best_acc=0\n",
    "best_test_acc=0    \n",
    "for epoch in range(50):\n",
    "    if epoch in schedule:\n",
    "        # Decay the classifier lr once at each scheduled epoch.\n",
    "        # Fix: the decay was previously applied a second time inside the else-branch\n",
    "        # below, so the lr was multiplied by gamma**2 at epochs 20 and 35\n",
    "        # (the old log shows 0.1 -> 0.001 at epoch 20 instead of 0.1 -> 0.01).\n",
    "        for param_group in optimizer.param_groups:\n",
    "            param_group['lr'] *= gamma\n",
    "            print('Epoch %d Local lr %f'%(epoch,param_group['lr']))\n",
    "\n",
    "    c_batches = len(tr_cls_data_tensor)//batch_size\n",
    "    if epoch == 0:\n",
    "        print('----> NORMAL TRAINING MODE: c_batches %d '%(c_batches))\n",
    "\n",
    "        # Warm-up: one plain training pass, then pre-train the attack model for 5 rounds.\n",
    "        train_loss, train_acc = train(tr_cls_data_tensor,tr_cls_label_tensor,\n",
    "                                          model,criterion,optimizer,epoch,use_cuda,debug_='MEDIUM')    \n",
    "        test_loss, test_acc = test(te_data_tensor,te_label_tensor,model,criterion,use_cuda)    \n",
    "        for i in range(5):\n",
    "            at_loss, at_acc = train_attack(tr_cls_data_tensor,tr_cls_label_tensor,\n",
    "                                           ref_data_tensor,ref_label_tensor,model,attack_model,criterion,\n",
    "                                           attack_criterion,optimizer,attack_optimizer,epoch,use_cuda,debug_='MEDIUM')    \n",
    "\n",
    "        print('Initial test acc {} train att acc {}'.format(test_acc, at_acc))\n",
    "\n",
    "    else:\n",
    "        # Alternate: 52 attack-model batches, then 2 privacy-regularized\n",
    "        # classifier batches, for c_batches//2 rounds per epoch.\n",
    "        att_accs =[]\n",
    "\n",
    "        rounds=(c_batches//2)\n",
    "\n",
    "        for i in range(rounds): \n",
    "            at_loss, at_acc = train_attack(tr_cls_data_tensor,tr_cls_label_tensor,\n",
    "                                           ref_data_tensor,ref_label_tensor,\n",
    "                                           model,attack_model,criterion,attack_criterion,optimizer,\n",
    "                                           attack_optimizer,epoch,use_cuda,52,(i*52)%c_batches,batch_size=batch_size)\n",
    "\n",
    "            att_accs.append(at_acc)\n",
    "\n",
    "            tr_loss, tr_acc = train_privatly(tr_cls_data_tensor,tr_cls_label_tensor,model,\n",
    "                                             attack_model,criterion,optimizer,epoch,use_cuda,\n",
    "                                             2,(2*i)%c_batches,alpha=alpha,batch_size=batch_size)\n",
    "\n",
    "        # Evaluate; checkpoint whenever validation accuracy improves.\n",
    "        train_loss,train_acc = test(tr_cls_data_tensor,tr_cls_label_tensor,model,criterion,use_cuda)\n",
    "        val_loss, val_acc = test(val_data_tensor,val_label_tensor,model,criterion,use_cuda)\n",
    "        is_best = (val_acc > best_acc)\n",
    "\n",
    "        if is_best:\n",
    "            _, best_test_acc = test(te_data_tensor,te_label_tensor,model,criterion,use_cuda)\n",
    "            torch.save(model.state_dict(), weight_path)\n",
    "\n",
    "        best_acc=max(val_acc, best_acc)\n",
    "\n",
    "        # Attack accuracy on held-out members vs. test non-members.\n",
    "        at_val_loss, at_val_acc = test_attack(tr_cls_te_at_data_tensor,tr_cls_te_at_label_tensor,\n",
    "                                                 te_data_tensor,te_label_tensor,\n",
    "                                                 model,attack_model,criterion,attack_criterion,\n",
    "                                                 optimizer,attack_optimizer,epoch,use_cuda,debug_='MEDIUM')\n",
    "\n",
    "        att_epoch_acc = np.mean(att_accs)\n",
    "\n",
    "        print('epoch %d | tr_acc %.2f | val acc %.2f | best val acc %.2f | best te acc %.2f | attack avg acc %.2f | attack val acc %.2f'%(epoch,train_acc,val_acc,best_acc,best_test_acc,att_epoch_acc,at_val_acc), flush=True)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "6c326b61-73d2-44b3-b00d-4cc891590a22",
   "metadata": {},
   "outputs": [],
   "source": [
    "torch.save(model.state_dict(), weight_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bc489b00-3278-4c10-a88f-bf7690f21945",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "db286dc3-7515-4d39-a948-6df14839adb5",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b4b2f2bf-59c2-4144-a298-ae57238ffb12",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bd14d0f9-8c91-479b-bd4f-2bea9b423181",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1bd395b5-5a48-422e-aedd-770d4ce27b67",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2d7f23cd-c7b8-49a0-aaa8-badeab694b96",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9167945a-1f10-4b3b-9d37-eb0ae0e99034",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6af0cf8f-0b01-4a3e-8114-476f5a77dab4",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7e68be0c-88dd-440c-96a6-e61511f58d36",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "00f163e7-cc10-4776-8cfb-ab0d82d465ec",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Shadow-model attack related parameters\n",
    "# NOTE(review): duplicates the settings already defined in the configuration cell above\n",
    "sha_models = [1,2,3] #[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]\n",
    "tar_model = 0\n",
    "attack_class = False # whether to run the attack separately for each class\n",
    "attack_lr = 5e-4"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "898fdd41-aa83-470f-9340-737c6ca5926e",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " Error: \n",
      " Accuracy: 100.0%  \n",
      "\n",
      " Error: \n",
      " Accuracy: 86.1%  \n",
      "\n",
      "(60000, 100) (60000,) (60000,)\n",
      " Error: \n",
      " Accuracy: 100.0%  \n",
      "\n",
      " Error: \n",
      " Accuracy: 86.5%  \n",
      "\n",
      "(60000, 100) (60000,) (60000,)\n",
      " Error: \n",
      " Accuracy: 100.0%  \n",
      "\n",
      " Error: \n",
      " Accuracy: 85.6%  \n",
      "\n",
      "(60000, 100) (60000,) (60000,)\n",
      " Error: \n",
      " Accuracy: 100.0%  \n",
      "\n",
      " Error: \n",
      " Accuracy: 86.4%  \n",
      "\n",
      "test data: (60000, 100) (60000,) (60000,)\n",
      "(180000, 100) (180000,)\n",
      "Attack_NN(\n",
      "  (linear_relu_stack): Sequential(\n",
      "    (0): Linear(in_features=3, out_features=128, bias=True)\n",
      "    (1): ReLU()\n",
      "    (2): Linear(in_features=128, out_features=64, bias=True)\n",
      "    (3): ReLU()\n",
      "    (4): Linear(in_features=64, out_features=1, bias=True)\n",
      "  )\n",
      ")\n",
      "Epoch 1\n",
      "-------------------------------\n",
      "loss: 0.692178  [  128/180000]\n",
      "loss: 0.679669  [12928/180000]\n",
      "loss: 0.638517  [25728/180000]\n",
      "loss: 0.671093  [38528/180000]\n",
      "loss: 0.649479  [51328/180000]\n",
      "loss: 0.616589  [64128/180000]\n",
      "loss: 0.645340  [76928/180000]\n",
      "loss: 0.608652  [89728/180000]\n",
      "loss: 0.664588  [102528/180000]\n",
      "loss: 0.635919  [115328/180000]\n",
      "loss: 0.627693  [128128/180000]\n",
      "loss: 0.626373  [140928/180000]\n",
      "loss: 0.603865  [153728/180000]\n",
      "loss: 0.601968  [166528/180000]\n",
      "loss: 0.564938  [179328/180000]\n",
      "Epoch 2\n",
      "-------------------------------\n",
      "loss: 0.607695  [  128/180000]\n",
      "loss: 0.589512  [12928/180000]\n",
      "loss: 0.625688  [25728/180000]\n",
      "loss: 0.598496  [38528/180000]\n",
      "loss: 0.597043  [51328/180000]\n",
      "loss: 0.584404  [64128/180000]\n",
      "loss: 0.579402  [76928/180000]\n",
      "loss: 0.587012  [89728/180000]\n",
      "loss: 0.587760  [102528/180000]\n",
      "loss: 0.538621  [115328/180000]\n",
      "loss: 0.566334  [128128/180000]\n",
      "loss: 0.592615  [140928/180000]\n",
      "loss: 0.645705  [153728/180000]\n",
      "loss: 0.570828  [166528/180000]\n",
      "loss: 0.617829  [179328/180000]\n",
      "Epoch 3\n",
      "-------------------------------\n",
      "loss: 0.557973  [  128/180000]\n",
      "loss: 0.600727  [12928/180000]\n",
      "loss: 0.648576  [25728/180000]\n",
      "loss: 0.644039  [38528/180000]\n",
      "loss: 0.602370  [51328/180000]\n",
      "loss: 0.591716  [64128/180000]\n",
      "loss: 0.564164  [76928/180000]\n",
      "loss: 0.549686  [89728/180000]\n",
      "loss: 0.574960  [102528/180000]\n",
      "loss: 0.613366  [115328/180000]\n",
      "loss: 0.572345  [128128/180000]\n",
      "loss: 0.578038  [140928/180000]\n",
      "loss: 0.569988  [153728/180000]\n",
      "loss: 0.553373  [166528/180000]\n",
      "loss: 0.597740  [179328/180000]\n",
      "Epoch 4\n",
      "-------------------------------\n",
      "loss: 0.556713  [  128/180000]\n",
      "loss: 0.557935  [12928/180000]\n",
      "loss: 0.636687  [25728/180000]\n",
      "loss: 0.574605  [38528/180000]\n",
      "loss: 0.541326  [51328/180000]\n",
      "loss: 0.570857  [64128/180000]\n",
      "loss: 0.579717  [76928/180000]\n",
      "loss: 0.520429  [89728/180000]\n",
      "loss: 0.577327  [102528/180000]\n",
      "loss: 0.568543  [115328/180000]\n",
      "loss: 0.583936  [128128/180000]\n",
      "loss: 0.589795  [140928/180000]\n",
      "loss: 0.568992  [153728/180000]\n",
      "loss: 0.560260  [166528/180000]\n",
      "loss: 0.568070  [179328/180000]\n",
      "Epoch 5\n",
      "-------------------------------\n",
      "loss: 0.480818  [  128/180000]\n",
      "loss: 0.561312  [12928/180000]\n",
      "loss: 0.572248  [25728/180000]\n",
      "loss: 0.586159  [38528/180000]\n",
      "loss: 0.560196  [51328/180000]\n",
      "loss: 0.548017  [64128/180000]\n",
      "loss: 0.634654  [76928/180000]\n",
      "loss: 0.602951  [89728/180000]\n",
      "loss: 0.577776  [102528/180000]\n",
      "loss: 0.540998  [115328/180000]\n",
      "loss: 0.563018  [128128/180000]\n",
      "loss: 0.567193  [140928/180000]\n",
      "loss: 0.573698  [153728/180000]\n",
      "loss: 0.513808  [166528/180000]\n",
      "loss: 0.583898  [179328/180000]\n",
      "Epoch 6\n",
      "-------------------------------\n",
      "loss: 0.542175  [  128/180000]\n",
      "loss: 0.577052  [12928/180000]\n",
      "loss: 0.549741  [25728/180000]\n",
      "loss: 0.520761  [38528/180000]\n",
      "loss: 0.540878  [51328/180000]\n",
      "loss: 0.546975  [64128/180000]\n",
      "loss: 0.525636  [76928/180000]\n",
      "loss: 0.541622  [89728/180000]\n",
      "loss: 0.563217  [102528/180000]\n",
      "loss: 0.548091  [115328/180000]\n",
      "loss: 0.506990  [128128/180000]\n",
      "loss: 0.588633  [140928/180000]\n",
      "loss: 0.515701  [153728/180000]\n",
      "loss: 0.571733  [166528/180000]\n",
      "loss: 0.610672  [179328/180000]\n",
      "Epoch 7\n",
      "-------------------------------\n",
      "loss: 0.549782  [  128/180000]\n",
      "loss: 0.583193  [12928/180000]\n",
      "loss: 0.562400  [25728/180000]\n",
      "loss: 0.544498  [38528/180000]\n",
      "loss: 0.548601  [51328/180000]\n",
      "loss: 0.553747  [64128/180000]\n",
      "loss: 0.552036  [76928/180000]\n",
      "loss: 0.457663  [89728/180000]\n",
      "loss: 0.548108  [102528/180000]\n",
      "loss: 0.520736  [115328/180000]\n",
      "loss: 0.512336  [128128/180000]\n",
      "loss: 0.508384  [140928/180000]\n",
      "loss: 0.579815  [153728/180000]\n",
      "loss: 0.583940  [166528/180000]\n",
      "loss: 0.519881  [179328/180000]\n",
      "Epoch 8\n",
      "-------------------------------\n",
      "loss: 0.576850  [  128/180000]\n",
      "loss: 0.544228  [12928/180000]\n",
      "loss: 0.539137  [25728/180000]\n",
      "loss: 0.525447  [38528/180000]\n",
      "loss: 0.540975  [51328/180000]\n",
      "loss: 0.571383  [64128/180000]\n",
      "loss: 0.578074  [76928/180000]\n",
      "loss: 0.600654  [89728/180000]\n",
      "loss: 0.629757  [102528/180000]\n",
      "loss: 0.568104  [115328/180000]\n",
      "loss: 0.514361  [128128/180000]\n",
      "loss: 0.574920  [140928/180000]\n",
      "loss: 0.557598  [153728/180000]\n",
      "loss: 0.576352  [166528/180000]\n",
      "loss: 0.529656  [179328/180000]\n",
      "Epoch 9\n",
      "-------------------------------\n",
      "loss: 0.598573  [  128/180000]\n",
      "loss: 0.541129  [12928/180000]\n",
      "loss: 0.542118  [25728/180000]\n",
      "loss: 0.500200  [38528/180000]\n",
      "loss: 0.557113  [51328/180000]\n",
      "loss: 0.522864  [64128/180000]\n",
      "loss: 0.570460  [76928/180000]\n",
      "loss: 0.537543  [89728/180000]\n",
      "loss: 0.562481  [102528/180000]\n",
      "loss: 0.486002  [115328/180000]\n",
      "loss: 0.581082  [128128/180000]\n",
      "loss: 0.524254  [140928/180000]\n",
      "loss: 0.497224  [153728/180000]\n",
      "loss: 0.531130  [166528/180000]\n",
      "loss: 0.602292  [179328/180000]\n",
      "Epoch 10\n",
      "-------------------------------\n",
      "loss: 0.527199  [  128/180000]\n",
      "loss: 0.528569  [12928/180000]\n",
      "loss: 0.585492  [25728/180000]\n",
      "loss: 0.556861  [38528/180000]\n",
      "loss: 0.516679  [51328/180000]\n",
      "loss: 0.553512  [64128/180000]\n",
      "loss: 0.555788  [76928/180000]\n",
      "loss: 0.549020  [89728/180000]\n",
      "loss: 0.532064  [102528/180000]\n",
      "loss: 0.528076  [115328/180000]\n",
      "loss: 0.527173  [128128/180000]\n",
      "loss: 0.519394  [140928/180000]\n",
      "loss: 0.557099  [153728/180000]\n",
      "loss: 0.524646  [166528/180000]\n",
      "loss: 0.494473  [179328/180000]\n",
      "Epoch 11\n",
      "-------------------------------\n",
      "loss: 0.527644  [  128/180000]\n",
      "loss: 0.543228  [12928/180000]\n",
      "loss: 0.568050  [25728/180000]\n",
      "loss: 0.571949  [38528/180000]\n",
      "loss: 0.543377  [51328/180000]\n",
      "loss: 0.508831  [64128/180000]\n",
      "loss: 0.555512  [76928/180000]\n",
      "loss: 0.549045  [89728/180000]\n",
      "loss: 0.566607  [102528/180000]\n",
      "loss: 0.540534  [115328/180000]\n",
      "loss: 0.585648  [128128/180000]\n",
      "loss: 0.535476  [140928/180000]\n",
      "loss: 0.535758  [153728/180000]\n",
      "loss: 0.463853  [166528/180000]\n",
      "loss: 0.516379  [179328/180000]\n",
      "Epoch 12\n",
      "-------------------------------\n",
      "loss: 0.539661  [  128/180000]\n",
      "loss: 0.519415  [12928/180000]\n",
      "loss: 0.466971  [25728/180000]\n",
      "loss: 0.498673  [38528/180000]\n",
      "loss: 0.535272  [51328/180000]\n",
      "loss: 0.541544  [64128/180000]\n",
      "loss: 0.527801  [76928/180000]\n",
      "loss: 0.529566  [89728/180000]\n",
      "loss: 0.506339  [102528/180000]\n",
      "loss: 0.537406  [115328/180000]\n",
      "loss: 0.544260  [128128/180000]\n",
      "loss: 0.512371  [140928/180000]\n",
      "loss: 0.520853  [153728/180000]\n",
      "loss: 0.557741  [166528/180000]\n",
      "loss: 0.545111  [179328/180000]\n",
      "Epoch 13\n",
      "-------------------------------\n",
      "loss: 0.561377  [  128/180000]\n",
      "loss: 0.548296  [12928/180000]\n",
      "loss: 0.509818  [25728/180000]\n",
      "loss: 0.493680  [38528/180000]\n",
      "loss: 0.523736  [51328/180000]\n",
      "loss: 0.502112  [64128/180000]\n",
      "loss: 0.525584  [76928/180000]\n",
      "loss: 0.550576  [89728/180000]\n",
      "loss: 0.520104  [102528/180000]\n",
      "loss: 0.556416  [115328/180000]\n",
      "loss: 0.547569  [128128/180000]\n",
      "loss: 0.544736  [140928/180000]\n",
      "loss: 0.499856  [153728/180000]\n",
      "loss: 0.490763  [166528/180000]\n",
      "loss: 0.502138  [179328/180000]\n",
      "Epoch 14\n",
      "-------------------------------\n",
      "loss: 0.560313  [  128/180000]\n",
      "loss: 0.511777  [12928/180000]\n",
      "loss: 0.566744  [25728/180000]\n",
      "loss: 0.495598  [38528/180000]\n",
      "loss: 0.542571  [51328/180000]\n",
      "loss: 0.491474  [64128/180000]\n",
      "loss: 0.492126  [76928/180000]\n",
      "loss: 0.517302  [89728/180000]\n",
      "loss: 0.521649  [102528/180000]\n",
      "loss: 0.548064  [115328/180000]\n",
      "loss: 0.542288  [128128/180000]\n",
      "loss: 0.488309  [140928/180000]\n",
      "loss: 0.520752  [153728/180000]\n",
      "loss: 0.503039  [166528/180000]\n",
      "loss: 0.538550  [179328/180000]\n",
      "Epoch 15\n",
      "-------------------------------\n",
      "loss: 0.600101  [  128/180000]\n",
      "loss: 0.522076  [12928/180000]\n",
      "loss: 0.519832  [25728/180000]\n",
      "loss: 0.499095  [38528/180000]\n",
      "loss: 0.599110  [51328/180000]\n",
      "loss: 0.513101  [64128/180000]\n",
      "loss: 0.534936  [76928/180000]\n",
      "loss: 0.610446  [89728/180000]\n",
      "loss: 0.495227  [102528/180000]\n",
      "loss: 0.494452  [115328/180000]\n",
      "loss: 0.497007  [128128/180000]\n",
      "loss: 0.516128  [140928/180000]\n",
      "loss: 0.489247  [153728/180000]\n",
      "loss: 0.538035  [166528/180000]\n",
      "loss: 0.536011  [179328/180000]\n",
      "Epoch 16\n",
      "-------------------------------\n",
      "loss: 0.524847  [  128/180000]\n",
      "loss: 0.567137  [12928/180000]\n",
      "loss: 0.457283  [25728/180000]\n",
      "loss: 0.580601  [38528/180000]\n",
      "loss: 0.510475  [51328/180000]\n",
      "loss: 0.521139  [64128/180000]\n",
      "loss: 0.568631  [76928/180000]\n",
      "loss: 0.515461  [89728/180000]\n",
      "loss: 0.532122  [102528/180000]\n",
      "loss: 0.579581  [115328/180000]\n",
      "loss: 0.585434  [128128/180000]\n",
      "loss: 0.543558  [140928/180000]\n",
      "loss: 0.544199  [153728/180000]\n",
      "loss: 0.560532  [166528/180000]\n",
      "loss: 0.573760  [179328/180000]\n",
      "Epoch 17\n",
      "-------------------------------\n",
      "loss: 0.528693  [  128/180000]\n",
      "loss: 0.558951  [12928/180000]\n",
      "loss: 0.514840  [25728/180000]\n",
      "loss: 0.537844  [38528/180000]\n",
      "loss: 0.534346  [51328/180000]\n",
      "loss: 0.485574  [64128/180000]\n",
      "loss: 0.523641  [76928/180000]\n",
      "loss: 0.512700  [89728/180000]\n",
      "loss: 0.561295  [102528/180000]\n",
      "loss: 0.521920  [115328/180000]\n",
      "loss: 0.554924  [128128/180000]\n",
      "loss: 0.538334  [140928/180000]\n",
      "loss: 0.497130  [153728/180000]\n",
      "loss: 0.525787  [166528/180000]\n",
      "loss: 0.466724  [179328/180000]\n",
      "Epoch 18\n",
      "-------------------------------\n",
      "loss: 0.539609  [  128/180000]\n",
      "loss: 0.524771  [12928/180000]\n",
      "loss: 0.537350  [25728/180000]\n",
      "loss: 0.551195  [38528/180000]\n",
      "loss: 0.602762  [51328/180000]\n",
      "loss: 0.528449  [64128/180000]\n",
      "loss: 0.525443  [76928/180000]\n",
      "loss: 0.542089  [89728/180000]\n",
      "loss: 0.467068  [102528/180000]\n",
      "loss: 0.551997  [115328/180000]\n",
      "loss: 0.474899  [128128/180000]\n",
      "loss: 0.534259  [140928/180000]\n",
      "loss: 0.585871  [153728/180000]\n",
      "loss: 0.501277  [166528/180000]\n",
      "loss: 0.565697  [179328/180000]\n",
      "Epoch 19\n",
      "-------------------------------\n",
      "loss: 0.568361  [  128/180000]\n",
      "loss: 0.459692  [12928/180000]\n",
      "loss: 0.513390  [25728/180000]\n",
      "loss: 0.598237  [38528/180000]\n",
      "loss: 0.491413  [51328/180000]\n",
      "loss: 0.525292  [64128/180000]\n",
      "loss: 0.528340  [76928/180000]\n",
      "loss: 0.479048  [89728/180000]\n",
      "loss: 0.466378  [102528/180000]\n",
      "loss: 0.571103  [115328/180000]\n",
      "loss: 0.567989  [128128/180000]\n",
      "loss: 0.480991  [140928/180000]\n",
      "loss: 0.492088  [153728/180000]\n",
      "loss: 0.471824  [166528/180000]\n",
      "loss: 0.520217  [179328/180000]\n",
      "Epoch 20\n",
      "-------------------------------\n",
      "loss: 0.513820  [  128/180000]\n",
      "loss: 0.562828  [12928/180000]\n",
      "loss: 0.507186  [25728/180000]\n",
      "loss: 0.441118  [38528/180000]\n",
      "loss: 0.442047  [51328/180000]\n",
      "loss: 0.538008  [64128/180000]\n",
      "loss: 0.487798  [76928/180000]\n",
      "loss: 0.533605  [89728/180000]\n",
      "loss: 0.509296  [102528/180000]\n",
      "loss: 0.494498  [115328/180000]\n",
      "loss: 0.446526  [128128/180000]\n",
      "loss: 0.551475  [140928/180000]\n",
      "loss: 0.511399  [153728/180000]\n",
      "loss: 0.509330  [166528/180000]\n",
      "loss: 0.497837  [179328/180000]\n",
      "Epoch 21\n",
      "-------------------------------\n",
      "loss: 0.520833  [  128/180000]\n",
      "loss: 0.540774  [12928/180000]\n",
      "loss: 0.483045  [25728/180000]\n",
      "loss: 0.537405  [38528/180000]\n",
      "loss: 0.511164  [51328/180000]\n",
      "loss: 0.576127  [64128/180000]\n",
      "loss: 0.535647  [76928/180000]\n",
      "loss: 0.500550  [89728/180000]\n",
      "loss: 0.535845  [102528/180000]\n",
      "loss: 0.473627  [115328/180000]\n",
      "loss: 0.511621  [128128/180000]\n",
      "loss: 0.563330  [140928/180000]\n",
      "loss: 0.495143  [153728/180000]\n",
      "loss: 0.520799  [166528/180000]\n",
      "loss: 0.520323  [179328/180000]\n",
      "Epoch 22\n",
      "-------------------------------\n",
      "loss: 0.483153  [  128/180000]\n",
      "loss: 0.575229  [12928/180000]\n",
      "loss: 0.517314  [25728/180000]\n",
      "loss: 0.506367  [38528/180000]\n",
      "loss: 0.574711  [51328/180000]\n",
      "loss: 0.545646  [64128/180000]\n",
      "loss: 0.534490  [76928/180000]\n",
      "loss: 0.556098  [89728/180000]\n",
      "loss: 0.508448  [102528/180000]\n",
      "loss: 0.541952  [115328/180000]\n",
      "loss: 0.472817  [128128/180000]\n",
      "loss: 0.490764  [140928/180000]\n",
      "loss: 0.532177  [153728/180000]\n",
      "loss: 0.480999  [166528/180000]\n",
      "loss: 0.613568  [179328/180000]\n",
      "Epoch 23\n",
      "-------------------------------\n",
      "loss: 0.545812  [  128/180000]\n",
      "loss: 0.493703  [12928/180000]\n",
      "loss: 0.531221  [25728/180000]\n",
      "loss: 0.509376  [38528/180000]\n",
      "loss: 0.557678  [51328/180000]\n",
      "loss: 0.474531  [64128/180000]\n",
      "loss: 0.513076  [76928/180000]\n",
      "loss: 0.510191  [89728/180000]\n",
      "loss: 0.530341  [102528/180000]\n",
      "loss: 0.565157  [115328/180000]\n",
      "loss: 0.522555  [128128/180000]\n",
      "loss: 0.541194  [140928/180000]\n",
      "loss: 0.516058  [153728/180000]\n",
      "loss: 0.532228  [166528/180000]\n",
      "loss: 0.586371  [179328/180000]\n",
      "Epoch 24\n",
      "-------------------------------\n",
      "loss: 0.595261  [  128/180000]\n",
      "loss: 0.553554  [12928/180000]\n",
      "loss: 0.535434  [25728/180000]\n",
      "loss: 0.528159  [38528/180000]\n",
      "loss: 0.516706  [51328/180000]\n",
      "loss: 0.563343  [64128/180000]\n",
      "loss: 0.597455  [76928/180000]\n",
      "loss: 0.526819  [89728/180000]\n",
      "loss: 0.538346  [102528/180000]\n",
      "loss: 0.567910  [115328/180000]\n",
      "loss: 0.491660  [128128/180000]\n",
      "loss: 0.627485  [140928/180000]\n",
      "loss: 0.546463  [153728/180000]\n",
      "loss: 0.491630  [166528/180000]\n",
      "loss: 0.499463  [179328/180000]\n",
      "Epoch 25\n",
      "-------------------------------\n",
      "loss: 0.529714  [  128/180000]\n",
      "loss: 0.448389  [12928/180000]\n",
      "loss: 0.557759  [25728/180000]\n",
      "loss: 0.569565  [38528/180000]\n",
      "loss: 0.531342  [51328/180000]\n",
      "loss: 0.465962  [64128/180000]\n",
      "loss: 0.508984  [76928/180000]\n",
      "loss: 0.504909  [89728/180000]\n",
      "loss: 0.554103  [102528/180000]\n",
      "loss: 0.526165  [115328/180000]\n",
      "loss: 0.568659  [128128/180000]\n",
      "loss: 0.532132  [140928/180000]\n",
      "loss: 0.483879  [153728/180000]\n",
      "loss: 0.480506  [166528/180000]\n",
      "loss: 0.485733  [179328/180000]\n",
      "Epoch 26\n",
      "-------------------------------\n",
      "loss: 0.557155  [  128/180000]\n",
      "loss: 0.544009  [12928/180000]\n",
      "loss: 0.514484  [25728/180000]\n",
      "loss: 0.526932  [38528/180000]\n",
      "loss: 0.558321  [51328/180000]\n",
      "loss: 0.537308  [64128/180000]\n",
      "loss: 0.451420  [76928/180000]\n",
      "loss: 0.512661  [89728/180000]\n",
      "loss: 0.523511  [102528/180000]\n",
      "loss: 0.527188  [115328/180000]\n",
      "loss: 0.517202  [128128/180000]\n",
      "loss: 0.538821  [140928/180000]\n",
      "loss: 0.476409  [153728/180000]\n",
      "loss: 0.559035  [166528/180000]\n",
      "loss: 0.545163  [179328/180000]\n",
      "Epoch 27\n",
      "-------------------------------\n",
      "loss: 0.516670  [  128/180000]\n",
      "loss: 0.513129  [12928/180000]\n",
      "loss: 0.540655  [25728/180000]\n",
      "loss: 0.530605  [38528/180000]\n",
      "loss: 0.473913  [51328/180000]\n",
      "loss: 0.456683  [64128/180000]\n",
      "loss: 0.516149  [76928/180000]\n",
      "loss: 0.498479  [89728/180000]\n",
      "loss: 0.491405  [102528/180000]\n",
      "loss: 0.557030  [115328/180000]\n",
      "loss: 0.511159  [128128/180000]\n",
      "loss: 0.510257  [140928/180000]\n",
      "loss: 0.497002  [153728/180000]\n",
      "loss: 0.506351  [166528/180000]\n",
      "loss: 0.520367  [179328/180000]\n",
      "Epoch 28\n",
      "-------------------------------\n",
      "loss: 0.510411  [  128/180000]\n",
      "loss: 0.500082  [12928/180000]\n",
      "loss: 0.561125  [25728/180000]\n",
      "loss: 0.575988  [38528/180000]\n",
      "loss: 0.525007  [51328/180000]\n",
      "loss: 0.502262  [64128/180000]\n",
      "loss: 0.584420  [76928/180000]\n",
      "loss: 0.533117  [89728/180000]\n",
      "loss: 0.518266  [102528/180000]\n",
      "loss: 0.459403  [115328/180000]\n",
      "loss: 0.538909  [128128/180000]\n",
      "loss: 0.504476  [140928/180000]\n",
      "loss: 0.529941  [153728/180000]\n",
      "loss: 0.505543  [166528/180000]\n",
      "loss: 0.459995  [179328/180000]\n",
      "Epoch 29\n",
      "-------------------------------\n",
      "loss: 0.557840  [  128/180000]\n",
      "loss: 0.523261  [12928/180000]\n",
      "loss: 0.522659  [25728/180000]\n",
      "loss: 0.558945  [38528/180000]\n",
      "loss: 0.519697  [51328/180000]\n",
      "loss: 0.526490  [64128/180000]\n",
      "loss: 0.506182  [76928/180000]\n",
      "loss: 0.523221  [89728/180000]\n",
      "loss: 0.577575  [102528/180000]\n",
      "loss: 0.504221  [115328/180000]\n",
      "loss: 0.585902  [128128/180000]\n",
      "loss: 0.554150  [140928/180000]\n",
      "loss: 0.494072  [153728/180000]\n",
      "loss: 0.489267  [166528/180000]\n",
      "loss: 0.504771  [179328/180000]\n",
      "Epoch 30\n",
      "-------------------------------\n",
      "loss: 0.514896  [  128/180000]\n",
      "loss: 0.502269  [12928/180000]\n",
      "loss: 0.539919  [25728/180000]\n",
      "loss: 0.471524  [38528/180000]\n",
      "loss: 0.508323  [51328/180000]\n",
      "loss: 0.598964  [64128/180000]\n",
      "loss: 0.526505  [76928/180000]\n",
      "loss: 0.516731  [89728/180000]\n",
      "loss: 0.501969  [102528/180000]\n",
      "loss: 0.543586  [115328/180000]\n",
      "loss: 0.517659  [128128/180000]\n",
      "loss: 0.561710  [140928/180000]\n",
      "loss: 0.596708  [153728/180000]\n",
      "loss: 0.498403  [166528/180000]\n",
      "loss: 0.460575  [179328/180000]\n",
      "Done!\n",
      "Train data:\n",
      "AUC value is: 0.7083724585130489\n",
      "Accuracy is: 0.7137388888888889\n",
      "Test data:\n",
      "AUC value is: 0.7162211072681857\n",
      "Accuracy is: 0.68355\n"
     ]
    }
   ],
   "source": [
    "# Create the corresponding dataloader\n",
    "all_data = CustomDataset(X_data, Y_data, model_transform)\n",
    "all_dataloader = DataLoader(all_data, batch_size=BATCH_SIZE, shuffle=False)\n",
    "batch_size = BATCH_SIZE\n",
    "model = MODEL\n",
    "epochs = EPOCHS\n",
    "data_name = DATA_NAME \n",
    "weight_part = \"{}_{}_epoch{}_model\".format(data_name, model, epochs)\n",
    "loss_data_all = np.load('../outputs_save/Purchase100_limited_loss.npy')\n",
    "score_all = np.load('../outputs_save/Purchase100_limited_score.npy')\n",
    "conf_data_all = np.load('../outputs_save/Purchase100_limited_conf.npy')\n",
    "pri_risk_all = get_risk_score(loss_data_all, train_keep)\n",
    "pri_risk_rank = np.argsort(pri_risk_all)\n",
    "pri_risk_rank = np.flip(pri_risk_rank)\n",
    "# Train the shadow-attack model\n",
    "attack_model = shadow_attack(sha_models=sha_models, tar_model=tar_model, model_num=num_shadowsets, weight_dir=weight_dir, data_name=DATA_NAME, model=MODEL, model_transform=model_transform, \n",
    "                  model_epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=attack_lr, attack_epochs=30, attack_transform=attack_transform, \n",
    "                  device=device, prop_keep=0.5, top_k=3, attack_class=attack_class)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "6808b796-8911-48e8-811d-3c138ffdd1e6",
   "metadata": {},
   "outputs": [],
   "source": [
    "top_risk = 2000"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "8c18d295-8628-453d-b3ee-292c8ea0cebb",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "PurchaseClassifier(\n",
       "  (features): Sequential(\n",
       "    (0): Linear(in_features=600, out_features=1024, bias=True)\n",
       "    (1): Tanh()\n",
       "    (2): Linear(in_features=1024, out_features=512, bias=True)\n",
       "    (3): Tanh()\n",
       "    (4): Linear(in_features=512, out_features=256, bias=True)\n",
       "    (5): Tanh()\n",
       "    (6): Linear(in_features=256, out_features=128, bias=True)\n",
       "    (7): Tanh()\n",
       "  )\n",
       "  (classifier): Linear(in_features=128, out_features=100, bias=True)\n",
       ")"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "TargetModel = PurchaseClassifier()\n",
    "weight_path = os.path.join(weight_dir, \"{}_AdvReg_protected_model.pth\".format(data_name))\n",
    "TargetModel.load_state_dict(torch.load(weight_path))\n",
    "TargetModel.to(device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "a6625034-f82f-400c-a9e9-2e93a0b9b564",
   "metadata": {},
   "outputs": [],
   "source": [
    "loss_fn = nn.CrossEntropyLoss()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "4c579718-2fa9-4feb-993c-56a39087f77c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Test Error: \n",
      " Accuracy: 87.9%, Avg loss: 0.426414 \n",
      "\n",
      "average_base_attack 0.5652\n",
      "risk_base_attack 0.728\n",
      " Error: \n",
      " Accuracy: 87.9%  \n",
      "\n",
      "AUC value is: 0.5278994002388874\n",
      "Accuracy is: 0.4911333333333333\n",
      "average_lira_attack 0.4911333333333333\n",
      "risk_lira_attack 0.53\n",
      " Error: \n",
      " Accuracy: 87.9%  \n",
      "\n",
      "AUC value is: 0.5005107252298263\n",
      "Accuracy is: 0.5105\n",
      "risk_shadow_attack 0.5105\n",
      "AUC value is: 0.5020922803928838\n",
      "Accuracy is: 0.49978333333333336\n",
      "average_shadow_attack 0.49978333333333336\n"
     ]
    }
   ],
   "source": [
    "# Run the baseline attack\n",
    "pred_result = base_attack(all_dataloader, TargetModel, loss_fn, device)\n",
    "accuracy = metrics.accuracy_score(train_keep[0], pred_result)\n",
    "print('average_base_attack',accuracy)\n",
    "pred_clip = pred_result[pri_risk_rank[:top_risk]]\n",
    "mem_clip = train_keep[0][pri_risk_rank[:top_risk]]\n",
    "accuracy = metrics.accuracy_score(mem_clip, pred_clip)\n",
    "print('risk_base_attack', accuracy)\n",
    "# Run the likelihood-ratio (LiRA) attack\n",
    "_, score = get_score_from_model(all_dataloader, TargetModel, device)\n",
    "pred_result = LIRA_attack(train_keep, score_all, score, train_keep[0])\n",
    "accuracy = evaluate_ROC(pred_result, train_keep[0], threshold=0)\n",
    "print('average_lira_attack',accuracy)\n",
    "pred_clip = pred_result[pri_risk_rank[:top_risk]]\n",
    "mem_clip = train_keep[0][pri_risk_rank[:top_risk]]\n",
    "pred_clip = pred_clip > 0\n",
    "accuracy = metrics.accuracy_score(mem_clip, pred_clip)\n",
    "print('risk_lira_attack',accuracy)\n",
    "# Run the shadow-model attack\n",
    "\n",
    "# Extract the model's confidence outputs on the dataset\n",
    "targetX, _ = get_model_pred(all_dataloader, TargetModel, device)\n",
    "targetX = targetX.detach().cpu().numpy()\n",
    "targetX = targetX.astype(np.float32)\n",
    "\n",
    "top_k = 3\n",
    "if top_k:\n",
    "    # keep only the top-3 values of the probability vector\n",
    "    targetX, _ = get_top_k_conf(top_k, targetX, targetX)\n",
    "\n",
    "shadow_attack_data = CustomDataset(targetX, train_keep[0], attack_transform)\n",
    "shadow_attack_dataloader = DataLoader(shadow_attack_data, batch_size=batch_size, shuffle=False)\n",
    "attack_test_scores, attack_test_mem = get_attack_pred(shadow_attack_dataloader, attack_model, device)\n",
    "attack_test_scores, attack_test_mem = attack_test_scores.detach().cpu().numpy(), attack_test_mem.detach().cpu().numpy()\n",
    "\n",
    "pred_clip = attack_test_scores[pri_risk_rank[:top_risk]]\n",
    "mem_clip = train_keep[0][pri_risk_rank[:top_risk]]\n",
    "accuracy = evaluate_ROC(pred_clip, mem_clip)\n",
    "print('risk_shadow_attack',accuracy)\n",
    "\n",
    "accuracy = evaluate_ROC(attack_test_scores, attack_test_mem)\n",
    "print('average_shadow_attack',accuracy)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "9870c0ca-4bb3-4c82-a5fb-d71f83918c09",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Test Error: \n",
      " Accuracy: 81.5%, Avg loss: 0.593766 \n",
      "\n",
      "model_test_correct 0.8145\n"
     ]
    }
   ],
   "source": [
    "correct = evaluate(test_dataloader, TargetModel, loss_fn, device)\n",
    "print('model_test_correct',correct)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "45e6c2ce-097e-4aad-879c-7e3a343d38b5",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "94ad2d94-9a36-4894-bbad-b30375a0b218",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "opacus",
   "language": "python",
   "name": "opacus"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
