{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "6b439e84-8c6b-4126-8586-d7a1a7c7614e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# This notebook runs the related experiments on MNIST and collects the experimental data."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "daf0a1f8-ef8d-4246-b945-b6e5d905f5f5",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Baseline attack, threshold-based attack, likelihood-ratio attack; add noise robustness"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "3a72ff09-8088-4a9d-b1f7-0825ff54c477",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Experiment goals, tunable parameters, and expected outputs\n",
    "# Expected results:\n",
    "# 1. Loss-distribution histograms for high-risk points\n",
    "# 2. Base attack success rate as a function of risk\n",
    "# 3. Points selected by the outlier neighbor-count / distance parameters\n",
    "# 4. For the same attack, success-rate comparison of outlier vs. risk-metric points\n",
    "# 5. For the same risk points, success-rate comparison of different attack methods\n",
    "# 6. Effect of the model training-set size\n",
    "# 7. Effect of the number of reference models\n",
    "\n",
    "\n",
    "# Experimental parameters to control:\n",
    "# 1. Attack method\n",
    "# 2. Outlier proportion\n",
    "# 3. Model training-set size"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "b61f101e-1c1e-41df-b80b-65d1e3d6eab8",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "from torch import nn\n",
    "from torch.utils.data import DataLoader\n",
    "from torch.utils.data import Dataset\n",
    "from torchvision import datasets\n",
    "from torchvision import transforms\n",
    "from torchvision.transforms import ToTensor\n",
    "import torchvision.transforms as tt\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn import metrics\n",
    "\n",
    "import os"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "37ddaa77-35ce-49b9-acfd-a7799aadd9a5",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 导入自己创建的python文件\n",
    "import sys\n",
    "sys.path.append(\"..\") # Adds higher directory to python modules path.\n",
    "from frame.DataProcess import *\n",
    "from frame.TrainUtil import *\n",
    "from frame.LIRAAttack import *\n",
    "from frame.AttackUtil import *\n",
    "from frame.ShadowAttack import *\n",
    "from frame.ThresholdAttack import *\n",
    "from frame.LabelAttack import *"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "4636a18e-244e-4a21-ba21-591c0295ce7a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Experiment configuration (constants a reader might tune)\n",
    "LEARNING_RATE = 1e-3\n",
    "BATCH_SIZE = 128\n",
    "MODEL = 'CNN'\n",
    "EPOCHS = 50\n",
    "DATA_NAME = 'MNIST' \n",
    "# NOTE: this assignment was duplicated in the original cell; kept once\n",
    "weight_dir = os.path.join('..', 'weights_for_exp', DATA_NAME)\n",
    "num_shadowsets = 100\n",
    "seed = 0\n",
    "prop_keep = 0.5\n",
    "\n",
    "model_transform = transforms.Compose([\n",
    "    transforms.ToTensor(),\n",
    "    transforms.Normalize(mean=[0.5], std=[0.5])\n",
    "    ])\n",
    "attack_transform = transforms.Compose([])\n",
    "device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n",
    "\n",
    "# Shadow-model attack parameters\n",
    "sha_models = [1,2,3] #[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]\n",
    "tar_model = 0\n",
    "attack_class = False # whether to attack each class separately\n",
    "attack_lr = 5e-4"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "2dd6c9bd-7872-412b-baae-4024e612e35b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the full training dataset plus the per-shadow-model keep mask\n",
    "X_data, Y_data, train_keep = load_MNIST_keep(num_shadowsets, prop_keep, seed)\n",
    "all_data = CustomDataset(X_data, Y_data, model_transform)\n",
    "all_dataloader = DataLoader(all_data, batch_size=64, shuffle=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "0e839f4d-93e7-48f0-af07-349ebef049a2",
   "metadata": {},
   "outputs": [],
   "source": [
    "batch_size = BATCH_SIZE\n",
    "model = MODEL\n",
    "epochs = EPOCHS\n",
    "data_name = DATA_NAME \n",
    "weight_part = \"{}_{}_epoch{}_model\".format(data_name, model, epochs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8462191b-4d92-402e-bd5b-03ab395bb76d",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "485fc1c7-ab63-4c7b-a8b5-94744b0a9257",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Test Error: \n",
      " Accuracy: 99.0%, Avg loss: 0.073895 \n",
      "\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "0.9900166666666667"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Load one trained model and sanity-check its accuracy on the full dataset\n",
    "Model = globals()['create_{}_model'.format(model)](Y_data.max()+1, data_name)\n",
    "weight_path = os.path.join(weight_dir, weight_part + \"{}.pth\".format(0))\n",
    "# map_location lets a GPU-saved checkpoint load on CPU-only machines too\n",
    "Model.load_state_dict(torch.load(weight_path, map_location=device))\n",
    "Model.to(device)\n",
    "\n",
    "loss_fn = nn.CrossEntropyLoss()\n",
    "evaluate(all_dataloader, Model, loss_fn, device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "37a742e3-cb9c-47df-87a3-965d473885db",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "M_CNN(\n",
       "  (conv1): Conv2d(1, 16, kernel_size=(8, 8), stride=(2, 2), padding=(3, 3))\n",
       "  (conv2): Conv2d(16, 32, kernel_size=(4, 4), stride=(2, 2))\n",
       "  (fc1): Linear(in_features=512, out_features=32, bias=True)\n",
       "  (fc2): Linear(in_features=32, out_features=10, bias=True)\n",
       ")"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "Model"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8bf4ae41-482b-464b-b2df-f79924f00fe2",
   "metadata": {},
   "source": [
    "## 脆弱点的两种提取方式"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a20f6954-5154-483b-a64b-8ab536b4989b",
   "metadata": {},
   "source": [
    "### 风险指标"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "90586eba-a7f4-4a19-a8a8-54d5d8633693",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# # 加载所有参考模型上的损失、置信度、得分输出\n",
    "# conf_data_all, label_data, score_all = load_score_data_all(X_data, Y_data, weight_dir, num_shadowsets, data_name, model, weight_part, model_transform, batch_size, device)\n",
    "# loss_fn = nn.CrossEntropyLoss(reduction='none')\n",
    "# loss_data_all, label_data = load_loss_data_all(X_data, Y_data, loss_fn, weight_dir, num_shadowsets, data_name, model, weight_part, model_transform, batch_size, device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "5fbe2bf6-1750-4c23-91a8-80caf9b0825e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# np.save('../outputs_save/MNIST_loss.npy', loss_data_all)\n",
    "# np.save('../outputs_save/MNIST_score.npy', score_all)\n",
    "# np.save('../outputs_save/MNIST_conf.npy', conf_data_all)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "5a6e9c56-60bd-4bfc-a912-5e3b5eb52a4e",
   "metadata": {},
   "outputs": [],
   "source": [
    "loss_data_all = np.load('../outputs_save/MNIST_loss.npy')\n",
    "score_all = np.load('../outputs_save/MNIST_score.npy')\n",
    "conf_data_all = np.load('../outputs_save/MNIST_conf.npy')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "8b45bd6b-c391-4ced-99bd-57b0b8c3c30b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Compute the privacy-risk metric for every data point\n",
    "# (a vulnerability score per point, derived from per-model losses)\n",
    "pri_risk_all = get_risk_score(loss_data_all, train_keep)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "d8258b9c-d6f3-4e64-ab4f-4b068bb4b209",
   "metadata": {},
   "outputs": [],
   "source": [
    "pri_risk_rank = np.argsort(pri_risk_all)\n",
    "pri_risk_rank = np.flip(pri_risk_rank)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "16cc7233-a9a0-4e74-990d-70e0e6ebfb74",
   "metadata": {},
   "source": [
    "### 离群点"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "e7e42cbf-89f1-4d1f-bbd0-dc87252c22cb",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 准备好logits的输出\n",
    "# 计算余弦相似度 5w*5w的大型矩阵\n",
    "# 邻居距离alpha，邻居数量"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "8d4c50e3-0ca3-463c-ad2a-4e4fd987b12e",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# logits_data_all, label_data = load_logits_data_all(X_data, Y_data, weight_dir, num_shadowsets, data_name, model, weight_part, model_transform, batch_size, device)\n",
    "# np.save('../outputs_save/MNIST_logits.npy', logits_data_all)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "a9314bc7-f529-46f5-834b-d1c387effef7",
   "metadata": {},
   "outputs": [],
   "source": [
    "logits_data_all = np.load('../outputs_save/MNIST_logits.npy')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "50d2365f-9e9d-4ffc-a9f7-16c1d3ed5646",
   "metadata": {},
   "outputs": [],
   "source": [
    "# # 按照k个模型进行拼接\n",
    "# k = 10\n",
    "# for i in range(k):\n",
    "#     if i == 0:\n",
    "#         combine_features = logits_data_all[i]\n",
    "#     else:\n",
    "#         combine_features = np.concatenate((combine_features, logits_data_all[i]),axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "1308ab27-3798-4a3c-8dd2-b5ce0234042e",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# # 数据量太大，不能保存所有的余弦相似度，只能需要时计算\n",
    "# alpha_list = [0.05, 0.1, 0.12, 0.15, 0.2, 0.3]\n",
    "# n_num_list = []\n",
    "# for i in range(combine_features.shape[0]):\n",
    "# # for i in range(10000):\n",
    "#     n_count = [0 for _ in alpha_list]\n",
    "#     if i%50 == 0:\n",
    "#         print(f\"compute to: {i}\")\n",
    "#     for j in range(combine_features.shape[0]):\n",
    "#         # 余弦距离的计算\n",
    "#         vec1 = combine_features[i]\n",
    "#         vec2 = combine_features[j]        \n",
    "#         cos_sim = vec1.dot(vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))\n",
    "#         cos_dis = 0.5 - 0.5 * cos_sim\n",
    "#         for m in range(len(alpha_list)):\n",
    "#             if (cos_dis < alpha_list[m]):\n",
    "#                 n_count[m] += 1\n",
    "#     n_num_list.append(n_count)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "ae886e98-bf25-4c0d-a2fa-f2c948574aa8",
   "metadata": {},
   "outputs": [],
   "source": [
    "# neigh_data_all = np.array(n_num_list)\n",
    "# np.save('../outputs_save/MNIST_neigh.npy', neigh_data_all)\n",
    "neigh_data_all = np.load('../outputs_save/MNIST_neigh.npy')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "79592536-1181-4647-9e40-fc959daa668e",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(60000, 6)"
      ]
     },
     "execution_count": 20,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "neigh_data_all.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "9d815b0b-568b-46d5-bdeb-fba567a8e213",
   "metadata": {},
   "outputs": [],
   "source": [
    "neigh_num = neigh_data_all[:,1]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "7bc974ea-ffb5-432f-b9da-ab7abce10270",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(60000,)"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "neigh_num.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "ddd4b8fa-1653-4bab-bf8f-4e2a116b17b7",
   "metadata": {},
   "outputs": [],
   "source": [
    "risk_rank = np.argsort(neigh_num)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "789a839a-cb2b-4da7-a1a2-ecac971344ef",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([47759,   418, 38408, ..., 50329, 47227, 47918])"
      ]
     },
     "execution_count": 24,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "risk_rank"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "cce6c5e2-b030-4874-b08e-bf8466815484",
   "metadata": {},
   "source": [
    "## 针对脆弱点展开攻击"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "14cb98e0-737b-4ed0-93bd-a52681974606",
   "metadata": {},
   "source": [
    "### 基线攻击"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "cb9c0f1c-535d-4b62-b414-9ae31c88b033",
   "metadata": {},
   "outputs": [],
   "source": [
    "x = 6000"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "id": "8c66acdc-645a-4cf8-b0f6-babc51bce18a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 预测正确的判断为成员，预测不正确的判断为非成员"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "id": "ecc0a131-e71a-4cd1-b5a3-a6c750abc382",
   "metadata": {},
   "outputs": [],
   "source": [
    "tmp = conf_data_all.argmax(2)\n",
    "pred_result = (tmp == Y_data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "id": "bc3f0070-f74a-4470-a2a2-ce343fe6db29",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(100, 60000)"
      ]
     },
     "execution_count": 28,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tmp.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "id": "670ebafe-5710-4b0c-b05d-008a6098dbc8",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "outlier 0.5472233333333333\n",
      "MPLR 0.5623116666666667\n",
      "base 0.5065051666666667\n"
     ]
    }
   ],
   "source": [
    "# Baseline attack accuracy on three selections: outlier-ranked points,\n",
    "# risk-metric (MPLR) ranked points, and the full dataset (base).\n",
    "def _subset_acc(pred, mem, cols=None):\n",
    "    \"\"\"Flattened accuracy of membership predictions, optionally on a column subset.\"\"\"\n",
    "    if cols is not None:\n",
    "        pred, mem = pred[:, cols], mem[:, cols]\n",
    "    return metrics.accuracy_score(mem.flatten(), pred.flatten())\n",
    "\n",
    "print(\"outlier\", _subset_acc(pred_result, train_keep, risk_rank[:x]))\n",
    "print(\"MPLR\", _subset_acc(pred_result, train_keep, pri_risk_rank[:x]))\n",
    "print(\"base\", _subset_acc(pred_result, train_keep))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c08b9a40-068e-4411-8a51-13a05337a7cd",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "5cf3f668-0b0b-41b1-bc5f-73f89bed9169",
   "metadata": {},
   "source": [
    "### 阈值攻击"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "id": "61729130-6c97-4a19-b7c5-767f6ba1ae70",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 基于损失的阈值去做攻击，阈值如何确定？两个均值的均值"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "id": "27c52cfb-4617-47af-80ab-51c0a9055bba",
   "metadata": {},
   "outputs": [],
   "source": [
    "x = 6000"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "id": "0dd9f430-c918-47cd-9e8d-4ee1b35bf143",
   "metadata": {},
   "outputs": [],
   "source": [
    "loss_threshold = get_loss_threshold(loss_data_all, train_keep)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "id": "18f8fd0e-f86a-47fc-8105-7a3a53b10b28",
   "metadata": {},
   "outputs": [],
   "source": [
    "pred_result = loss_data_all < loss_threshold"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "id": "ef57e35b-e721-4cea-ad31-6b7cc4ef5362",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "outlier 0.56732\n",
      "MPLR 0.5984833333333334\n",
      "base 0.5098468333333334\n"
     ]
    }
   ],
   "source": [
    "# Threshold attack accuracy on three selections: outlier-ranked points,\n",
    "# risk-metric (MPLR) ranked points, and the full dataset (base).\n",
    "def _subset_acc(pred, mem, cols=None):\n",
    "    \"\"\"Flattened accuracy of membership predictions, optionally on a column subset.\"\"\"\n",
    "    if cols is not None:\n",
    "        pred, mem = pred[:, cols], mem[:, cols]\n",
    "    return metrics.accuracy_score(mem.flatten(), pred.flatten())\n",
    "\n",
    "print(\"outlier\", _subset_acc(pred_result, train_keep, risk_rank[:x]))\n",
    "print(\"MPLR\", _subset_acc(pred_result, train_keep, pri_risk_rank[:x]))\n",
    "print(\"base\", _subset_acc(pred_result, train_keep))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "736cdbcb-9dd3-45ab-bcc4-37c7f490a588",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8259bc0c-fbc1-4025-bf4a-b194236a39ff",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "786da5f1-1ae9-4f9b-ae7f-e18702ea84a7",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "77876b58-669a-483a-b80c-8b041486eda0",
   "metadata": {},
   "source": [
    "### 似然比攻击"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "f1304cdd-cd2c-437f-af20-bdd3eb68caab",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 先对所有目标数据执行攻击，然后根据脆弱点筛选获取对应的攻击成功率或者ROC\n",
    "# 输出两个，memlabel和pred_result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "id": "c9ed8ba5-393b-4bdd-8a65-bbd6bbcaf4b6",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "AUC value is: 0.5704715637885492\n",
      "Accuracy is: 0.5523833333333333\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "0.5523833333333333"
      ]
     },
     "execution_count": 43,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "pred_result = LIRA_attack(train_keep, score_all, score_all[0], train_keep[0])\n",
    "evaluate_ROC(pred_result, train_keep[0], threshold=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "id": "df7f2d04-8094-48e7-91a3-b744ad2028d5",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0\n",
      "1\n",
      "2\n",
      "3\n",
      "4\n",
      "5\n",
      "6\n",
      "7\n",
      "8\n",
      "9\n",
      "10\n",
      "11\n",
      "12\n",
      "13\n",
      "14\n",
      "15\n",
      "16\n",
      "17\n",
      "18\n",
      "19\n",
      "20\n",
      "21\n",
      "22\n",
      "23\n",
      "24\n",
      "25\n",
      "26\n",
      "27\n",
      "28\n",
      "29\n",
      "30\n",
      "31\n",
      "32\n",
      "33\n",
      "34\n",
      "35\n",
      "36\n",
      "37\n",
      "38\n",
      "39\n",
      "40\n",
      "41\n",
      "42\n",
      "43\n",
      "44\n",
      "45\n",
      "46\n",
      "47\n",
      "48\n",
      "49\n",
      "50\n",
      "51\n",
      "52\n",
      "53\n",
      "54\n",
      "55\n",
      "56\n",
      "57\n",
      "58\n",
      "59\n",
      "60\n",
      "61\n",
      "62\n",
      "63\n",
      "64\n",
      "65\n",
      "66\n",
      "67\n",
      "68\n",
      "69\n",
      "70\n",
      "71\n",
      "72\n",
      "73\n",
      "74\n",
      "75\n",
      "76\n",
      "77\n",
      "78\n",
      "79\n",
      "80\n",
      "81\n",
      "82\n",
      "83\n",
      "84\n",
      "85\n",
      "86\n",
      "87\n",
      "88\n",
      "89\n",
      "90\n",
      "91\n",
      "92\n",
      "93\n",
      "94\n",
      "95\n",
      "96\n",
      "97\n",
      "98\n",
      "99\n"
     ]
    }
   ],
   "source": [
    "# Run the LiRA attack once per shadow model and stack the per-model\n",
    "# predictions into a (num_shadowsets, num_samples) array.\n",
    "# Rows are collected in a list and concatenated once at the end --\n",
    "# np.concatenate inside the loop recopies the growing array every step.\n",
    "pred_rows = []\n",
    "for i in range(num_shadowsets):\n",
    "    pred_result = LIRA_attack(train_keep, score_all, score_all[i], train_keep[i])\n",
    "    pred_rows.append(pred_result.reshape(1, len(pred_result)))\n",
    "    print(i)\n",
    "pred_result_all = np.concatenate(pred_rows, axis=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 66,
   "id": "29a1918c-1d13-46f7-89d2-2f430a3196cd",
   "metadata": {},
   "outputs": [],
   "source": [
    "lower_bound, upper_bound = 1, 10\n",
    "# np.flatnonzero yields a plain 1-D index array; np.where returns a tuple,\n",
    "# which introduces a spurious axis when used for column indexing\n",
    "indices = np.flatnonzero((pri_risk_all >= lower_bound) & (pri_risk_all <= upper_bound))\n",
    "pred_clip = pred_result_all[:, indices]\n",
    "mem_clip = train_keep[:, indices]\n",
    "pred_clip = pred_clip.flatten()\n",
    "mem_clip = mem_clip.flatten()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 67,
   "id": "9ff058e4-a5a5-421a-a656-fff6c8cf0c24",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.8338550724637681\n"
     ]
    }
   ],
   "source": [
    "pred_clip = pred_clip > 0\n",
    "accuracy = metrics.accuracy_score(mem_clip, pred_clip)\n",
    "print(accuracy)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5a9fc266-09b8-4f5d-89d4-94b64f3fa79d",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "id": "7aa8ea3d-20a2-4557-afda-f2707149645e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# X_axi = []\n",
    "# Y_axi = []"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "id": "13beba74-a997-4fe2-9235-4031e6c89645",
   "metadata": {},
   "outputs": [],
   "source": [
    "# for i in range(10000):\n",
    "#     pred_t = pred_result_all[:,i]\n",
    "#     pred_t = pred_t > 0\n",
    "#     mem_t = train_keep[:,i]\n",
    "#     risk_t = pri_risk_all[i]\n",
    "#     acc = metrics.accuracy_score(mem_t, pred_t)\n",
    "#     X_axi.append(risk_t)\n",
    "#     Y_axi.append(acc)\n",
    "\n",
    "# df=pd.DataFrame({'risk': X_axi, 'attack_acc': Y_axi })\n",
    "# df.to_csv('MNIST_risk_att.csv', index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 51,
   "id": "4cedb4b3-6c50-4b70-9984-e2cb539a1c99",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(100, 60000)"
      ]
     },
     "execution_count": 51,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "pred_result_all.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 64,
   "id": "9a529dff-8655-4113-af5c-7cb62cd248e9",
   "metadata": {},
   "outputs": [],
   "source": [
    "x = 6000"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 65,
   "id": "5fdb2095-a077-40a4-b954-c4947b015080",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "outlier 0.59532\n",
      "MPLR 0.6147266666666666\n",
      "base 0.5505388333333333\n"
     ]
    }
   ],
   "source": [
    "# LiRA attack accuracy on three selections: outlier-ranked points,\n",
    "# risk-metric (MPLR) ranked points, and the full dataset (base).\n",
    "def _lira_subset_acc(pred, mem, cols=None):\n",
    "    \"\"\"Accuracy of sign-thresholded LiRA scores, optionally on a column subset.\"\"\"\n",
    "    if cols is not None:\n",
    "        pred, mem = pred[:, cols], mem[:, cols]\n",
    "    return metrics.accuracy_score(mem.flatten(), pred.flatten() > 0)\n",
    "\n",
    "print(\"outlier\", _lira_subset_acc(pred_result_all, train_keep, risk_rank[:x]))\n",
    "print(\"MPLR\", _lira_subset_acc(pred_result_all, train_keep, pri_risk_rank[:x]))\n",
    "print(\"base\", _lira_subset_acc(pred_result_all, train_keep))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "18d603e5-9327-46b5-8f8b-ce7f3bd144e7",
   "metadata": {},
   "source": [
    "###### "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "26be2f5a-29e3-45c7-b6f0-c5382828f63f",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "457be14e-4b30-488a-aab0-0ae0b345e27e",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "8ea3f420-9a04-420f-83b6-06ea1658f6c7",
   "metadata": {},
   "source": [
    "### 影子模型攻击"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "7c5e0038-e2b6-4997-a64f-c9e80ed2c9c6",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 在所有数据上执行一次攻击"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7c749854-a9ed-4ffe-826b-b61f1e00547b",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "attack_model = shadow_attack(sha_models=sha_models, tar_model=tar_model, model_num=num_shadowsets, weight_dir=weight_dir, data_name=DATA_NAME, model=MODEL, model_transform=model_transform, \n",
    "                  model_epochs=EPOCHS, batch_size=BATCH_SIZE, learning_rate=attack_lr, attack_epochs=30, attack_transform=attack_transform, \n",
    "                  device=device, prop_keep=0.5, top_k=3, attack_class=attack_class)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "id": "ab8c2490-c230-4516-a19a-df84a3779011",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Attack_NN(\n",
       "  (linear_relu_stack): Sequential(\n",
       "    (0): Linear(in_features=4, out_features=128, bias=True)\n",
       "    (1): ReLU()\n",
       "    (2): Linear(in_features=128, out_features=64, bias=True)\n",
       "    (3): ReLU()\n",
       "    (4): Linear(in_features=64, out_features=1, bias=True)\n",
       "  )\n",
       ")"
      ]
     },
     "execution_count": 27,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "attack_model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "id": "d1f87436-bae2-410f-a025-e9757ecd17d9",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Evaluate the trained attack model against target models 50-99 and stack\n",
    "# the per-target scores / membership labels into (n_targets, n_samples) arrays.\n",
    "# Rows are accumulated in lists and concatenated once -- np.concatenate in\n",
    "# the loop recopies the growing arrays every iteration.\n",
    "score_rows, mem_rows = [], []\n",
    "for tar_model in range(50,100):\n",
    "    targetX = conf_data_all[tar_model].astype(np.float32)\n",
    "    pred_cor = (targetX.argmax(1) == Y_data).astype(int)\n",
    "    targetY = train_keep[tar_model]\n",
    "    top_k = 3\n",
    "    if top_k:\n",
    "        # use only the top-3 values of the probability vector\n",
    "        targetX, _ = get_top_k_conf(top_k, targetX, targetX)\n",
    "\n",
    "    # append the 'prediction correct' flag as an extra attack feature\n",
    "    targetX = np.concatenate((targetX, pred_cor.reshape(pred_cor.shape[0],1)), 1)\n",
    "    targetX = targetX.astype(np.float32)\n",
    "    \n",
    "    shadow_attack_data = CustomDataset(targetX, targetY, attack_transform)\n",
    "    shadow_attack_dataloader = DataLoader(shadow_attack_data, batch_size=batch_size, shuffle=False)\n",
    "    attack_test_scores, attack_test_mem = get_attack_pred(shadow_attack_dataloader, attack_model, device)\n",
    "    score_rows.append(attack_test_scores.detach().cpu().numpy().reshape(1, -1))\n",
    "    mem_rows.append(attack_test_mem.detach().cpu().numpy().reshape(1, -1))\n",
    "attack_test_scores_all = np.concatenate(score_rows, axis=0)\n",
    "attack_test_mem_all = np.concatenate(mem_rows, axis=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "id": "ebd65e38-9469-4c5f-a5a3-041b675a9d3c",
   "metadata": {},
   "outputs": [],
   "source": [
    "x = 6000"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "id": "8ba8d2b7-7d6c-4e17-95bd-f854e8af5af9",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "outlier 0.5627466666666666\n",
      "MPLR 0.58459\n",
      "base 0.509075\n"
     ]
    }
   ],
   "source": [
    "# Shadow-model attack accuracy on three selections: outlier-ranked points,\n",
    "# risk-metric (MPLR) ranked points, and the full dataset (base).\n",
    "pred_result_all = attack_test_scores_all > 0.5\n",
    "\n",
    "def _subset_acc(pred, mem, cols=None):\n",
    "    \"\"\"Flattened accuracy of membership predictions, optionally on a column subset.\"\"\"\n",
    "    if cols is not None:\n",
    "        pred, mem = pred[:, cols], mem[:, cols]\n",
    "    return metrics.accuracy_score(mem.flatten(), pred.flatten())\n",
    "\n",
    "print(\"outlier\", _subset_acc(pred_result_all, attack_test_mem_all, risk_rank[:x]))\n",
    "print(\"MPLR\", _subset_acc(pred_result_all, attack_test_mem_all, pri_risk_rank[:x]))\n",
    "print(\"base\", _subset_acc(pred_result_all, attack_test_mem_all))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "eebaa471-2271-4310-b936-1030edb6f096",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c4bbfa8f-bd1a-46f6-9b79-a7c758d30f27",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "8cc2e206-47a0-4f9f-80c9-b4a19ebca5a7",
   "metadata": {},
   "source": [
    "### 扰动攻击"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "id": "f06a3e24-5a57-472a-b6f0-78961403f471",
   "metadata": {},
   "outputs": [],
   "source": [
    "x = 500"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "id": "40ceb18e-621c-4c72-8ef7-94498c7b65c5",
   "metadata": {},
   "outputs": [],
   "source": [
    "nums = 30\n",
    "# sigma_list = [0.15]\n",
    "sigma_list = [0.05]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "id": "30abf80e-2b00-4b70-ac0c-a7099f987cc4",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Test Error: \n",
      " Accuracy: 99.0%, Avg loss: 0.073895 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.0%, Avg loss: 0.074459 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.0%, Avg loss: 0.074098 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.0%, Avg loss: 0.074835 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.0%, Avg loss: 0.074319 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.0%, Avg loss: 0.073920 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.0%, Avg loss: 0.073856 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.0%, Avg loss: 0.073654 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.0%, Avg loss: 0.074407 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.0%, Avg loss: 0.074133 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.0%, Avg loss: 0.074053 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.0%, Avg loss: 0.074212 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.0%, Avg loss: 0.073907 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.0%, Avg loss: 0.073841 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.0%, Avg loss: 0.074634 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.0%, Avg loss: 0.074550 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.0%, Avg loss: 0.074221 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.0%, Avg loss: 0.074654 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.0%, Avg loss: 0.074865 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.0%, Avg loss: 0.074525 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.0%, Avg loss: 0.073914 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.0%, Avg loss: 0.074310 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.0%, Avg loss: 0.074204 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.0%, Avg loss: 0.074121 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.0%, Avg loss: 0.074308 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.0%, Avg loss: 0.074893 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.0%, Avg loss: 0.074219 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.0%, Avg loss: 0.074462 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.0%, Avg loss: 0.074378 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.0%, Avg loss: 0.074685 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.0%, Avg loss: 0.074261 \n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Index of the target model whose saved weights are attacked.\n",
    "tar_model = 0\n",
    "# Build the target model matching the configured architecture.\n",
    "if model in ['NN', 'NN_4layer']:\n",
    "    Target_Model = globals()['create_{}_model'.format(model)](X_data.shape[1], Y_data.max()+1)\n",
    "elif model == 'CNN':\n",
    "    Target_Model = globals()['create_{}_model'.format(model)](Y_data.max()+1, data_name)\n",
    "else:\n",
    "    Target_Model = globals()['create_{}_model'.format(model)](Y_data.max()+1)\n",
    "# Load the pretrained weights for target model `tar_model`.\n",
    "weight_path = os.path.join(weight_dir, weight_part + \"{}.pth\".format(tar_model))\n",
    "# print(Reference_Model)\n",
    "Target_Model.load_state_dict(torch.load(weight_path))\n",
    "Target_Model.to(device)\n",
    "loss_fn = nn.CrossEntropyLoss()\n",
    "# Perturbation (label-robustness) attack: per-sample membership prediction\n",
    "# from `nums` noisy copies at each sigma in sigma_list.\n",
    "pred_result, _ = Label_attack(all_dataloader, Target_Model, loss_fn, device, sigma_list, nums)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "id": "9f0ae045-14bd-4159-948c-8881f74cd181",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Widen the evaluation to the top 6000 risk-ranked samples.\n",
    "x = 6000"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "id": "d64311d6-c09d-47b3-8076-f11eb10134aa",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "outlier 0.5586666666666666\n",
      "MPLR 0.5788333333333333\n",
      "base 0.5081333333333333\n"
     ]
    }
   ],
   "source": [
    "# Compare perturbation-attack accuracy on the top-x samples selected by\n",
    "# each risk ranking, then against the baseline over all samples.\n",
    "# (Was three copy-pasted blocks; collapsed into a loop + baseline.)\n",
    "for name, idx in [(\"outlier\", risk_rank[:x]), (\"MPLR\", pri_risk_rank[:x])]:\n",
    "    pred_clip = pred_result[idx]\n",
    "    mem_clip = train_keep[tar_model][idx]\n",
    "    accuracy = metrics.accuracy_score(mem_clip, pred_clip)\n",
    "    print(name, accuracy)\n",
    "\n",
    "# Baseline: attack accuracy over the full sample set.\n",
    "pred_clip = pred_result\n",
    "mem_clip = train_keep[tar_model]\n",
    "accuracy = metrics.accuracy_score(mem_clip, pred_clip)\n",
    "print(\"base\", accuracy)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "87d308e9-be9d-40fb-be9e-e84efc339052",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0308f234-469f-4670-b4e4-51973cb27ce4",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "57450107-5e15-4050-97b0-c5d7a610e6c7",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "c12b2733-fd92-457f-b557-7a5e0a6825b5",
   "metadata": {},
   "source": [
    "### 绘制训练轮次的影响 (Effect of training epochs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "id": "0118db14-31f2-4324-991e-2f1df8937c9b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Number of top-risk samples evaluated per checkpoint, and accumulators\n",
    "# for per-checkpoint results collected during training below.\n",
    "x = 600\n",
    "shadow_result = []\n",
    "LIRA_result = []\n",
    "gene_distance = []"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "id": "07396dc8-83ff-4251-8f0f-675c5d30f4a8",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Held-out MNIST test set used to measure the generalization gap.\n",
    "test_dataset = datasets.MNIST(root='../datasets/mnist', train=False, transform=None, download=True)\n",
    "x_test_data = test_dataset.data.numpy()\n",
    "y_test_data = test_dataset.targets.numpy()\n",
    "test_data = CustomDataset(x_test_data, y_test_data, model_transform)\n",
    "test_dataloader = DataLoader(test_data, batch_size=batch_size)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "id": "e4f089cf-cc72-4ae9-8077-e1055dd72782",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1\n",
      "-------------------------------\n",
      "loss: 2.311823  [  128/30013]\n",
      "loss: 0.361949  [12928/30013]\n",
      "loss: 0.235586  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 82.5%\n",
      "Test Error: \n",
      " Accuracy: 95.9%, Avg loss: 0.135950 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 95.6%, Avg loss: 0.147201 \n",
      "\n",
      " Error: \n",
      " Accuracy: 95.5%  \n",
      "\n",
      "AUC value is: 0.49912656039154296\n",
      "Accuracy is: 0.5006666666666667\n",
      "AUC value is: 0.49372069048755185\n",
      "Accuracy is: 0.4955833333333333\n",
      "Epoch 2\n",
      "-------------------------------\n",
      "loss: 0.244482  [  128/30013]\n",
      "loss: 0.236889  [12928/30013]\n",
      "loss: 0.131660  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 96.1%\n",
      "Epoch 3\n",
      "-------------------------------\n",
      "loss: 0.183511  [  128/30013]\n",
      "loss: 0.174330  [12928/30013]\n",
      "loss: 0.093643  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 97.3%\n",
      "Epoch 4\n",
      "-------------------------------\n",
      "loss: 0.149769  [  128/30013]\n",
      "loss: 0.151574  [12928/30013]\n",
      "loss: 0.074847  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 97.9%\n",
      "Epoch 5\n",
      "-------------------------------\n",
      "loss: 0.124542  [  128/30013]\n",
      "loss: 0.136218  [12928/30013]\n",
      "loss: 0.077906  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 98.2%\n",
      "Epoch 6\n",
      "-------------------------------\n",
      "loss: 0.104055  [  128/30013]\n",
      "loss: 0.109348  [12928/30013]\n",
      "loss: 0.082552  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 98.5%\n",
      "Test Error: \n",
      " Accuracy: 98.1%, Avg loss: 0.052351 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 98.6%, Avg loss: 0.043336 \n",
      "\n",
      " Error: \n",
      " Accuracy: 98.3%  \n",
      "\n",
      "AUC value is: 0.49987991664411774\n",
      "Accuracy is: 0.5032166666666666\n",
      "AUC value is: 0.49475678401544054\n",
      "Accuracy is: 0.49583333333333335\n",
      "Epoch 7\n",
      "-------------------------------\n",
      "loss: 0.088837  [  128/30013]\n",
      "loss: 0.104945  [12928/30013]\n",
      "loss: 0.082275  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 98.6%\n",
      "Epoch 8\n",
      "-------------------------------\n",
      "loss: 0.083015  [  128/30013]\n",
      "loss: 0.082572  [12928/30013]\n",
      "loss: 0.073794  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 98.8%\n",
      "Epoch 9\n",
      "-------------------------------\n",
      "loss: 0.081472  [  128/30013]\n",
      "loss: 0.051000  [12928/30013]\n",
      "loss: 0.068278  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.0%\n",
      "Epoch 10\n",
      "-------------------------------\n",
      "loss: 0.076283  [  128/30013]\n",
      "loss: 0.026620  [12928/30013]\n",
      "loss: 0.058354  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.1%\n",
      "Epoch 11\n",
      "-------------------------------\n",
      "loss: 0.073532  [  128/30013]\n",
      "loss: 0.022399  [12928/30013]\n",
      "loss: 0.039463  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.1%\n",
      "Test Error: \n",
      " Accuracy: 98.0%, Avg loss: 0.066209 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 98.6%, Avg loss: 0.039690 \n",
      "\n",
      " Error: \n",
      " Accuracy: 98.2%  \n",
      "\n",
      "AUC value is: 0.49890124923812346\n",
      "Accuracy is: 0.5028333333333334\n",
      "AUC value is: 0.4966774165983148\n",
      "Accuracy is: 0.4963666666666667\n",
      "Epoch 12\n",
      "-------------------------------\n",
      "loss: 0.060577  [  128/30013]\n",
      "loss: 0.020747  [12928/30013]\n",
      "loss: 0.034016  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.1%\n",
      "Epoch 13\n",
      "-------------------------------\n",
      "loss: 0.052683  [  128/30013]\n",
      "loss: 0.026363  [12928/30013]\n",
      "loss: 0.031863  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.3%\n",
      "Epoch 14\n",
      "-------------------------------\n",
      "loss: 0.063474  [  128/30013]\n",
      "loss: 0.031112  [12928/30013]\n",
      "loss: 0.038196  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.2%\n",
      "Epoch 15\n",
      "-------------------------------\n",
      "loss: 0.047310  [  128/30013]\n",
      "loss: 0.026576  [12928/30013]\n",
      "loss: 0.068856  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.3%\n",
      "Epoch 16\n",
      "-------------------------------\n",
      "loss: 0.030821  [  128/30013]\n",
      "loss: 0.027873  [12928/30013]\n",
      "loss: 0.071761  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.3%\n",
      "Test Error: \n",
      " Accuracy: 98.0%, Avg loss: 0.070050 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.0%, Avg loss: 0.027527 \n",
      "\n",
      " Error: \n",
      " Accuracy: 98.5%  \n",
      "\n",
      "AUC value is: 0.4993099443148673\n",
      "Accuracy is: 0.5053\n",
      "AUC value is: 0.49837618358397223\n",
      "Accuracy is: 0.49711666666666665\n",
      "Epoch 17\n",
      "-------------------------------\n",
      "loss: 0.024898  [  128/30013]\n",
      "loss: 0.015814  [12928/30013]\n",
      "loss: 0.056185  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.4%\n",
      "Epoch 18\n",
      "-------------------------------\n",
      "loss: 0.011028  [  128/30013]\n",
      "loss: 0.014745  [12928/30013]\n",
      "loss: 0.078648  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.4%\n",
      "Epoch 19\n",
      "-------------------------------\n",
      "loss: 0.007503  [  128/30013]\n",
      "loss: 0.010975  [12928/30013]\n",
      "loss: 0.100227  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.5%\n",
      "Epoch 20\n",
      "-------------------------------\n",
      "loss: 0.010668  [  128/30013]\n",
      "loss: 0.020134  [12928/30013]\n",
      "loss: 0.055661  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.5%\n",
      "Epoch 21\n",
      "-------------------------------\n",
      "loss: 0.004614  [  128/30013]\n",
      "loss: 0.036292  [12928/30013]\n",
      "loss: 0.034741  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.6%\n",
      "Test Error: \n",
      " Accuracy: 98.2%, Avg loss: 0.061035 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.2%, Avg loss: 0.022012 \n",
      "\n",
      " Error: \n",
      " Accuracy: 98.7%  \n",
      "\n",
      "AUC value is: 0.5009285107299093\n",
      "Accuracy is: 0.5064\n",
      "AUC value is: 0.5000862750162005\n",
      "Accuracy is: 0.49755\n",
      "Epoch 22\n",
      "-------------------------------\n",
      "loss: 0.006704  [  128/30013]\n",
      "loss: 0.012870  [12928/30013]\n",
      "loss: 0.006085  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.7%\n",
      "Epoch 23\n",
      "-------------------------------\n",
      "loss: 0.025321  [  128/30013]\n",
      "loss: 0.028441  [12928/30013]\n",
      "loss: 0.049319  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.6%\n",
      "Epoch 24\n",
      "-------------------------------\n",
      "loss: 0.021662  [  128/30013]\n",
      "loss: 0.009616  [12928/30013]\n",
      "loss: 0.035411  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.6%\n",
      "Epoch 25\n",
      "-------------------------------\n",
      "loss: 0.015353  [  128/30013]\n",
      "loss: 0.014745  [12928/30013]\n",
      "loss: 0.019130  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.6%\n",
      "Epoch 26\n",
      "-------------------------------\n",
      "loss: 0.025083  [  128/30013]\n",
      "loss: 0.022220  [12928/30013]\n",
      "loss: 0.019356  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.7%\n",
      "Test Error: \n",
      " Accuracy: 98.3%, Avg loss: 0.074852 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.1%, Avg loss: 0.027106 \n",
      "\n",
      " Error: \n",
      " Accuracy: 98.5%  \n",
      "\n",
      "AUC value is: 0.5022296876409081\n",
      "Accuracy is: 0.50645\n",
      "AUC value is: 0.5019567887007748\n",
      "Accuracy is: 0.49838333333333334\n",
      "Epoch 27\n",
      "-------------------------------\n",
      "loss: 0.020022  [  128/30013]\n",
      "loss: 0.012221  [12928/30013]\n",
      "loss: 0.014167  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.7%\n",
      "Epoch 28\n",
      "-------------------------------\n",
      "loss: 0.016461  [  128/30013]\n",
      "loss: 0.011091  [12928/30013]\n",
      "loss: 0.021094  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.6%\n",
      "Epoch 29\n",
      "-------------------------------\n",
      "loss: 0.035666  [  128/30013]\n",
      "loss: 0.015873  [12928/30013]\n",
      "loss: 0.009695  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.6%\n",
      "Epoch 30\n",
      "-------------------------------\n",
      "loss: 0.003759  [  128/30013]\n",
      "loss: 0.023220  [12928/30013]\n",
      "loss: 0.041088  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.7%\n",
      "Epoch 31\n",
      "-------------------------------\n",
      "loss: 0.006032  [  128/30013]\n",
      "loss: 0.019509  [12928/30013]\n",
      "loss: 0.001095  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.7%\n",
      "Test Error: \n",
      " Accuracy: 98.5%, Avg loss: 0.070465 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.5%, Avg loss: 0.014740 \n",
      "\n",
      " Error: \n",
      " Accuracy: 98.9%  \n",
      "\n",
      "AUC value is: 0.5024247554553151\n",
      "Accuracy is: 0.5077166666666667\n",
      "AUC value is: 0.5028386921997099\n",
      "Accuracy is: 0.5006\n",
      "Epoch 32\n",
      "-------------------------------\n",
      "loss: 0.055746  [  128/30013]\n",
      "loss: 0.006942  [12928/30013]\n",
      "loss: 0.015254  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.7%\n",
      "Epoch 33\n",
      "-------------------------------\n",
      "loss: 0.012832  [  128/30013]\n",
      "loss: 0.022249  [12928/30013]\n",
      "loss: 0.006262  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.8%\n",
      "Epoch 34\n",
      "-------------------------------\n",
      "loss: 0.038432  [  128/30013]\n",
      "loss: 0.004238  [12928/30013]\n",
      "loss: 0.071687  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.8%\n",
      "Epoch 35\n",
      "-------------------------------\n",
      "loss: 0.061582  [  128/30013]\n",
      "loss: 0.001143  [12928/30013]\n",
      "loss: 0.004465  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.8%\n",
      "Epoch 36\n",
      "-------------------------------\n",
      "loss: 0.009074  [  128/30013]\n",
      "loss: 0.004475  [12928/30013]\n",
      "loss: 0.009507  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Test Error: \n",
      " Accuracy: 98.6%, Avg loss: 0.063584 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.8%, Avg loss: 0.005498 \n",
      "\n",
      " Error: \n",
      " Accuracy: 99.2%  \n",
      "\n",
      "AUC value is: 0.502075563723078\n",
      "Accuracy is: 0.5100333333333333\n",
      "AUC value is: 0.5057030094042317\n",
      "Accuracy is: 0.5011833333333333\n",
      "Epoch 37\n",
      "-------------------------------\n",
      "loss: 0.001267  [  128/30013]\n",
      "loss: 0.006007  [12928/30013]\n",
      "loss: 0.007115  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Epoch 38\n",
      "-------------------------------\n",
      "loss: 0.002686  [  128/30013]\n",
      "loss: 0.015682  [12928/30013]\n",
      "loss: 0.005368  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Epoch 39\n",
      "-------------------------------\n",
      "loss: 0.001218  [  128/30013]\n",
      "loss: 0.000450  [12928/30013]\n",
      "loss: 0.022571  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Epoch 40\n",
      "-------------------------------\n",
      "loss: 0.003926  [  128/30013]\n",
      "loss: 0.004668  [12928/30013]\n",
      "loss: 0.019865  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.7%\n",
      "Epoch 41\n",
      "-------------------------------\n",
      "loss: 0.006129  [  128/30013]\n",
      "loss: 0.002890  [12928/30013]\n",
      "loss: 0.003456  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.8%\n",
      "Test Error: \n",
      " Accuracy: 98.5%, Avg loss: 0.075100 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.7%, Avg loss: 0.008044 \n",
      "\n",
      " Error: \n",
      " Accuracy: 99.0%  \n",
      "\n",
      "AUC value is: 0.5037072345850251\n",
      "Accuracy is: 0.5091666666666667\n",
      "AUC value is: 0.5062229617240894\n",
      "Accuracy is: 0.5019166666666667\n",
      "Epoch 42\n",
      "-------------------------------\n",
      "loss: 0.016068  [  128/30013]\n",
      "loss: 0.023410  [12928/30013]\n",
      "loss: 0.024434  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.7%\n",
      "Epoch 43\n",
      "-------------------------------\n",
      "loss: 0.036702  [  128/30013]\n",
      "loss: 0.005473  [12928/30013]\n",
      "loss: 0.001310  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.8%\n",
      "Epoch 44\n",
      "-------------------------------\n",
      "loss: 0.008374  [  128/30013]\n",
      "loss: 0.006160  [12928/30013]\n",
      "loss: 0.002805  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Epoch 45\n",
      "-------------------------------\n",
      "loss: 0.001931  [  128/30013]\n",
      "loss: 0.004134  [12928/30013]\n",
      "loss: 0.007081  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.7%\n",
      "Epoch 46\n",
      "-------------------------------\n",
      "loss: 0.001002  [  128/30013]\n",
      "loss: 0.015471  [12928/30013]\n",
      "loss: 0.011383  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Test Error: \n",
      " Accuracy: 98.6%, Avg loss: 0.075058 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.7%, Avg loss: 0.009828 \n",
      "\n",
      " Error: \n",
      " Accuracy: 99.1%  \n",
      "\n",
      "AUC value is: 0.5023528876640422\n",
      "Accuracy is: 0.5090333333333333\n",
      "AUC value is: 0.5060653739167202\n",
      "Accuracy is: 0.5021333333333333\n",
      "Epoch 47\n",
      "-------------------------------\n",
      "loss: 0.012777  [  128/30013]\n",
      "loss: 0.000822  [12928/30013]\n",
      "loss: 0.003291  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Epoch 48\n",
      "-------------------------------\n",
      "loss: 0.001274  [  128/30013]\n",
      "loss: 0.002179  [12928/30013]\n",
      "loss: 0.002970  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.9%\n",
      "Epoch 49\n",
      "-------------------------------\n",
      "loss: 0.010021  [  128/30013]\n",
      "loss: 0.006441  [12928/30013]\n",
      "loss: 0.007244  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.7%\n",
      "Epoch 50\n",
      "-------------------------------\n",
      "loss: 0.040152  [  128/30013]\n",
      "loss: 0.000563  [12928/30013]\n",
      "loss: 0.002345  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 99.8%\n"
     ]
    }
   ],
   "source": [
    "# Train a fresh target model for 50 epochs and, every 5 epochs, record:\n",
    "#   * the generalization gap (train_acc - test_acc)\n",
    "#   * shadow-model attack accuracy\n",
    "#   * LiRA attack accuracy on the top-x samples ranked by privacy risk\n",
    "(x_train, y_train), (x_test, y_test), train_keep_exp, test_keep_exp = load_MNIST(0, 100, prop_keep=0.5, seed=0)\n",
    "training_data = CustomDataset(x_train, y_train, model_transform)\n",
    "train_dataloader = DataLoader(training_data, batch_size=batch_size)\n",
    "# Build the target model matching the configured architecture\n",
    "# (else-branch added for consistency with the earlier model-creation cell).\n",
    "if model in ['NN', 'NN_4layer']:\n",
    "    TargetModel = globals()['create_{}_model'.format(model)](x_train.shape[1], y_train.max()+1)\n",
    "elif model == 'CNN':\n",
    "    TargetModel = globals()['create_{}_model'.format(model)](y_train.max()+1, data_name)\n",
    "else:\n",
    "    TargetModel = globals()['create_{}_model'.format(model)](y_train.max()+1)\n",
    "# print(TargetModel)\n",
    "TargetModel.to(device)\n",
    "loss_fn = nn.CrossEntropyLoss()\n",
    "optimizer = torch.optim.Adam(TargetModel.parameters(), lr=1e-3, weight_decay=1e-4)\n",
    "for t in range(50):\n",
    "    print(f\"Epoch {t+1}\\n-------------------------------\")\n",
    "    train(train_dataloader, TargetModel, loss_fn, optimizer, device)\n",
    "\n",
    "    # Evaluate the attacks every 5 epochs (including epoch 1, t == 0).\n",
    "    if t%5 == 0:\n",
    "        test_acc = evaluate(test_dataloader, TargetModel, loss_fn, device)\n",
    "        train_acc = evaluate(train_dataloader, TargetModel, loss_fn, device)\n",
    "        distance = train_acc - test_acc\n",
    "        gene_distance.append(distance)\n",
    "\n",
    "        # NOTE(review): membership labels come from the earlier global\n",
    "        # `train_keep`, not from `train_keep_exp` returned above -- this\n",
    "        # relies on load_MNIST(0, 100, prop_keep=0.5, seed=0) reproducing\n",
    "        # the same split; confirm the seeds match.\n",
    "        mem_label = train_keep[0]\n",
    "\n",
    "        # Target-model confidences over the full attack set.\n",
    "        conf_data, label_data = get_model_pred(all_dataloader, TargetModel, device)\n",
    "        conf_data = conf_data.detach().cpu().numpy()\n",
    "        label_data = label_data.detach().cpu().numpy()\n",
    "        conf_data = conf_data.astype(np.float64)\n",
    "        score_tar = cal_score(conf_data.copy(), label_data)\n",
    "\n",
    "        # Run the shadow-model attack: features are the top-3 confidences\n",
    "        # plus a correct-prediction indicator.\n",
    "        targetX = conf_data\n",
    "        pred_cor = (targetX.argmax(1) == Y_data).astype(int)\n",
    "        targetY = mem_label\n",
    "        targetX, _ = get_top_k_conf(3, targetX, targetX)\n",
    "        targetX = np.concatenate((targetX, pred_cor.reshape(pred_cor.shape[0],1)), 1)\n",
    "        targetX = targetX.astype(np.float32)\n",
    "        shadow_attack_data = CustomDataset(targetX, targetY, attack_transform)\n",
    "        shadow_attack_dataloader = DataLoader(shadow_attack_data, batch_size=batch_size, shuffle=False)\n",
    "        attack_test_scores, attack_test_mem = get_attack_pred(shadow_attack_dataloader, attack_model, device)\n",
    "        attack_test_scores, attack_test_mem = attack_test_scores.detach().cpu().numpy(), attack_test_mem.detach().cpu().numpy()\n",
    "        accuracy = evaluate_ROC(attack_test_scores, attack_test_mem)\n",
    "        shadow_result.append(accuracy)\n",
    "\n",
    "        # Run the risk-ranked LiRA attack. (The original cell recomputed\n",
    "        # score_tar here with identical arguments; computed once above.)\n",
    "        pri_risk_rank_t = np.flip(np.argsort(pri_risk_all))\n",
    "\n",
    "        pred_result = LIRA_attack(train_keep, score_all, score_tar, mem_label)\n",
    "        evaluate_ROC(pred_result, mem_label, threshold=0)\n",
    "        pred_clip = pred_result[pri_risk_rank_t[:x]]\n",
    "        mem_clip = mem_label[pri_risk_rank_t[:x]]\n",
    "        pred_clip = pred_clip > 0\n",
    "        accuracy = metrics.accuracy_score(mem_clip, pred_clip)\n",
    "        LIRA_result.append(accuracy)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a97f3d21-e486-4950-86fb-0c465461b44d",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "id": "2a5bd1c7-20e4-4353-bc66-e8d4c02ed81e",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[0.5006666666666667,\n",
       " 0.5032166666666666,\n",
       " 0.5028333333333334,\n",
       " 0.5053,\n",
       " 0.5064,\n",
       " 0.50645,\n",
       " 0.5077166666666667,\n",
       " 0.5100333333333333,\n",
       " 0.5091666666666667,\n",
       " 0.5090333333333333]"
      ]
     },
     "execution_count": 44,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Shadow-model attack accuracy at each 5-epoch checkpoint.\n",
    "shadow_result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "id": "95f1de6f-81e4-4ac5-8ffc-1a75ec52f5f9",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[0.5016666666666667,\n",
       " 0.5266666666666666,\n",
       " 0.5783333333333334,\n",
       " 0.6466666666666666,\n",
       " 0.6766666666666666,\n",
       " 0.6933333333333334,\n",
       " 0.75,\n",
       " 0.8166666666666667,\n",
       " 0.7983333333333333,\n",
       " 0.8083333333333333]"
      ]
     },
     "execution_count": 45,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# LiRA attack accuracy (top-x risk-ranked samples) at each checkpoint.\n",
    "LIRA_result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "id": "20156568-16d9-42f2-9584-0972bd6098ed",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[-0.0026477959550861296,\n",
       " 0.004706064038916491,\n",
       " 0.006939209675807079,\n",
       " 0.009437780295205478,\n",
       " 0.009303638423349914,\n",
       " 0.007970622730150256,\n",
       " 0.01033557125245721,\n",
       " 0.011667634025255724,\n",
       " 0.011801342751474375,\n",
       " 0.011701212807783246]"
      ]
     },
     "execution_count": 46,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Generalization gap (train_acc - test_acc) at each checkpoint.\n",
    "gene_distance"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fc006845-82e6-437c-a184-3c0df679bcfc",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3ab4d034-2a8e-4b9d-bf6c-c317bf184f9d",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "62aeb05e-afd4-4eb8-b405-35dfcf5f95f6",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "14e30928-67e7-4bc4-8c3b-ff42eafa73db",
   "metadata": {},
   "source": [
    "### 正则化手段的影响 (Effect of regularization)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "id": "44fcbf17-2050-4e38-a8d2-4b716a19509d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Experiment setup: effect of regularization (L2 weight decay / dropout)\n",
    "# on attack success. Accumulators are reset for this section.\n",
    "x = 600\n",
    "shadow_result = []\n",
    "LIRA_result = []\n",
    "gene_distance = []\n",
    "base_result = []\n",
    "risk_base_result = []\n",
    "test_acc_list = []\n",
    "train_acc_list = []\n",
    "# L2 weight-decay values to sweep.\n",
    "# l2_norm_list = [1e-4, 5e-4, 1e-3, 2e-3, 3e-3, 4e-3, 5e-3, 6e-3, 7e-3, 8e-3, 9e-3, 1e-2]\n",
    "l2_norm_list = [0, 5e-4, 1e-3, 5e-3, 1e-2]\n",
    "# l2_norm_list = [5e-4]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "id": "f45a4d72-6775-43c5-a092-364d12b4007a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Held-out MNIST test set for this section (same as the earlier cell;\n",
    "# rebuilt here so the section can run standalone).\n",
    "test_dataset = datasets.MNIST(root='../datasets/mnist', train=False, transform=None, download=True)\n",
    "x_test_data = test_dataset.data.numpy()\n",
    "y_test_data = test_dataset.targets.numpy()\n",
    "test_data = CustomDataset(x_test_data, y_test_data, model_transform)\n",
    "test_dataloader = DataLoader(test_data, batch_size=batch_size)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "id": "06d11cf4-b3bb-4833-9ab4-83de2eb3d215",
   "metadata": {},
   "outputs": [],
   "source": [
    "class M_CNN_dropout(nn.Module):\n",
    "    \"\"\"Small MNIST CNN with dropout (p=0.5) after each block.\n",
    "\n",
    "    Variant of the target CNN used to measure the effect of dropout\n",
    "    regularization on membership inference attacks.\n",
    "\n",
    "    NOTE(review): relies on `F` (torch.nn.functional) being in scope --\n",
    "    no visible import in this notebook; presumably provided by the\n",
    "    `from frame.* import *` star imports. Confirm.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, num_classes):\n",
    "        super().__init__()\n",
    "        self.conv1 = nn.Conv2d(1, 16, 8, 2, padding=3)\n",
    "        self.conv2 = nn.Conv2d(16, 32, 4, 2)\n",
    "        self.fc1 = nn.Linear(32 * 4 * 4, 32)\n",
    "        self.fc2 = nn.Linear(32, num_classes)\n",
    "        self.dropout = nn.Dropout(p=0.5)\n",
    "\n",
    "    def forward(self, x):\n",
    "        \"\"\"Return unnormalized class scores (logits), shape [B, num_classes].\"\"\"\n",
    "        # x of shape [B, 1, 28, 28]\n",
    "        x = F.relu(self.conv1(x))  # -> [B, 16, 14, 14]\n",
    "        x = F.max_pool2d(x, 2, 1)  # -> [B, 16, 13, 13]\n",
    "        x = self.dropout(x)\n",
    "        x = F.relu(self.conv2(x))  # -> [B, 32, 5, 5]\n",
    "        x = F.max_pool2d(x, 2, 1)  # -> [B, 32, 4, 4]\n",
    "        x = self.dropout(x)\n",
    "        x = x.view(-1, 32 * 4 * 4)  # -> [B, 512]\n",
    "        x = F.relu(self.fc1(x))  # -> [B, 32]\n",
    "        x = self.dropout(x)\n",
    "        x = self.fc2(x)  # -> [B, 10]\n",
    "        # pred_probab = nn.LogSoftmax(dim=1)(x)\n",
    "        return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "id": "334401a9-9cbb-4c4f-a36e-afa8eb29087a",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1\n",
      "-------------------------------\n",
      "loss: 2.344853  [  128/30013]\n",
      "loss: 1.089399  [12928/30013]\n",
      "loss: 0.972760  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 56.6%\n",
      "Epoch 2\n",
      "-------------------------------\n",
      "loss: 0.821743  [  128/30013]\n",
      "loss: 0.696874  [12928/30013]\n",
      "loss: 0.581352  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 81.2%\n",
      "Epoch 3\n",
      "-------------------------------\n",
      "loss: 0.586635  [  128/30013]\n",
      "loss: 0.677305  [12928/30013]\n",
      "loss: 0.559065  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 86.3%\n",
      "Epoch 4\n",
      "-------------------------------\n",
      "loss: 0.470186  [  128/30013]\n",
      "loss: 0.521207  [12928/30013]\n",
      "loss: 0.396552  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 87.7%\n",
      "Epoch 5\n",
      "-------------------------------\n",
      "loss: 0.304216  [  128/30013]\n",
      "loss: 0.397626  [12928/30013]\n",
      "loss: 0.372517  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 89.0%\n",
      "Epoch 6\n",
      "-------------------------------\n",
      "loss: 0.380081  [  128/30013]\n",
      "loss: 0.477061  [12928/30013]\n",
      "loss: 0.286255  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 89.6%\n",
      "Epoch 7\n",
      "-------------------------------\n",
      "loss: 0.253203  [  128/30013]\n",
      "loss: 0.418163  [12928/30013]\n",
      "loss: 0.322601  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 90.4%\n",
      "Epoch 8\n",
      "-------------------------------\n",
      "loss: 0.351701  [  128/30013]\n",
      "loss: 0.367921  [12928/30013]\n",
      "loss: 0.343972  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 90.8%\n",
      "Epoch 9\n",
      "-------------------------------\n",
      "loss: 0.307266  [  128/30013]\n",
      "loss: 0.465945  [12928/30013]\n",
      "loss: 0.374952  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.0%\n",
      "Epoch 10\n",
      "-------------------------------\n",
      "loss: 0.304277  [  128/30013]\n",
      "loss: 0.343655  [12928/30013]\n",
      "loss: 0.311059  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.6%\n",
      "Epoch 11\n",
      "-------------------------------\n",
      "loss: 0.290277  [  128/30013]\n",
      "loss: 0.260387  [12928/30013]\n",
      "loss: 0.302718  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.6%\n",
      "Epoch 12\n",
      "-------------------------------\n",
      "loss: 0.260559  [  128/30013]\n",
      "loss: 0.299445  [12928/30013]\n",
      "loss: 0.347821  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.0%\n",
      "Epoch 13\n",
      "-------------------------------\n",
      "loss: 0.356054  [  128/30013]\n",
      "loss: 0.342167  [12928/30013]\n",
      "loss: 0.225725  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.2%\n",
      "Epoch 14\n",
      "-------------------------------\n",
      "loss: 0.230431  [  128/30013]\n",
      "loss: 0.373736  [12928/30013]\n",
      "loss: 0.343083  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.3%\n",
      "Epoch 15\n",
      "-------------------------------\n",
      "loss: 0.266970  [  128/30013]\n",
      "loss: 0.270174  [12928/30013]\n",
      "loss: 0.250475  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.3%\n",
      "Epoch 16\n",
      "-------------------------------\n",
      "loss: 0.289734  [  128/30013]\n",
      "loss: 0.286590  [12928/30013]\n",
      "loss: 0.290485  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.4%\n",
      "Epoch 17\n",
      "-------------------------------\n",
      "loss: 0.301115  [  128/30013]\n",
      "loss: 0.353485  [12928/30013]\n",
      "loss: 0.267213  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.4%\n",
      "Epoch 18\n",
      "-------------------------------\n",
      "loss: 0.362786  [  128/30013]\n",
      "loss: 0.235148  [12928/30013]\n",
      "loss: 0.254512  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.7%\n",
      "Epoch 19\n",
      "-------------------------------\n",
      "loss: 0.267978  [  128/30013]\n",
      "loss: 0.252504  [12928/30013]\n",
      "loss: 0.267504  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.8%\n",
      "Epoch 20\n",
      "-------------------------------\n",
      "loss: 0.273634  [  128/30013]\n",
      "loss: 0.315871  [12928/30013]\n",
      "loss: 0.333318  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.0%\n",
      "Epoch 21\n",
      "-------------------------------\n",
      "loss: 0.289549  [  128/30013]\n",
      "loss: 0.207817  [12928/30013]\n",
      "loss: 0.327848  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.0%\n",
      "Epoch 22\n",
      "-------------------------------\n",
      "loss: 0.265875  [  128/30013]\n",
      "loss: 0.242709  [12928/30013]\n",
      "loss: 0.200028  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.1%\n",
      "Epoch 23\n",
      "-------------------------------\n",
      "loss: 0.439472  [  128/30013]\n",
      "loss: 0.191263  [12928/30013]\n",
      "loss: 0.322603  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.2%\n",
      "Epoch 24\n",
      "-------------------------------\n",
      "loss: 0.262940  [  128/30013]\n",
      "loss: 0.175210  [12928/30013]\n",
      "loss: 0.291316  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.2%\n",
      "Epoch 25\n",
      "-------------------------------\n",
      "loss: 0.241185  [  128/30013]\n",
      "loss: 0.237544  [12928/30013]\n",
      "loss: 0.180750  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.5%\n",
      "Epoch 26\n",
      "-------------------------------\n",
      "loss: 0.351587  [  128/30013]\n",
      "loss: 0.334047  [12928/30013]\n",
      "loss: 0.259496  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.4%\n",
      "Epoch 27\n",
      "-------------------------------\n",
      "loss: 0.294411  [  128/30013]\n",
      "loss: 0.255571  [12928/30013]\n",
      "loss: 0.182442  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.5%\n",
      "Epoch 28\n",
      "-------------------------------\n",
      "loss: 0.176289  [  128/30013]\n",
      "loss: 0.285236  [12928/30013]\n",
      "loss: 0.302103  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.8%\n",
      "Epoch 29\n",
      "-------------------------------\n",
      "loss: 0.280160  [  128/30013]\n",
      "loss: 0.288205  [12928/30013]\n",
      "loss: 0.209700  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.6%\n",
      "Epoch 30\n",
      "-------------------------------\n",
      "loss: 0.271142  [  128/30013]\n",
      "loss: 0.148632  [12928/30013]\n",
      "loss: 0.177364  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.7%\n",
      "Epoch 31\n",
      "-------------------------------\n",
      "loss: 0.326677  [  128/30013]\n",
      "loss: 0.344424  [12928/30013]\n",
      "loss: 0.195446  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.7%\n",
      "Epoch 32\n",
      "-------------------------------\n",
      "loss: 0.252455  [  128/30013]\n",
      "loss: 0.242276  [12928/30013]\n",
      "loss: 0.297101  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.9%\n",
      "Epoch 33\n",
      "-------------------------------\n",
      "loss: 0.226881  [  128/30013]\n",
      "loss: 0.197692  [12928/30013]\n",
      "loss: 0.199953  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.9%\n",
      "Epoch 34\n",
      "-------------------------------\n",
      "loss: 0.211671  [  128/30013]\n",
      "loss: 0.193917  [12928/30013]\n",
      "loss: 0.129177  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.9%\n",
      "Epoch 35\n",
      "-------------------------------\n",
      "loss: 0.231185  [  128/30013]\n",
      "loss: 0.367921  [12928/30013]\n",
      "loss: 0.211749  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.9%\n",
      "Epoch 36\n",
      "-------------------------------\n",
      "loss: 0.217110  [  128/30013]\n",
      "loss: 0.211672  [12928/30013]\n",
      "loss: 0.212345  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.8%\n",
      "Epoch 37\n",
      "-------------------------------\n",
      "loss: 0.272966  [  128/30013]\n",
      "loss: 0.326891  [12928/30013]\n",
      "loss: 0.274004  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.9%\n",
      "Epoch 38\n",
      "-------------------------------\n",
      "loss: 0.258537  [  128/30013]\n",
      "loss: 0.272541  [12928/30013]\n",
      "loss: 0.173671  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 94.1%\n",
      "Epoch 39\n",
      "-------------------------------\n",
      "loss: 0.199249  [  128/30013]\n",
      "loss: 0.150386  [12928/30013]\n",
      "loss: 0.238112  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 94.2%\n",
      "Epoch 40\n",
      "-------------------------------\n",
      "loss: 0.256396  [  128/30013]\n",
      "loss: 0.356888  [12928/30013]\n",
      "loss: 0.199012  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 94.2%\n",
      "Epoch 41\n",
      "-------------------------------\n",
      "loss: 0.232653  [  128/30013]\n",
      "loss: 0.122524  [12928/30013]\n",
      "loss: 0.141779  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 94.2%\n",
      "Epoch 42\n",
      "-------------------------------\n",
      "loss: 0.203907  [  128/30013]\n",
      "loss: 0.158364  [12928/30013]\n",
      "loss: 0.285837  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 94.1%\n",
      "Epoch 43\n",
      "-------------------------------\n",
      "loss: 0.108127  [  128/30013]\n",
      "loss: 0.241806  [12928/30013]\n",
      "loss: 0.218236  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 94.2%\n",
      "Epoch 44\n",
      "-------------------------------\n",
      "loss: 0.224426  [  128/30013]\n",
      "loss: 0.235862  [12928/30013]\n",
      "loss: 0.276129  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 94.2%\n",
      "Epoch 45\n",
      "-------------------------------\n",
      "loss: 0.198557  [  128/30013]\n",
      "loss: 0.294264  [12928/30013]\n",
      "loss: 0.226305  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 94.3%\n",
      "Epoch 46\n",
      "-------------------------------\n",
      "loss: 0.126293  [  128/30013]\n",
      "loss: 0.174812  [12928/30013]\n",
      "loss: 0.235520  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 94.3%\n",
      "Epoch 47\n",
      "-------------------------------\n",
      "loss: 0.198500  [  128/30013]\n",
      "loss: 0.230904  [12928/30013]\n",
      "loss: 0.145228  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 94.1%\n",
      "Epoch 48\n",
      "-------------------------------\n",
      "loss: 0.239615  [  128/30013]\n",
      "loss: 0.160277  [12928/30013]\n",
      "loss: 0.233102  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 94.2%\n",
      "Epoch 49\n",
      "-------------------------------\n",
      "loss: 0.209909  [  128/30013]\n",
      "loss: 0.182349  [12928/30013]\n",
      "loss: 0.377357  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 94.4%\n",
      "Epoch 50\n",
      "-------------------------------\n",
      "loss: 0.243559  [  128/30013]\n",
      "loss: 0.149475  [12928/30013]\n",
      "loss: 0.180975  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 94.3%\n",
      "Test Error: \n",
      " Accuracy: 98.8%, Avg loss: 0.038765 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 99.0%, Avg loss: 0.033488 \n",
      "\n",
      "train: 0.9898044180854962 test: 0.9878\n",
      " Error: \n",
      " Accuracy: 98.7%  \n",
      "\n",
      "AUC value is: 0.5062436067279661\n",
      "Accuracy is: 0.5045166666666666\n",
      "AUC value is: 0.4951390363094413\n",
      "Accuracy is: 0.49585\n",
      "Test Error: \n",
      " Accuracy: 98.7%, Avg loss: 0.043323 \n",
      "\n",
      "Epoch 1\n",
      "-------------------------------\n",
      "loss: 2.327717  [  128/30013]\n",
      "loss: 1.282756  [12928/30013]\n",
      "loss: 0.734597  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 52.8%\n",
      "Epoch 2\n",
      "-------------------------------\n",
      "loss: 0.693023  [  128/30013]\n",
      "loss: 0.632127  [12928/30013]\n",
      "loss: 0.574792  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 82.0%\n",
      "Epoch 3\n",
      "-------------------------------\n",
      "loss: 0.418318  [  128/30013]\n",
      "loss: 0.483370  [12928/30013]\n",
      "loss: 0.541703  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 86.5%\n",
      "Epoch 4\n",
      "-------------------------------\n",
      "loss: 0.526648  [  128/30013]\n",
      "loss: 0.413544  [12928/30013]\n",
      "loss: 0.386357  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 87.7%\n",
      "Epoch 5\n",
      "-------------------------------\n",
      "loss: 0.340107  [  128/30013]\n",
      "loss: 0.439012  [12928/30013]\n",
      "loss: 0.424292  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 88.6%\n",
      "Epoch 6\n",
      "-------------------------------\n",
      "loss: 0.325306  [  128/30013]\n",
      "loss: 0.344157  [12928/30013]\n",
      "loss: 0.406623  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 89.5%\n",
      "Epoch 7\n",
      "-------------------------------\n",
      "loss: 0.303078  [  128/30013]\n",
      "loss: 0.456697  [12928/30013]\n",
      "loss: 0.278530  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 90.0%\n",
      "Epoch 8\n",
      "-------------------------------\n",
      "loss: 0.356353  [  128/30013]\n",
      "loss: 0.386327  [12928/30013]\n",
      "loss: 0.309294  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 90.7%\n",
      "Epoch 9\n",
      "-------------------------------\n",
      "loss: 0.323994  [  128/30013]\n",
      "loss: 0.241159  [12928/30013]\n",
      "loss: 0.286349  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.0%\n",
      "Epoch 10\n",
      "-------------------------------\n",
      "loss: 0.308393  [  128/30013]\n",
      "loss: 0.358582  [12928/30013]\n",
      "loss: 0.323757  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.6%\n",
      "Epoch 11\n",
      "-------------------------------\n",
      "loss: 0.235323  [  128/30013]\n",
      "loss: 0.273068  [12928/30013]\n",
      "loss: 0.332395  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.8%\n",
      "Epoch 12\n",
      "-------------------------------\n",
      "loss: 0.381082  [  128/30013]\n",
      "loss: 0.302797  [12928/30013]\n",
      "loss: 0.362643  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.7%\n",
      "Epoch 13\n",
      "-------------------------------\n",
      "loss: 0.303990  [  128/30013]\n",
      "loss: 0.329429  [12928/30013]\n",
      "loss: 0.296181  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.9%\n",
      "Epoch 14\n",
      "-------------------------------\n",
      "loss: 0.227900  [  128/30013]\n",
      "loss: 0.310280  [12928/30013]\n",
      "loss: 0.396385  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.1%\n",
      "Epoch 15\n",
      "-------------------------------\n",
      "loss: 0.276020  [  128/30013]\n",
      "loss: 0.308673  [12928/30013]\n",
      "loss: 0.249034  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.3%\n",
      "Epoch 16\n",
      "-------------------------------\n",
      "loss: 0.330170  [  128/30013]\n",
      "loss: 0.240862  [12928/30013]\n",
      "loss: 0.248404  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.6%\n",
      "Epoch 17\n",
      "-------------------------------\n",
      "loss: 0.195441  [  128/30013]\n",
      "loss: 0.272226  [12928/30013]\n",
      "loss: 0.429706  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.4%\n",
      "Epoch 18\n",
      "-------------------------------\n",
      "loss: 0.243607  [  128/30013]\n",
      "loss: 0.311848  [12928/30013]\n",
      "loss: 0.209399  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.7%\n",
      "Epoch 19\n",
      "-------------------------------\n",
      "loss: 0.268365  [  128/30013]\n",
      "loss: 0.262229  [12928/30013]\n",
      "loss: 0.248519  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.4%\n",
      "Epoch 20\n",
      "-------------------------------\n",
      "loss: 0.246701  [  128/30013]\n",
      "loss: 0.287617  [12928/30013]\n",
      "loss: 0.203444  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.7%\n",
      "Epoch 21\n",
      "-------------------------------\n",
      "loss: 0.271654  [  128/30013]\n",
      "loss: 0.326511  [12928/30013]\n",
      "loss: 0.269621  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.7%\n",
      "Epoch 22\n",
      "-------------------------------\n",
      "loss: 0.249843  [  128/30013]\n",
      "loss: 0.224441  [12928/30013]\n",
      "loss: 0.202414  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.0%\n",
      "Epoch 23\n",
      "-------------------------------\n",
      "loss: 0.238654  [  128/30013]\n",
      "loss: 0.264559  [12928/30013]\n",
      "loss: 0.216959  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.9%\n",
      "Epoch 24\n",
      "-------------------------------\n",
      "loss: 0.154526  [  128/30013]\n",
      "loss: 0.262510  [12928/30013]\n",
      "loss: 0.290772  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.1%\n",
      "Epoch 25\n",
      "-------------------------------\n",
      "loss: 0.198436  [  128/30013]\n",
      "loss: 0.268665  [12928/30013]\n",
      "loss: 0.271470  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.1%\n",
      "Epoch 26\n",
      "-------------------------------\n",
      "loss: 0.269020  [  128/30013]\n",
      "loss: 0.305013  [12928/30013]\n",
      "loss: 0.245603  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.2%\n",
      "Epoch 27\n",
      "-------------------------------\n",
      "loss: 0.207933  [  128/30013]\n",
      "loss: 0.378974  [12928/30013]\n",
      "loss: 0.223158  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.3%\n",
      "Epoch 28\n",
      "-------------------------------\n",
      "loss: 0.271251  [  128/30013]\n",
      "loss: 0.195187  [12928/30013]\n",
      "loss: 0.266353  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.3%\n",
      "Epoch 29\n",
      "-------------------------------\n",
      "loss: 0.209674  [  128/30013]\n",
      "loss: 0.250479  [12928/30013]\n",
      "loss: 0.272534  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.4%\n",
      "Epoch 30\n",
      "-------------------------------\n",
      "loss: 0.241850  [  128/30013]\n",
      "loss: 0.208285  [12928/30013]\n",
      "loss: 0.291753  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.4%\n",
      "Epoch 31\n",
      "-------------------------------\n",
      "loss: 0.280506  [  128/30013]\n",
      "loss: 0.322858  [12928/30013]\n",
      "loss: 0.336919  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.4%\n",
      "Epoch 32\n",
      "-------------------------------\n",
      "loss: 0.271319  [  128/30013]\n",
      "loss: 0.152212  [12928/30013]\n",
      "loss: 0.276459  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.5%\n",
      "Epoch 33\n",
      "-------------------------------\n",
      "loss: 0.240765  [  128/30013]\n",
      "loss: 0.141965  [12928/30013]\n",
      "loss: 0.215055  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.4%\n",
      "Epoch 34\n",
      "-------------------------------\n",
      "loss: 0.262101  [  128/30013]\n",
      "loss: 0.254957  [12928/30013]\n",
      "loss: 0.275057  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.4%\n",
      "Epoch 35\n",
      "-------------------------------\n",
      "loss: 0.178170  [  128/30013]\n",
      "loss: 0.441574  [12928/30013]\n",
      "loss: 0.183805  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.7%\n",
      "Epoch 36\n",
      "-------------------------------\n",
      "loss: 0.204564  [  128/30013]\n",
      "loss: 0.259283  [12928/30013]\n",
      "loss: 0.159440  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.5%\n",
      "Epoch 37\n",
      "-------------------------------\n",
      "loss: 0.249079  [  128/30013]\n",
      "loss: 0.309631  [12928/30013]\n",
      "loss: 0.363120  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.5%\n",
      "Epoch 38\n",
      "-------------------------------\n",
      "loss: 0.257507  [  128/30013]\n",
      "loss: 0.210635  [12928/30013]\n",
      "loss: 0.244767  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.8%\n",
      "Epoch 39\n",
      "-------------------------------\n",
      "loss: 0.228486  [  128/30013]\n",
      "loss: 0.163816  [12928/30013]\n",
      "loss: 0.236179  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.8%\n",
      "Epoch 40\n",
      "-------------------------------\n",
      "loss: 0.193651  [  128/30013]\n",
      "loss: 0.383222  [12928/30013]\n",
      "loss: 0.332366  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 94.0%\n",
      "Epoch 41\n",
      "-------------------------------\n",
      "loss: 0.288183  [  128/30013]\n",
      "loss: 0.326979  [12928/30013]\n",
      "loss: 0.200876  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.8%\n",
      "Epoch 42\n",
      "-------------------------------\n",
      "loss: 0.186294  [  128/30013]\n",
      "loss: 0.268224  [12928/30013]\n",
      "loss: 0.188833  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.8%\n",
      "Epoch 43\n",
      "-------------------------------\n",
      "loss: 0.194415  [  128/30013]\n",
      "loss: 0.277019  [12928/30013]\n",
      "loss: 0.256870  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 94.0%\n",
      "Epoch 44\n",
      "-------------------------------\n",
      "loss: 0.221543  [  128/30013]\n",
      "loss: 0.375831  [12928/30013]\n",
      "loss: 0.299735  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.9%\n",
      "Epoch 45\n",
      "-------------------------------\n",
      "loss: 0.271342  [  128/30013]\n",
      "loss: 0.211467  [12928/30013]\n",
      "loss: 0.201767  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.9%\n",
      "Epoch 46\n",
      "-------------------------------\n",
      "loss: 0.279294  [  128/30013]\n",
      "loss: 0.306264  [12928/30013]\n",
      "loss: 0.308860  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 94.0%\n",
      "Epoch 47\n",
      "-------------------------------\n",
      "loss: 0.246612  [  128/30013]\n",
      "loss: 0.171890  [12928/30013]\n",
      "loss: 0.223227  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.9%\n",
      "Epoch 48\n",
      "-------------------------------\n",
      "loss: 0.158364  [  128/30013]\n",
      "loss: 0.237137  [12928/30013]\n",
      "loss: 0.242891  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 94.1%\n",
      "Epoch 49\n",
      "-------------------------------\n",
      "loss: 0.238260  [  128/30013]\n",
      "loss: 0.185455  [12928/30013]\n",
      "loss: 0.199515  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.9%\n",
      "Epoch 50\n",
      "-------------------------------\n",
      "loss: 0.300939  [  128/30013]\n",
      "loss: 0.302188  [12928/30013]\n",
      "loss: 0.155559  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.9%\n",
      "Test Error: \n",
      " Accuracy: 98.5%, Avg loss: 0.047166 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 98.8%, Avg loss: 0.042969 \n",
      "\n",
      "train: 0.9878052843767701 test: 0.9854\n",
      " Error: \n",
      " Accuracy: 98.5%  \n",
      "\n",
      "AUC value is: 0.5033770700785832\n",
      "Accuracy is: 0.5029166666666667\n",
      "AUC value is: 0.4948020940239488\n",
      "Accuracy is: 0.4956833333333333\n",
      "Test Error: \n",
      " Accuracy: 98.5%, Avg loss: 0.052047 \n",
      "\n",
      "Epoch 1\n",
      "-------------------------------\n",
      "loss: 2.331173  [  128/30013]\n",
      "loss: 1.114476  [12928/30013]\n",
      "loss: 0.749988  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 55.9%\n",
      "Epoch 2\n",
      "-------------------------------\n",
      "loss: 0.701851  [  128/30013]\n",
      "loss: 0.788121  [12928/30013]\n",
      "loss: 0.504266  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 82.0%\n",
      "Epoch 3\n",
      "-------------------------------\n",
      "loss: 0.462783  [  128/30013]\n",
      "loss: 0.446523  [12928/30013]\n",
      "loss: 0.427575  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 86.5%\n",
      "Epoch 4\n",
      "-------------------------------\n",
      "loss: 0.367285  [  128/30013]\n",
      "loss: 0.370890  [12928/30013]\n",
      "loss: 0.426939  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 88.1%\n",
      "Epoch 5\n",
      "-------------------------------\n",
      "loss: 0.424298  [  128/30013]\n",
      "loss: 0.349621  [12928/30013]\n",
      "loss: 0.403682  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 89.0%\n",
      "Epoch 6\n",
      "-------------------------------\n",
      "loss: 0.384336  [  128/30013]\n",
      "loss: 0.297208  [12928/30013]\n",
      "loss: 0.398602  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 89.8%\n",
      "Epoch 7\n",
      "-------------------------------\n",
      "loss: 0.342820  [  128/30013]\n",
      "loss: 0.406783  [12928/30013]\n",
      "loss: 0.299436  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 90.2%\n",
      "Epoch 8\n",
      "-------------------------------\n",
      "loss: 0.291710  [  128/30013]\n",
      "loss: 0.296153  [12928/30013]\n",
      "loss: 0.283407  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 90.9%\n",
      "Epoch 9\n",
      "-------------------------------\n",
      "loss: 0.300281  [  128/30013]\n",
      "loss: 0.345328  [12928/30013]\n",
      "loss: 0.213430  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.0%\n",
      "Epoch 10\n",
      "-------------------------------\n",
      "loss: 0.398042  [  128/30013]\n",
      "loss: 0.387335  [12928/30013]\n",
      "loss: 0.266090  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.5%\n",
      "Epoch 11\n",
      "-------------------------------\n",
      "loss: 0.274263  [  128/30013]\n",
      "loss: 0.375094  [12928/30013]\n",
      "loss: 0.229306  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.6%\n",
      "Epoch 12\n",
      "-------------------------------\n",
      "loss: 0.218232  [  128/30013]\n",
      "loss: 0.362624  [12928/30013]\n",
      "loss: 0.284147  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.9%\n",
      "Epoch 13\n",
      "-------------------------------\n",
      "loss: 0.350173  [  128/30013]\n",
      "loss: 0.362730  [12928/30013]\n",
      "loss: 0.239040  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.1%\n",
      "Epoch 14\n",
      "-------------------------------\n",
      "loss: 0.357252  [  128/30013]\n",
      "loss: 0.336341  [12928/30013]\n",
      "loss: 0.324269  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.4%\n",
      "Epoch 15\n",
      "-------------------------------\n",
      "loss: 0.275757  [  128/30013]\n",
      "loss: 0.405217  [12928/30013]\n",
      "loss: 0.284195  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.3%\n",
      "Epoch 16\n",
      "-------------------------------\n",
      "loss: 0.200004  [  128/30013]\n",
      "loss: 0.243958  [12928/30013]\n",
      "loss: 0.281919  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.5%\n",
      "Epoch 17\n",
      "-------------------------------\n",
      "loss: 0.323592  [  128/30013]\n",
      "loss: 0.295432  [12928/30013]\n",
      "loss: 0.312895  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.4%\n",
      "Epoch 18\n",
      "-------------------------------\n",
      "loss: 0.250482  [  128/30013]\n",
      "loss: 0.371657  [12928/30013]\n",
      "loss: 0.229228  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.4%\n",
      "Epoch 19\n",
      "-------------------------------\n",
      "loss: 0.204866  [  128/30013]\n",
      "loss: 0.332368  [12928/30013]\n",
      "loss: 0.229147  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.7%\n",
      "Epoch 20\n",
      "-------------------------------\n",
      "loss: 0.262513  [  128/30013]\n",
      "loss: 0.333677  [12928/30013]\n",
      "loss: 0.273628  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.8%\n",
      "Epoch 21\n",
      "-------------------------------\n",
      "loss: 0.264820  [  128/30013]\n",
      "loss: 0.303616  [12928/30013]\n",
      "loss: 0.261843  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.7%\n",
      "Epoch 22\n",
      "-------------------------------\n",
      "loss: 0.298466  [  128/30013]\n",
      "loss: 0.239881  [12928/30013]\n",
      "loss: 0.305489  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.7%\n",
      "Epoch 23\n",
      "-------------------------------\n",
      "loss: 0.231130  [  128/30013]\n",
      "loss: 0.263784  [12928/30013]\n",
      "loss: 0.251083  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.2%\n",
      "Epoch 24\n",
      "-------------------------------\n",
      "loss: 0.227184  [  128/30013]\n",
      "loss: 0.178453  [12928/30013]\n",
      "loss: 0.174231  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.0%\n",
      "Epoch 25\n",
      "-------------------------------\n",
      "loss: 0.265527  [  128/30013]\n",
      "loss: 0.193946  [12928/30013]\n",
      "loss: 0.266863  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.9%\n",
      "Epoch 26\n",
      "-------------------------------\n",
      "loss: 0.195239  [  128/30013]\n",
      "loss: 0.203568  [12928/30013]\n",
      "loss: 0.301043  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.9%\n",
      "Epoch 27\n",
      "-------------------------------\n",
      "loss: 0.194035  [  128/30013]\n",
      "loss: 0.254449  [12928/30013]\n",
      "loss: 0.400890  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.0%\n",
      "Epoch 28\n",
      "-------------------------------\n",
      "loss: 0.163476  [  128/30013]\n",
      "loss: 0.172523  [12928/30013]\n",
      "loss: 0.280441  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.2%\n",
      "Epoch 29\n",
      "-------------------------------\n",
      "loss: 0.239859  [  128/30013]\n",
      "loss: 0.230859  [12928/30013]\n",
      "loss: 0.313689  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.2%\n",
      "Epoch 30\n",
      "-------------------------------\n",
      "loss: 0.197117  [  128/30013]\n",
      "loss: 0.336086  [12928/30013]\n",
      "loss: 0.221479  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.5%\n",
      "Epoch 31\n",
      "-------------------------------\n",
      "loss: 0.224253  [  128/30013]\n",
      "loss: 0.206956  [12928/30013]\n",
      "loss: 0.173177  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.3%\n",
      "Epoch 32\n",
      "-------------------------------\n",
      "loss: 0.274442  [  128/30013]\n",
      "loss: 0.317527  [12928/30013]\n",
      "loss: 0.215291  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.3%\n",
      "Epoch 33\n",
      "-------------------------------\n",
      "loss: 0.201512  [  128/30013]\n",
      "loss: 0.245869  [12928/30013]\n",
      "loss: 0.265842  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.5%\n",
      "Epoch 34\n",
      "-------------------------------\n",
      "loss: 0.292706  [  128/30013]\n",
      "loss: 0.211104  [12928/30013]\n",
      "loss: 0.203779  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.3%\n",
      "Epoch 35\n",
      "-------------------------------\n",
      "loss: 0.228200  [  128/30013]\n",
      "loss: 0.187969  [12928/30013]\n",
      "loss: 0.300376  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.6%\n",
      "Epoch 36\n",
      "-------------------------------\n",
      "loss: 0.149459  [  128/30013]\n",
      "loss: 0.301709  [12928/30013]\n",
      "loss: 0.282602  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.3%\n",
      "Epoch 37\n",
      "-------------------------------\n",
      "loss: 0.149043  [  128/30013]\n",
      "loss: 0.187280  [12928/30013]\n",
      "loss: 0.184025  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.5%\n",
      "Epoch 38\n",
      "-------------------------------\n",
      "loss: 0.225594  [  128/30013]\n",
      "loss: 0.325498  [12928/30013]\n",
      "loss: 0.222703  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.6%\n",
      "Epoch 39\n",
      "-------------------------------\n",
      "loss: 0.248499  [  128/30013]\n",
      "loss: 0.303638  [12928/30013]\n",
      "loss: 0.232882  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.6%\n",
      "Epoch 40\n",
      "-------------------------------\n",
      "loss: 0.224170  [  128/30013]\n",
      "loss: 0.195346  [12928/30013]\n",
      "loss: 0.189072  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.4%\n",
      "Epoch 41\n",
      "-------------------------------\n",
      "loss: 0.194654  [  128/30013]\n",
      "loss: 0.236322  [12928/30013]\n",
      "loss: 0.177699  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.7%\n",
      "Epoch 42\n",
      "-------------------------------\n",
      "loss: 0.200002  [  128/30013]\n",
      "loss: 0.253872  [12928/30013]\n",
      "loss: 0.194715  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.7%\n",
      "Epoch 43\n",
      "-------------------------------\n",
      "loss: 0.273877  [  128/30013]\n",
      "loss: 0.218703  [12928/30013]\n",
      "loss: 0.163365  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.7%\n",
      "Epoch 44\n",
      "-------------------------------\n",
      "loss: 0.291727  [  128/30013]\n",
      "loss: 0.300753  [12928/30013]\n",
      "loss: 0.239104  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.6%\n",
      "Epoch 45\n",
      "-------------------------------\n",
      "loss: 0.213082  [  128/30013]\n",
      "loss: 0.126812  [12928/30013]\n",
      "loss: 0.209377  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.9%\n",
      "Epoch 46\n",
      "-------------------------------\n",
      "loss: 0.251659  [  128/30013]\n",
      "loss: 0.261428  [12928/30013]\n",
      "loss: 0.243075  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.7%\n",
      "Epoch 47\n",
      "-------------------------------\n",
      "loss: 0.208884  [  128/30013]\n",
      "loss: 0.274349  [12928/30013]\n",
      "loss: 0.243302  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.7%\n",
      "Epoch 48\n",
      "-------------------------------\n",
      "loss: 0.178261  [  128/30013]\n",
      "loss: 0.290049  [12928/30013]\n",
      "loss: 0.196455  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.7%\n",
      "Epoch 49\n",
      "-------------------------------\n",
      "loss: 0.202960  [  128/30013]\n",
      "loss: 0.189688  [12928/30013]\n",
      "loss: 0.218702  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.8%\n",
      "Epoch 50\n",
      "-------------------------------\n",
      "loss: 0.187092  [  128/30013]\n",
      "loss: 0.404480  [12928/30013]\n",
      "loss: 0.270014  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 93.8%\n",
      "Test Error: \n",
      " Accuracy: 98.7%, Avg loss: 0.043383 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 98.8%, Avg loss: 0.041418 \n",
      "\n",
      "train: 0.9880051977476427 test: 0.987\n",
      " Error: \n",
      " Accuracy: 98.5%  \n",
      "\n",
      "AUC value is: 0.5059231583344598\n",
      "Accuracy is: 0.5038833333333333\n",
      "AUC value is: 0.4949021340427341\n",
      "Accuracy is: 0.49573333333333336\n",
      "Test Error: \n",
      " Accuracy: 98.5%, Avg loss: 0.049285 \n",
      "\n",
      "Epoch 1\n",
      "-------------------------------\n",
      "loss: 2.338571  [  128/30013]\n",
      "loss: 1.262041  [12928/30013]\n",
      "loss: 0.833376  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 54.9%\n",
      "Epoch 2\n",
      "-------------------------------\n",
      "loss: 0.725239  [  128/30013]\n",
      "loss: 0.466366  [12928/30013]\n",
      "loss: 0.358780  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 83.4%\n",
      "Epoch 3\n",
      "-------------------------------\n",
      "loss: 0.541961  [  128/30013]\n",
      "loss: 0.516331  [12928/30013]\n",
      "loss: 0.342387  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 87.8%\n",
      "Epoch 4\n",
      "-------------------------------\n",
      "loss: 0.494677  [  128/30013]\n",
      "loss: 0.437353  [12928/30013]\n",
      "loss: 0.389048  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 88.9%\n",
      "Epoch 5\n",
      "-------------------------------\n",
      "loss: 0.290545  [  128/30013]\n",
      "loss: 0.330691  [12928/30013]\n",
      "loss: 0.350512  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 89.4%\n",
      "Epoch 6\n",
      "-------------------------------\n",
      "loss: 0.467918  [  128/30013]\n",
      "loss: 0.366284  [12928/30013]\n",
      "loss: 0.396447  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 90.2%\n",
      "Epoch 7\n",
      "-------------------------------\n",
      "loss: 0.304801  [  128/30013]\n",
      "loss: 0.618553  [12928/30013]\n",
      "loss: 0.314717  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 90.4%\n",
      "Epoch 8\n",
      "-------------------------------\n",
      "loss: 0.250996  [  128/30013]\n",
      "loss: 0.477926  [12928/30013]\n",
      "loss: 0.297088  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 90.8%\n",
      "Epoch 9\n",
      "-------------------------------\n",
      "loss: 0.286351  [  128/30013]\n",
      "loss: 0.418327  [12928/30013]\n",
      "loss: 0.407469  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.0%\n",
      "Epoch 10\n",
      "-------------------------------\n",
      "loss: 0.337865  [  128/30013]\n",
      "loss: 0.479090  [12928/30013]\n",
      "loss: 0.300249  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.3%\n",
      "Epoch 11\n",
      "-------------------------------\n",
      "loss: 0.319382  [  128/30013]\n",
      "loss: 0.288795  [12928/30013]\n",
      "loss: 0.252549  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.3%\n",
      "Epoch 12\n",
      "-------------------------------\n",
      "loss: 0.278334  [  128/30013]\n",
      "loss: 0.385699  [12928/30013]\n",
      "loss: 0.340122  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.5%\n",
      "Epoch 13\n",
      "-------------------------------\n",
      "loss: 0.258747  [  128/30013]\n",
      "loss: 0.385903  [12928/30013]\n",
      "loss: 0.275229  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.4%\n",
      "Epoch 14\n",
      "-------------------------------\n",
      "loss: 0.231495  [  128/30013]\n",
      "loss: 0.383944  [12928/30013]\n",
      "loss: 0.320746  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.7%\n",
      "Epoch 15\n",
      "-------------------------------\n",
      "loss: 0.230294  [  128/30013]\n",
      "loss: 0.236844  [12928/30013]\n",
      "loss: 0.253416  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.4%\n",
      "Epoch 16\n",
      "-------------------------------\n",
      "loss: 0.250656  [  128/30013]\n",
      "loss: 0.346318  [12928/30013]\n",
      "loss: 0.350209  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.8%\n",
      "Epoch 17\n",
      "-------------------------------\n",
      "loss: 0.234202  [  128/30013]\n",
      "loss: 0.407511  [12928/30013]\n",
      "loss: 0.188236  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.0%\n",
      "Epoch 18\n",
      "-------------------------------\n",
      "loss: 0.300824  [  128/30013]\n",
      "loss: 0.292998  [12928/30013]\n",
      "loss: 0.296700  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.8%\n",
      "Epoch 19\n",
      "-------------------------------\n",
      "loss: 0.160073  [  128/30013]\n",
      "loss: 0.304183  [12928/30013]\n",
      "loss: 0.404383  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.8%\n",
      "Epoch 20\n",
      "-------------------------------\n",
      "loss: 0.237572  [  128/30013]\n",
      "loss: 0.284878  [12928/30013]\n",
      "loss: 0.245819  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.9%\n",
      "Epoch 21\n",
      "-------------------------------\n",
      "loss: 0.287075  [  128/30013]\n",
      "loss: 0.285218  [12928/30013]\n",
      "loss: 0.267512  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.9%\n",
      "Epoch 22\n",
      "-------------------------------\n",
      "loss: 0.318610  [  128/30013]\n",
      "loss: 0.396219  [12928/30013]\n",
      "loss: 0.293301  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.1%\n",
      "Epoch 23\n",
      "-------------------------------\n",
      "loss: 0.265092  [  128/30013]\n",
      "loss: 0.370980  [12928/30013]\n",
      "loss: 0.380946  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.1%\n",
      "Epoch 24\n",
      "-------------------------------\n",
      "loss: 0.285333  [  128/30013]\n",
      "loss: 0.289428  [12928/30013]\n",
      "loss: 0.228218  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.0%\n",
      "Epoch 25\n",
      "-------------------------------\n",
      "loss: 0.246354  [  128/30013]\n",
      "loss: 0.306953  [12928/30013]\n",
      "loss: 0.357207  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.1%\n",
      "Epoch 26\n",
      "-------------------------------\n",
      "loss: 0.256305  [  128/30013]\n",
      "loss: 0.161287  [12928/30013]\n",
      "loss: 0.256427  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.5%\n",
      "Epoch 27\n",
      "-------------------------------\n",
      "loss: 0.283388  [  128/30013]\n",
      "loss: 0.295197  [12928/30013]\n",
      "loss: 0.237465  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.0%\n",
      "Epoch 28\n",
      "-------------------------------\n",
      "loss: 0.262056  [  128/30013]\n",
      "loss: 0.274316  [12928/30013]\n",
      "loss: 0.277686  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.1%\n",
      "Epoch 29\n",
      "-------------------------------\n",
      "loss: 0.273332  [  128/30013]\n",
      "loss: 0.364063  [12928/30013]\n",
      "loss: 0.295807  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.3%\n",
      "Epoch 30\n",
      "-------------------------------\n",
      "loss: 0.254632  [  128/30013]\n",
      "loss: 0.339569  [12928/30013]\n",
      "loss: 0.334869  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.1%\n",
      "Epoch 31\n",
      "-------------------------------\n",
      "loss: 0.334304  [  128/30013]\n",
      "loss: 0.308419  [12928/30013]\n",
      "loss: 0.235866  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.2%\n",
      "Epoch 32\n",
      "-------------------------------\n",
      "loss: 0.283015  [  128/30013]\n",
      "loss: 0.215895  [12928/30013]\n",
      "loss: 0.292053  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.5%\n",
      "Epoch 33\n",
      "-------------------------------\n",
      "loss: 0.251473  [  128/30013]\n",
      "loss: 0.313418  [12928/30013]\n",
      "loss: 0.317425  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.2%\n",
      "Epoch 34\n",
      "-------------------------------\n",
      "loss: 0.281439  [  128/30013]\n",
      "loss: 0.337932  [12928/30013]\n",
      "loss: 0.389402  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.3%\n",
      "Epoch 35\n",
      "-------------------------------\n",
      "loss: 0.279928  [  128/30013]\n",
      "loss: 0.268277  [12928/30013]\n",
      "loss: 0.244540  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.4%\n",
      "Epoch 36\n",
      "-------------------------------\n",
      "loss: 0.290154  [  128/30013]\n",
      "loss: 0.290542  [12928/30013]\n",
      "loss: 0.228478  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.3%\n",
      "Epoch 37\n",
      "-------------------------------\n",
      "loss: 0.286983  [  128/30013]\n",
      "loss: 0.257189  [12928/30013]\n",
      "loss: 0.306778  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.3%\n",
      "Epoch 38\n",
      "-------------------------------\n",
      "loss: 0.281532  [  128/30013]\n",
      "loss: 0.297613  [12928/30013]\n",
      "loss: 0.289913  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.5%\n",
      "Epoch 39\n",
      "-------------------------------\n",
      "loss: 0.261327  [  128/30013]\n",
      "loss: 0.309479  [12928/30013]\n",
      "loss: 0.338151  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.6%\n",
      "Epoch 40\n",
      "-------------------------------\n",
      "loss: 0.249918  [  128/30013]\n",
      "loss: 0.394027  [12928/30013]\n",
      "loss: 0.261733  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.4%\n",
      "Epoch 41\n",
      "-------------------------------\n",
      "loss: 0.259454  [  128/30013]\n",
      "loss: 0.309360  [12928/30013]\n",
      "loss: 0.280405  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.4%\n",
      "Epoch 42\n",
      "-------------------------------\n",
      "loss: 0.216425  [  128/30013]\n",
      "loss: 0.301430  [12928/30013]\n",
      "loss: 0.266089  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.2%\n",
      "Epoch 43\n",
      "-------------------------------\n",
      "loss: 0.359973  [  128/30013]\n",
      "loss: 0.269413  [12928/30013]\n",
      "loss: 0.267062  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.6%\n",
      "Epoch 44\n",
      "-------------------------------\n",
      "loss: 0.193824  [  128/30013]\n",
      "loss: 0.181593  [12928/30013]\n",
      "loss: 0.315084  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.4%\n",
      "Epoch 45\n",
      "-------------------------------\n",
      "loss: 0.282670  [  128/30013]\n",
      "loss: 0.352462  [12928/30013]\n",
      "loss: 0.314335  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.6%\n",
      "Epoch 46\n",
      "-------------------------------\n",
      "loss: 0.228764  [  128/30013]\n",
      "loss: 0.324866  [12928/30013]\n",
      "loss: 0.227271  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.4%\n",
      "Epoch 47\n",
      "-------------------------------\n",
      "loss: 0.281566  [  128/30013]\n",
      "loss: 0.320821  [12928/30013]\n",
      "loss: 0.225205  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.2%\n",
      "Epoch 48\n",
      "-------------------------------\n",
      "loss: 0.380460  [  128/30013]\n",
      "loss: 0.337430  [12928/30013]\n",
      "loss: 0.323032  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.5%\n",
      "Epoch 49\n",
      "-------------------------------\n",
      "loss: 0.356268  [  128/30013]\n",
      "loss: 0.323928  [12928/30013]\n",
      "loss: 0.231298  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.5%\n",
      "Epoch 50\n",
      "-------------------------------\n",
      "loss: 0.241760  [  128/30013]\n",
      "loss: 0.246879  [12928/30013]\n",
      "loss: 0.202975  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 92.5%\n",
      "Test Error: \n",
      " Accuracy: 98.5%, Avg loss: 0.050460 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 98.4%, Avg loss: 0.053671 \n",
      "\n",
      "train: 0.9843068003864992 test: 0.9851\n",
      " Error: \n",
      " Accuracy: 98.2%  \n",
      "\n",
      "AUC value is: 0.501928733139951\n",
      "Accuracy is: 0.5013666666666666\n",
      "AUC value is: 0.4942803683704247\n",
      "Accuracy is: 0.4956\n",
      "Test Error: \n",
      " Accuracy: 98.2%, Avg loss: 0.058859 \n",
      "\n",
      "Epoch 1\n",
      "-------------------------------\n",
      "loss: 2.302443  [  128/30013]\n",
      "loss: 1.197158  [12928/30013]\n",
      "loss: 0.753145  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 57.8%\n",
      "Epoch 2\n",
      "-------------------------------\n",
      "loss: 0.671356  [  128/30013]\n",
      "loss: 0.513342  [12928/30013]\n",
      "loss: 0.463533  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 83.6%\n",
      "Epoch 3\n",
      "-------------------------------\n",
      "loss: 0.510633  [  128/30013]\n",
      "loss: 0.550422  [12928/30013]\n",
      "loss: 0.436305  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 87.1%\n",
      "Epoch 4\n",
      "-------------------------------\n",
      "loss: 0.363188  [  128/30013]\n",
      "loss: 0.390180  [12928/30013]\n",
      "loss: 0.432063  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 88.5%\n",
      "Epoch 5\n",
      "-------------------------------\n",
      "loss: 0.346475  [  128/30013]\n",
      "loss: 0.426073  [12928/30013]\n",
      "loss: 0.427235  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 89.3%\n",
      "Epoch 6\n",
      "-------------------------------\n",
      "loss: 0.392577  [  128/30013]\n",
      "loss: 0.424365  [12928/30013]\n",
      "loss: 0.334640  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 89.7%\n",
      "Epoch 7\n",
      "-------------------------------\n",
      "loss: 0.360399  [  128/30013]\n",
      "loss: 0.436902  [12928/30013]\n",
      "loss: 0.309126  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 90.2%\n",
      "Epoch 8\n",
      "-------------------------------\n",
      "loss: 0.489097  [  128/30013]\n",
      "loss: 0.362686  [12928/30013]\n",
      "loss: 0.244554  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 90.6%\n",
      "Epoch 9\n",
      "-------------------------------\n",
      "loss: 0.340674  [  128/30013]\n",
      "loss: 0.390516  [12928/30013]\n",
      "loss: 0.342983  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 90.4%\n",
      "Epoch 10\n",
      "-------------------------------\n",
      "loss: 0.327215  [  128/30013]\n",
      "loss: 0.474144  [12928/30013]\n",
      "loss: 0.383665  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 90.7%\n",
      "Epoch 11\n",
      "-------------------------------\n",
      "loss: 0.321807  [  128/30013]\n",
      "loss: 0.346666  [12928/30013]\n",
      "loss: 0.351297  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.0%\n",
      "Epoch 12\n",
      "-------------------------------\n",
      "loss: 0.317182  [  128/30013]\n",
      "loss: 0.355490  [12928/30013]\n",
      "loss: 0.268218  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 90.9%\n",
      "Epoch 13\n",
      "-------------------------------\n",
      "loss: 0.355842  [  128/30013]\n",
      "loss: 0.345775  [12928/30013]\n",
      "loss: 0.325817  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.1%\n",
      "Epoch 14\n",
      "-------------------------------\n",
      "loss: 0.245451  [  128/30013]\n",
      "loss: 0.372604  [12928/30013]\n",
      "loss: 0.307103  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.1%\n",
      "Epoch 15\n",
      "-------------------------------\n",
      "loss: 0.367970  [  128/30013]\n",
      "loss: 0.383119  [12928/30013]\n",
      "loss: 0.435656  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.0%\n",
      "Epoch 16\n",
      "-------------------------------\n",
      "loss: 0.317700  [  128/30013]\n",
      "loss: 0.226721  [12928/30013]\n",
      "loss: 0.372318  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 90.9%\n",
      "Epoch 17\n",
      "-------------------------------\n",
      "loss: 0.275525  [  128/30013]\n",
      "loss: 0.413555  [12928/30013]\n",
      "loss: 0.248872  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.5%\n",
      "Epoch 18\n",
      "-------------------------------\n",
      "loss: 0.326244  [  128/30013]\n",
      "loss: 0.328654  [12928/30013]\n",
      "loss: 0.284611  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.3%\n",
      "Epoch 19\n",
      "-------------------------------\n",
      "loss: 0.299409  [  128/30013]\n",
      "loss: 0.276537  [12928/30013]\n",
      "loss: 0.286445  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.3%\n",
      "Epoch 20\n",
      "-------------------------------\n",
      "loss: 0.247938  [  128/30013]\n",
      "loss: 0.277477  [12928/30013]\n",
      "loss: 0.342052  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.2%\n",
      "Epoch 21\n",
      "-------------------------------\n",
      "loss: 0.311955  [  128/30013]\n",
      "loss: 0.484201  [12928/30013]\n",
      "loss: 0.303549  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.2%\n",
      "Epoch 22\n",
      "-------------------------------\n",
      "loss: 0.289728  [  128/30013]\n",
      "loss: 0.294937  [12928/30013]\n",
      "loss: 0.276454  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.2%\n",
      "Epoch 23\n",
      "-------------------------------\n",
      "loss: 0.268517  [  128/30013]\n",
      "loss: 0.386813  [12928/30013]\n",
      "loss: 0.321269  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.4%\n",
      "Epoch 24\n",
      "-------------------------------\n",
      "loss: 0.380332  [  128/30013]\n",
      "loss: 0.370753  [12928/30013]\n",
      "loss: 0.299450  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.3%\n",
      "Epoch 25\n",
      "-------------------------------\n",
      "loss: 0.303989  [  128/30013]\n",
      "loss: 0.337375  [12928/30013]\n",
      "loss: 0.258502  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.6%\n",
      "Epoch 26\n",
      "-------------------------------\n",
      "loss: 0.326921  [  128/30013]\n",
      "loss: 0.337873  [12928/30013]\n",
      "loss: 0.332721  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.6%\n",
      "Epoch 27\n",
      "-------------------------------\n",
      "loss: 0.264561  [  128/30013]\n",
      "loss: 0.402788  [12928/30013]\n",
      "loss: 0.268934  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.3%\n",
      "Epoch 28\n",
      "-------------------------------\n",
      "loss: 0.446984  [  128/30013]\n",
      "loss: 0.316441  [12928/30013]\n",
      "loss: 0.300642  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.3%\n",
      "Epoch 29\n",
      "-------------------------------\n",
      "loss: 0.221313  [  128/30013]\n",
      "loss: 0.332944  [12928/30013]\n",
      "loss: 0.287723  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.4%\n",
      "Epoch 30\n",
      "-------------------------------\n",
      "loss: 0.345655  [  128/30013]\n",
      "loss: 0.344520  [12928/30013]\n",
      "loss: 0.441613  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.3%\n",
      "Epoch 31\n",
      "-------------------------------\n",
      "loss: 0.346999  [  128/30013]\n",
      "loss: 0.262753  [12928/30013]\n",
      "loss: 0.274762  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.5%\n",
      "Epoch 32\n",
      "-------------------------------\n",
      "loss: 0.256569  [  128/30013]\n",
      "loss: 0.355219  [12928/30013]\n",
      "loss: 0.333255  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.4%\n",
      "Epoch 33\n",
      "-------------------------------\n",
      "loss: 0.414231  [  128/30013]\n",
      "loss: 0.353398  [12928/30013]\n",
      "loss: 0.313105  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.7%\n",
      "Epoch 34\n",
      "-------------------------------\n",
      "loss: 0.328573  [  128/30013]\n",
      "loss: 0.273580  [12928/30013]\n",
      "loss: 0.278285  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.5%\n",
      "Epoch 35\n",
      "-------------------------------\n",
      "loss: 0.370736  [  128/30013]\n",
      "loss: 0.392332  [12928/30013]\n",
      "loss: 0.387995  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.3%\n",
      "Epoch 36\n",
      "-------------------------------\n",
      "loss: 0.257980  [  128/30013]\n",
      "loss: 0.441880  [12928/30013]\n",
      "loss: 0.307545  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.5%\n",
      "Epoch 37\n",
      "-------------------------------\n",
      "loss: 0.367704  [  128/30013]\n",
      "loss: 0.313921  [12928/30013]\n",
      "loss: 0.253294  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.5%\n",
      "Epoch 38\n",
      "-------------------------------\n",
      "loss: 0.370623  [  128/30013]\n",
      "loss: 0.295989  [12928/30013]\n",
      "loss: 0.317588  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.5%\n",
      "Epoch 39\n",
      "-------------------------------\n",
      "loss: 0.244124  [  128/30013]\n",
      "loss: 0.328788  [12928/30013]\n",
      "loss: 0.342811  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.6%\n",
      "Epoch 40\n",
      "-------------------------------\n",
      "loss: 0.220082  [  128/30013]\n",
      "loss: 0.391808  [12928/30013]\n",
      "loss: 0.354668  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.7%\n",
      "Epoch 41\n",
      "-------------------------------\n",
      "loss: 0.238853  [  128/30013]\n",
      "loss: 0.365508  [12928/30013]\n",
      "loss: 0.277144  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.6%\n",
      "Epoch 42\n",
      "-------------------------------\n",
      "loss: 0.293672  [  128/30013]\n",
      "loss: 0.276100  [12928/30013]\n",
      "loss: 0.267052  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.6%\n",
      "Epoch 43\n",
      "-------------------------------\n",
      "loss: 0.315201  [  128/30013]\n",
      "loss: 0.294057  [12928/30013]\n",
      "loss: 0.347205  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.4%\n",
      "Epoch 44\n",
      "-------------------------------\n",
      "loss: 0.345652  [  128/30013]\n",
      "loss: 0.226316  [12928/30013]\n",
      "loss: 0.394405  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.6%\n",
      "Epoch 45\n",
      "-------------------------------\n",
      "loss: 0.413002  [  128/30013]\n",
      "loss: 0.351622  [12928/30013]\n",
      "loss: 0.290472  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.8%\n",
      "Epoch 46\n",
      "-------------------------------\n",
      "loss: 0.357280  [  128/30013]\n",
      "loss: 0.290631  [12928/30013]\n",
      "loss: 0.268742  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.7%\n",
      "Epoch 47\n",
      "-------------------------------\n",
      "loss: 0.332522  [  128/30013]\n",
      "loss: 0.251742  [12928/30013]\n",
      "loss: 0.280399  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.4%\n",
      "Epoch 48\n",
      "-------------------------------\n",
      "loss: 0.428883  [  128/30013]\n",
      "loss: 0.314611  [12928/30013]\n",
      "loss: 0.359000  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.7%\n",
      "Epoch 49\n",
      "-------------------------------\n",
      "loss: 0.362042  [  128/30013]\n",
      "loss: 0.437160  [12928/30013]\n",
      "loss: 0.323004  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.7%\n",
      "Epoch 50\n",
      "-------------------------------\n",
      "loss: 0.410589  [  128/30013]\n",
      "loss: 0.483560  [12928/30013]\n",
      "loss: 0.272685  [25728/30013]\n",
      "Train Error: \n",
      " Accuracy: 91.6%\n",
      "Test Error: \n",
      " Accuracy: 98.2%, Avg loss: 0.062501 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 97.9%, Avg loss: 0.070328 \n",
      "\n",
      "train: 0.9790424149535202 test: 0.9815\n",
      " Error: \n",
      " Accuracy: 97.7%  \n",
      "\n",
      "AUC value is: 0.5021612681836158\n",
      "Accuracy is: 0.50225\n",
      "AUC value is: 0.4941176616732053\n",
      "Accuracy is: 0.49565\n",
      "Test Error: \n",
      " Accuracy: 97.7%, Avg loss: 0.075083 \n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Experiment: for each L2 weight-decay value, train a fresh target CNN on an\n",
    "# MNIST subset, record the train/test accuracy gap, then evaluate three\n",
    "# membership-inference attacks against it (shadow-model attack, LiRA\n",
    "# likelihood-ratio attack, loss-threshold baseline) and store each accuracy.\n",
    "# NOTE(review): this cell reads notebook-global state defined in earlier cells\n",
    "# (batch_size, device, l2_norm_list, test_dataloader, attack_model, score_all,\n",
    "# pri_risk_all, pri_risk_rank, train_keep, tar_model, x, X_data, Y_data, and\n",
    "# the result-accumulator lists) — confirm all are set on a fresh kernel.\n",
    "(x_train, y_train), (x_test, y_test), train_keep_exp, test_keep_exp = load_MNIST(0, 100, prop_keep=0.5, seed=0)\n",
    "training_data = CustomDataset(x_train, y_train, model_transform)\n",
    "train_dataloader = DataLoader(training_data, batch_size=batch_size)\n",
    "\n",
    "\n",
    "for l2_norm in l2_norm_list:\n",
    "    \n",
    "    # Fresh target model per weight-decay setting; output width inferred from labels.\n",
    "    TargetModel = M_CNN_dropout(y_train.max()+1)\n",
    "    # print(TargetModel)\n",
    "    TargetModel.to(device)\n",
    "    loss_fn = nn.CrossEntropyLoss()\n",
    "    # l2_norm is applied as Adam's weight_decay — the variable under study here.\n",
    "    optimizer = torch.optim.Adam(TargetModel.parameters(), lr=1e-3, weight_decay=l2_norm)\n",
    "    # optimizer = torch.optim.Adam(TargetModel.parameters(), lr=2e-4)\n",
    "    for t in range(50):\n",
    "        print(f\"Epoch {t+1}\\n-------------------------------\")\n",
    "        train(train_dataloader, TargetModel, loss_fn, optimizer, device)\n",
    "    \n",
    "    # Record train/test accuracy and the generalization gap (overfitting proxy).\n",
    "    test_acc = evaluate(test_dataloader, TargetModel, loss_fn, device)\n",
    "    train_acc = evaluate(train_dataloader, TargetModel, loss_fn, device)\n",
    "    test_acc_list.append(test_acc)\n",
    "    train_acc_list.append(train_acc)\n",
    "    print(\"train:\",train_acc,\"test:\",test_acc)\n",
    "    distance = train_acc - test_acc\n",
    "    gene_distance.append(distance)\n",
    "    \n",
    "    \n",
    "    # NOTE(review): x_test/y_test returned by load_MNIST above are overwritten\n",
    "    # here with the full attack-evaluation pool — distinct names would be clearer.\n",
    "    x_test = X_data\n",
    "    y_test = Y_data\n",
    "    mem_label = train_keep[0]\n",
    "    # Load the target model's predictions on the whole pool\n",
    "    \n",
    "    all_data = CustomDataset(x_test, y_test, model_transform)\n",
    "    all_dataloader = DataLoader(all_data, batch_size=batch_size)\n",
    "    \n",
    "    # Per-sample confidences and labels from the target model, as float64 numpy.\n",
    "    conf_data, label_data = get_model_pred(all_dataloader, TargetModel, device)\n",
    "    conf_data = conf_data.detach().cpu().numpy()\n",
    "    label_data = label_data.detach().cpu().numpy()\n",
    "    conf_data = conf_data.astype(np.float64)\n",
    "    score_tar = cal_score(conf_data.copy(), label_data)\n",
    "    \n",
    "    # Run the shadow-model attack: features are the top-3 confidences plus a\n",
    "    # 0/1 prediction-correctness indicator, fed to the pre-trained attack_model.\n",
    "    targetX = conf_data\n",
    "    pred_cor = (targetX.argmax(1) == Y_data).astype(int)\n",
    "    targetY = mem_label\n",
    "    targetX, _ = get_top_k_conf(3, targetX, targetX)\n",
    "    targetX = np.concatenate((targetX, pred_cor.reshape(pred_cor.shape[0],1)), 1)\n",
    "    targetX = targetX.astype(np.float32)\n",
    "    shadow_attack_data = CustomDataset(targetX, targetY, attack_transform)\n",
    "    shadow_attack_dataloader = DataLoader(shadow_attack_data, batch_size=batch_size, shuffle=False)\n",
    "    attack_test_scores, attack_test_mem = get_attack_pred(shadow_attack_dataloader, attack_model, device)\n",
    "    attack_test_scores, attack_test_mem = attack_test_scores.detach().cpu().numpy(), attack_test_mem.detach().cpu().numpy()\n",
    "    accuracy = evaluate_ROC(attack_test_scores, attack_test_mem)\n",
    "    shadow_result.append(accuracy)\n",
    "    \n",
    "    # Run the risk-assessment (LiRA) attack, evaluated only on the x samples\n",
    "    # ranked highest by the precomputed privacy-risk score.\n",
    "    # NOTE(review): score_tar is recomputed here identically to the call above —\n",
    "    # appears redundant.\n",
    "    score_tar = cal_score(conf_data.copy(), label_data)\n",
    "    pri_risk_t = pri_risk_all\n",
    "    pri_risk_rank_t = np.argsort(pri_risk_t)\n",
    "    pri_risk_rank_t = np.flip(pri_risk_rank_t)\n",
    "    \n",
    "    pred_result = LIRA_attack(train_keep, score_all, score_tar, mem_label)\n",
    "    # NOTE(review): return value discarded — presumably called for its printed\n",
    "    # AUC/accuracy side effect; confirm against evaluate_ROC's definition.\n",
    "    evaluate_ROC(pred_result, mem_label, threshold=0)\n",
    "    pred_clip = pred_result[pri_risk_rank_t[:x]]\n",
    "    mem_clip = mem_label[pri_risk_rank_t[:x]]\n",
    "    pred_clip = pred_clip > 0\n",
    "    accuracy = metrics.accuracy_score(mem_clip, pred_clip)\n",
    "    LIRA_result.append(accuracy)\n",
    "    \n",
    "    \n",
    "    # Run the baseline (loss-threshold) attack, overall and on the same\n",
    "    # high-risk subset (note: uses the global pri_risk_rank here, not\n",
    "    # pri_risk_rank_t — verify this asymmetry with the LiRA branch is intended).\n",
    "    loss_fn = nn.CrossEntropyLoss()\n",
    "    pred_result = base_attack(all_dataloader, TargetModel, loss_fn, device)\n",
    "    accuracy = metrics.accuracy_score(train_keep[tar_model], pred_result)\n",
    "    base_result.append(accuracy)\n",
    "    \n",
    "    pred_clip = pred_result[pri_risk_rank[:x]]\n",
    "    mem_clip = train_keep[tar_model][pri_risk_rank[:x]]\n",
    "    accuracy = metrics.accuracy_score(mem_clip, pred_clip)\n",
    "    risk_base_result.append(accuracy)\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "id": "e5e501a5-a615-4446-92a6-9ecd3cf47f49",
   "metadata": {},
   "outputs": [],
   "source": [
    "# x = 100\n",
    "# shadow_result = []\n",
    "# LIRA_result = []\n",
    "# gene_distance = []\n",
    "# base_result = []\n",
    "# risk_base_result = []\n",
    "# test_acc_list = []\n",
    "# train_acc_list = []"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "id": "9ebd60c9-e09f-4963-ac40-b265fb650153",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Test Error: \n",
      " Accuracy: 98.9%, Avg loss: 0.039363 \n",
      "\n",
      "Test Error: \n",
      " Accuracy: 98.9%, Avg loss: 0.037609 \n",
      "\n",
      "train: 0.9886049378602606 test: 0.989\n",
      " Error: \n",
      " Accuracy: 98.6%  \n",
      "\n",
      "AUC value is: 0.5047665108950449\n",
      "Accuracy is: 0.5026666666666667\n",
      "AUC value is: 0.49516395964745463\n",
      "Accuracy is: 0.4957666666666667\n",
      "Test Error: \n",
      " Accuracy: 98.6%, Avg loss: 0.046183 \n",
      "\n"
     ]
    }
   ],
   "source": [
    "# test_acc = evaluate(test_dataloader, TargetModel, loss_fn, device)\n",
    "# train_acc = evaluate(train_dataloader, TargetModel, loss_fn, device)\n",
    "# test_acc_list.append(test_acc)\n",
    "# train_acc_list.append(train_acc)\n",
    "# print(\"train:\",train_acc,\"test:\",test_acc)\n",
    "# distance = train_acc - test_acc\n",
    "# gene_distance.append(distance)\n",
    "\n",
    "\n",
    "# x_test = X_data\n",
    "# y_test = Y_data\n",
    "# mem_label = train_keep[0]\n",
    "# # Load the target model\n",
    "\n",
    "# all_data = CustomDataset(x_test, y_test, model_transform)\n",
    "# all_dataloader = DataLoader(all_data, batch_size=batch_size)\n",
    "\n",
    "# conf_data, label_data = get_model_pred(all_dataloader, TargetModel, device)\n",
    "# conf_data = conf_data.detach().cpu().numpy()\n",
    "# label_data = label_data.detach().cpu().numpy()\n",
    "# conf_data = conf_data.astype(np.float64)\n",
    "# score_tar = cal_score(conf_data.copy(), label_data)\n",
    "\n",
    "# # Run the shadow-model attack\n",
    "# targetX = conf_data\n",
    "# pred_cor = (targetX.argmax(1) == Y_data).astype(int)\n",
    "# targetY = mem_label\n",
    "# targetX, _ = get_top_k_conf(3, targetX, targetX)\n",
    "# targetX = np.concatenate((targetX, pred_cor.reshape(pred_cor.shape[0],1)), 1)\n",
    "# targetX = targetX.astype(np.float32)\n",
    "# shadow_attack_data = CustomDataset(targetX, targetY, attack_transform)\n",
    "# shadow_attack_dataloader = DataLoader(shadow_attack_data, batch_size=batch_size, shuffle=False)\n",
    "# attack_test_scores, attack_test_mem = get_attack_pred(shadow_attack_dataloader, attack_model, device)\n",
    "# attack_test_scores, attack_test_mem = attack_test_scores.detach().cpu().numpy(), attack_test_mem.detach().cpu().numpy()\n",
    "# accuracy = evaluate_ROC(attack_test_scores, attack_test_mem)\n",
    "# shadow_result.append(accuracy)\n",
    "\n",
    "# # Run the risk-assessment attack\n",
    "# score_tar = cal_score(conf_data.copy(), label_data)\n",
    "# pri_risk_t = pri_risk_all\n",
    "# pri_risk_rank_t = np.argsort(pri_risk_t)\n",
    "# pri_risk_rank_t = np.flip(pri_risk_rank_t)\n",
    "\n",
    "# pred_result = LIRA_attack(train_keep, score_all, score_tar, mem_label)\n",
    "# evaluate_ROC(pred_result, mem_label, threshold=0)\n",
    "# pred_clip = pred_result[pri_risk_rank_t[:x]]\n",
    "# mem_clip = mem_label[pri_risk_rank_t[:x]]\n",
    "# pred_clip = pred_clip > 0\n",
    "# accuracy = metrics.accuracy_score(mem_clip, pred_clip)\n",
    "# LIRA_result.append(accuracy)\n",
    "\n",
    "\n",
    "# # Run the baseline attack\n",
    "# loss_fn = nn.CrossEntropyLoss()\n",
    "# pred_result = base_attack(all_dataloader, TargetModel, loss_fn, device)\n",
    "# accuracy = metrics.accuracy_score(train_keep[tar_model], pred_result)\n",
    "# base_result.append(accuracy)\n",
    "\n",
    "# pred_clip = pred_result[pri_risk_rank[:x]]\n",
    "# mem_clip = train_keep[tar_model][pri_risk_rank[:x]]\n",
    "# accuracy = metrics.accuracy_score(mem_clip, pred_clip)\n",
    "# risk_base_result.append(accuracy)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "45322238-0672-4585-ac5b-40a0ca008780",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "22214a9f-fcbf-42df-bbb8-738590316cb3",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "id": "d6d99b52-cc42-4abd-a40a-f6d5a6de3b0f",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[0.9878, 0.9854, 0.987, 0.9851, 0.9815]"
      ]
     },
     "execution_count": 32,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "test_acc_list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "id": "37336607-fe70-4b5b-8162-8ba41e044c63",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[0.9898044180854962,\n",
       " 0.9878052843767701,\n",
       " 0.9880051977476427,\n",
       " 0.9843068003864992,\n",
       " 0.9790424149535202]"
      ]
     },
     "execution_count": 33,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "train_acc_list"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "id": "6a11071c-e193-41cc-bfaa-78bfeebe68a0",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[0.002004418085496229,\n",
       " 0.0024052843767700116,\n",
       " 0.0010051977476427387,\n",
       " -0.0007931996135007724,\n",
       " -0.00245758504647986]"
      ]
     },
     "execution_count": 34,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "gene_distance"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "id": "73688eb5-46a6-4f60-b69f-7f5f356ccfec",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[0.5045166666666666,\n",
       " 0.5029166666666667,\n",
       " 0.5038833333333333,\n",
       " 0.5013666666666666,\n",
       " 0.50225]"
      ]
     },
     "execution_count": 35,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "shadow_result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "id": "b2fd8e89-871b-47f7-8f8e-e314ce33e05f",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[0.5283333333333333,\n",
       " 0.5133333333333333,\n",
       " 0.5166666666666667,\n",
       " 0.5033333333333333,\n",
       " 0.5083333333333333]"
      ]
     },
     "execution_count": 36,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "LIRA_result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "id": "2fb5d6a1-2c24-4b85-afd1-a05622ca03fb",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[0.5029666666666667, 0.5033333333333333, 0.50295, 0.5022333333333333, 0.50215]"
      ]
     },
     "execution_count": 37,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "base_result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "id": "8a5d51d5-d8a5-44f3-af64-ba523aa7f856",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[0.585, 0.5766666666666667, 0.5616666666666666, 0.5466666666666666, 0.53]"
      ]
     },
     "execution_count": 38,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "risk_base_result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3513ae24-c20d-4f13-91f0-25ba57053571",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3f1d9e66-b8cb-45ad-92ad-d99dd74d6ad6",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "35ad42c1-3598-4c85-bcd5-4b76c1fc7b5a",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "a9b3ad57-7f95-4bc7-8057-05aa0ca4e10a",
   "metadata": {},
   "source": [
    "### Plot attack success rate as a function of privacy risk"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "id": "03e43bb9-0680-487b-8433-805cb202031f",
   "metadata": {},
   "outputs": [],
   "source": [
    "pred_all = conf_data_all.argmax(2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "id": "b7dda5b4-e79d-40b7-beb8-86d21143ac11",
   "metadata": {},
   "outputs": [],
   "source": [
    "base_att = (pred_all == Y_data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "id": "a79890e7-c8ea-4ddd-9e0a-c51831260ad9",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(100, 60000)"
      ]
     },
     "execution_count": 40,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "base_att.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "id": "18020329-0447-496c-bffa-a59f865df12c",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(100, 60000)"
      ]
     },
     "execution_count": 41,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "train_keep.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "id": "67ad94bc-8667-4f2b-ab7b-3d32a98e97d8",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(60000,)"
      ]
     },
     "execution_count": 42,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "pri_risk_all.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "id": "973858e9-c4fd-47cd-9941-c5730dfff80a",
   "metadata": {},
   "outputs": [],
   "source": [
    "X_axi = []\n",
    "Y_axi = []"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "id": "9769de6f-da9b-4523-a9c4-d16bb020ad1b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# For each of the first 10000 samples, compute the baseline attack's accuracy\n",
    "# across all target models and pair it with that sample's privacy-risk score;\n",
    "# then persist the (risk, attack accuracy) pairs for plotting.\n",
    "for sample_idx in range(10000):\n",
    "    sample_preds = base_att[:, sample_idx]\n",
    "    sample_membership = train_keep[:, sample_idx]\n",
    "    X_axi.append(pri_risk_all[sample_idx])\n",
    "    Y_axi.append(metrics.accuracy_score(sample_membership, sample_preds))\n",
    "\n",
    "df = pd.DataFrame({'risk': X_axi, 'attack_acc': Y_axi})\n",
    "df.to_csv('MNIST_risk_att.csv', index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cdb88106-86b6-4ba6-a1f4-424a942d38d8",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9e9a9719-cc37-49e8-88da-a0018715ec76",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "60f8af95-80f7-4f41-b503-3398df204a89",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "57361cdb-0f28-470a-b570-1b8be369c191",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e13cc513-987d-4ae4-8179-0c2c1ada7cc2",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "id": "4d44c0fa-c7b9-4f39-a245-0f8e3671141c",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(50000,)"
      ]
     },
     "execution_count": 32,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "pri_risk_rank.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "id": "030de03c-e397-4141-8f84-6cd9910e91a7",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# Inputs: pri_risk_rank, pri_risk_all, base_att, train_keep (all computed above).\n",
    "# Sort samples into 100 risk-ranked bins of 500; for each bin, pool the baseline\n",
    "# attack's predictions and the true membership labels across all target models,\n",
    "# and record (mean risk of the bin, pooled attack accuracy).\n",
    "for i in range(100):  # bin index over the risk-ranked sample order\n",
    "    start = i * 500\n",
    "    end = (i + 1) * 500\n",
    "    bin_idx = pri_risk_rank[start:end]\n",
    "    risk_t = pri_risk_all[bin_idx]\n",
    "    # Vectorized pooling: base_att[:, bin_idx] is (n_models, 500); a C-order\n",
    "    # ravel yields model 0's 500 entries, then model 1's, ... — exactly the\n",
    "    # order the previous per-model np.concatenate loop produced, without the\n",
    "    # quadratic cost of growing an array inside the loop.\n",
    "    pred_t = base_att[:, bin_idx].ravel()\n",
    "    mem_t = train_keep[:, bin_idx].ravel()\n",
    "    acc = metrics.accuracy_score(mem_t, pred_t)\n",
    "    X_axi.append(np.mean(risk_t))\n",
    "    Y_axi.append(acc)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a4df62ec-4eb6-4c8f-a522-737a9e0d0e06",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "id": "238bf52f-5f07-4176-9642-bd34be61a5a6",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAj8AAAGwCAYAAABGogSnAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuNSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/xnp5ZAAAACXBIWXMAAA9hAAAPYQGoP6dpAABIJUlEQVR4nO3dd3xUVf7/8fekF1KAQAIhBELvYJBQBBUjiC6KuIqIgii4q+CicVVYBdTdn6zriqjLgoViF1Fsi18Eg4hAaKFXadITSCCF9Mzc3x+BgZhCBiaZmczr+XjMA3Ln3Dufy81435577rkmwzAMAQAAuAkPRxcAAABQkwg/AADArRB+AACAWyH8AAAAt0L4AQAAboXwAwAA3ArhBwAAuBUvRxdQ0ywWi06cOKGgoCCZTCZHlwMAAKrAMAxlZ2ercePG8vC4ur4btws/J06cUFRUlKPLAAAAV+Do0aNq0qTJVW3D7cJPUFCQpJJ/vODgYAdXAwAAqiIrK0tRUVHW8/jVcLvwc+FSV3BwMOEHAAAXY48hKwx4BgAAboXwAwAA3ArhBwAAuBXCDwAAcCuEHwAA4FYIPwAAwK0QfgAAgFsh/AAAALdC+AEAAG6F8AMAANwK4QcAALgVwg8AAHArbvdgUwAAYF+GYaig2KK8QrPyiswymaRGIf6OLqtChB8AAGq5wmKL8orMyi8yK7fQbA0p1j+LzMorLD7/s6Xk7xfa/m69/PKWF5llGBc/r0ezevr8z70ct8OXQfgBAMCBzBbjfNAoVn6hRblFxWXDSSV/5haZlX/+59zz4cT69/PLiy3G5QuxEx9PD5lMNfZxV4TwAwBABSwWQ/nF50PG74JF3vnQUV4vSF7h73tVKv6z0Gypsf3x9DApwNtTfj6e8vf2VICPp/y8L/n7Jcv9vUveC/DxlL/PJX8/397//HLr388v9/J0/uHEhB8AgEv6/TiTMj0iv+8FuWS59TJPUenLPL8PLflFNRdMTCaVDhaV/GkNJz6eCvC+NJx4nW/nIX9vrzLreXuaZHL2bpkaQPgBAFSL8seZFCuv0HLxMk8Fl26uZJxJdfPz9qig18NL/hfeuyR8BPh4le5VOb9emV6V88t9vTwIJjWE8AMAuGIZuYU6cPqcDpzKKfnz9DkdOJ2jY2dzVWSu2XEmft4lgaPCXpPfhRabLvl4ecrDg2BSWxB+AACVMlsMncjI0/7T53Tg1LlSYSc9p/Cy6186zqS8Xo9LL91U5ZKPv4+nAry95OdzsSfGFcaZwHkQfgAAkqTcwmIdPJ1j7b05cD7sHErLUUFxxWNfGof4qUXDOmrRoI5aNAhUiwZ1FB0WqDq+JZeAfLwIJnAuhB8AcCOGYej0uYIyl6kOnDqn4xl5Fa7n4+WhmLDAiwHnfNhpHhaoQF9OJXAt/MYCQC1UZLboyJlcHTh17vzlqothJzu/uML16gX6WHtvWjSooxYNS/7epG6APBnzglqC8AMALiwrv+j8OJyLl6kOnD6nw+m5FU5s52GSouoFlLpM1bJhHcU0qKN6gT41vAdAzSP8AICTs1gMnczKvzjY+HxPzv7T53Q6u6DC9QJ8PEsFnAuXqqLrB8jP27MG9wBwLoQfAHASFouhg2k52pOSVeoy1cHTOcorMle4Xniw78XLVJeMx4kI9uP2bKAchB8AcADDMHQ4PVfbjmdq+7EMbTuWqR3HM5VTWH7I8fIwqVlYYKnLVC0a1FFMg0AF+XnXcPWAayP8AEA1MwxDxzPytP1YprYdz9S2YxnafixTWeUMPPbz9lDbiGC1alin1O3jUfUC5M1cNoBdEH4AwM5Ss/K19WiGth/P1LZjmdp+PFNnypkM0MfLQ+0aBatLkxB1igxR5yahatEgkAn7gGpG+AGAq5B2rqCkR+dYprYfL7l8daqcQcheHia1bRSkTpGh6nw+7LQOD2ICQMABCD8AUEUZuYUXe3OOlVy+OpGZX6adh0lq
HR5U0psTFarOkSFqExHEHVaAkyD8AEA5svOLtON4lrYfz9DW82HnyJncMu1MJqlFgzrqHBmiTk1C1LlJiNo3CpG/D0EHcFaEHwBuL7ewWDtPZJ3v0cnQtuOZOng6p9y2zeoHqFOTUGvY6dA4mLutABdD+AHgdorMFm09mqFV+9O0Zn+6Nh05W+5syJGh/urcpGQgcucmIerYOEQhAQQdwNURfgDUeoZhaG9qtlbvT9fq/WladzC9zHw64cG+JSHnfI9Op8gQ1a/j66CKAVQnwg+AWul4Rp5W70sr6d05kK60c6XvwKob4K3eLcN0Xcsw9WkRpqb1AxxUKYCaRvgBUCuczSlU0sGSnp3V+9P0W3rpwcn+3p7q0bye+rSsrz4tw9QuIphHPwBuivADwCXlF5m14bcz1nE7O05kyrhk2I6nh0ldmoToupZh6t0yTN2ahsrXizuwABB+ALiIYrNF249nas2BdK3al6bkw2dVaLaUatM6vI56tyi5lBUXU4+7sACUi/ADwCkZhqEDp3O0en/JuJ21B9OV/btnYTUK8VOflmEll7JahKlhsJ+DqgXgSgg/AJzGycw8rd6frjXnBymnZJWePTnYz0u9WtS3XsqKCQuUycS4HQC2IfwAcJhLByknHUjXwbTSEwv6eHno2mZ1rZeyOkaGyJNBygCuEuEHQI3JLSzW+kNntOZASeDZdTKr1CBlD5PUqUmo+rQouSMrNrouz8MCYHeEHwDVprDYoi1HM7R6f5rWHEjTlqMZKjKXnkn5wiDl3i3qKy6mvkL8GaQMoHoRfgDYjcViaNfJLK05kKbV+9O14bczyv3dTMqRof7WuXZ6taivhkEMUgZQswg/AK5KZm6Rvtt2omTczsF0ZeQWlXq/fqCPerWor94tSu7KalovgEHKAByK8APgiuQVmjV/zW+atWK/si65BT3Qx1NxMfXV+/y4nTbhQcykDMCpEH4A2KTYbNHC5GOa8eOvSs0qeV5W6/A6Gty5sXq3DFPnJiHy9vRwcJUAUDHCD4AqMQxD/7cjRf/+Ya/1lvTIUH89NaC17ugayS3oAFwG4QfAZa3Zn6ZXluzR1mOZkqR6gT4af2NLjejZlOdlAXA5hB8AFdpxPFOvLNmjX/alSSoZzzOmb4zG9otRHV/+8wHANfFfLwBl/JaWo38v3av/bTspSfL2NGlEXLTG92+psDq+Dq4OAK4O4QeA1ansfL2ZuE+frT+qYoshk0ka0jVST8a3VtP6AY4uDwDswuG3ZMycOVPNmjWTn5+f4uLitH79+grbFhUV6aWXXlKLFi3k5+enLl26aMmSJTVYLVA7ZeUX6d8/7NX1/1qhj9YeUbHF0A1tGmjx4331+rCuBB8AtYpDe34WLFighIQEzZ49W3FxcZoxY4YGDhyovXv3qmHDhmXaP//88/roo4/07rvvqm3btvrhhx905513as2aNerWrZsD9gBwbbmFxfp47RHNXLHfOjlht6ahevaWtuoZU9/B1QFA9TAZhmFcvln1iIuL07XXXqv//Oc/kiSLxaKoqCg9/vjjmjhxYpn2jRs31nPPPadx48ZZl911113y9/fXRx99VKXPzMrKUkhIiDIzMxUcHGyfHQFczOnsAn2Q9Js+XHvYGnpaNqyjpwe20YD24czADMDp2PP87bCen8LCQiUnJ2vSpEnWZR4eHoqPj1dSUlK56xQUFMjPr/RzgPz9/bVq1aoKP6egoEAFBQXWn7Oysq6ycsB17T+Vrfd+OaRFm4+rsNgiSYquH6BxN7TU0Gsi5cXkhADcgMPCT1pamsxms8LDw0stDw8P1549e8pdZ+DAgZo+fbr69eunFi1aKDExUYsWLZLZbC63vSRNmzZNL774ol1rB1yJYRhaf+iM3ll5UIl7TlmXd2saqj/1i9HN7SOYoBCAW3Gpu73eeOMNjR07Vm3btpXJZFKLFi00evRozZ07t8J1Jk2apISEBOvPWVlZioqKqolyAYcqNlu0ZGeK
3l150Do5ockkDWgfrkf6xSg2up6DKwQAx3BY+AkLC5Onp6dSU1NLLU9NTVVERES56zRo0EBff/218vPzlZ6ersaNG2vixImKiYmp8HN8fX3l68u8JHAfOQXF+nzjUc1ZdUjHzuZJkny9PPTH2CZ6+LrmimlQx8EVAoBjOSz8+Pj4KDY2VomJiRoyZIikkgHPiYmJGj9+fKXr+vn5KTIyUkVFRfryyy91zz331EDFgHM7lZWv+Wt+00drD1ufsl4v0Ecje0XrgZ7Rqs/khAAgycGXvRISEjRq1Ch1795dPXr00IwZM5STk6PRo0dLkkaOHKnIyEhNmzZNkrRu3TodP35cXbt21fHjx/XCCy/IYrHomWeeceRuAA71a2q23vvloL7efEKF5pJBzM3DAjWmb3PddU0T+Xnz7C0AuJRDw8+wYcN0+vRpTZkyRSkpKeratauWLFliHQR95MgReXhcvPskPz9fzz//vA4ePKg6dero1ltv1YcffqjQ0FAH7QHgGIZh6Jd9aXpv1SGt/PW0dXn36Lp6pF+M4tuFy4NBzABQLofO8+MIzPMDV5ZfZNa3W07ovVUH9WvqOUmSh0ka2CFCY/rGKDa6roMrBIDqUSvm+QFQdennCvTR2iP6cO1vSjtXKKnkCevDrm2q0X2aKaoej58AgKoi/ABObP+pbM1ZdUhfbro4KWHjED+N7tNcw3pEKdjP28EVAoDrIfwATsYwDK3en673Vh3Uir0Xx/N0aRKiMX1jdEvHCHkzEzMAXDHCD+Akis0WfbX5uOasOqQ9KdmSLk5KOPb8eB6euQUAV4/wAziBjb+d0fNf77CGngAfT93TPUqj+zRTdP1AB1cHALUL4QdwoPRzBfrn/+3RwuRjkqTQAG/9qV8L3dejqUICGM8DANWB8AM4gMVi6LMNR/XKkj3KzCuSJN17bZSeuaWt6gX6OLg6AKjdCD9ADdtxPFPPfb1DW49mSJLaNwrW34d0ZI4eAKghhB+ghmTmFWn60r36cO1hWQwpyNdLCQNa64Ge0fLi7i0AqDGEH6CaGYahb7ac0D8W71bauQJJ0h1dG+u5W9upYbCfg6sDAPdD+AGq0b7UbE3+ZofWHjwjSYppEKh/3NFRvVuGObgyAHBfhB+gGuxNydYHSb9pwYajKrYY8vP20OP9W2ls3xj5eHGJCwAcifAD2ElhsUVLd6Xog6TDWn/ojHX5ze3DNeUP7Xn+FgA4CcIPcJVOZubp03VH9OmGozqdXTKmx9PDpJvbhWtk72j1bsElLgBwJoQf4AoYhqE1B9L1YdJhLdudKrPFkCQ1CPLV8B5NNbxHlBqF+Du4SgBAeQg/gA2y84v0RfIxfbj2sA6ezrEuj2teTw/0itaA9hGM6QEAJ0f4Aapo/aEzeuKzzTqRmS9JCvTx1NBrmuiBXtFqHR7k4OoAAFVF+AEuo9hs0ZvL9+s/y/fJYkhN6wVobL8Y3dktUnV8+QoBgKvhv9xAJY6dzdUTn23RxsNnJUl/jG2iF2/voEBCDwC4LP4LDlRg8baTmrhom7LzixXk66V/3NlRd3SNdHRZAICrRPgBfie3sFgvfrtLCzYelSR1axqqN+/txjw9AFBLEH6AS+w4nqm/fLZZB0/nyGSSxt3QUhPiW8mbB48CQK1B+AFUMm/P3NW/6ZX/26NCs0Xhwb56fVhXJigEgFqI8AO3dyanUE99vkU/7T0tqeRxFP+6q7PqBvo4uDIAQHUg/MCtbT2aocc+3qTjGXny8fLQ5Nva6f6e0TKZTI4uDQBQTQg/cEuGYeiT9Uf04re7VGi2qFn9AM26P1btGgU7ujQAQDUj/MDt5BWa9dzX27Vo03FJ0oD24fr3PV0U7Oft4MoAADWB8AO3cigtR49+lKw9KdnyMEnP3tJWj/SL4TIXALgRwg/cxg87U/TXz7cqu6BYYXV89Nbwa9SrRX1HlwUAqGGEH9R6xWaLXl26V2//fFCS1D26rmaOuEbhwX4OrgwA4AiE
H9RqaecKNP6TTVp78Iwk6aE+zTXp1rZMWggAbozwg1rr4OlzGjVvvY6eyVOgj6de+WNn/aFzY0eXBQBwMMIPaqXkw2c15v0NOptbpKb1AjT3we5q2TDI0WUBAJwA4Qe1ztKdKXr8080qKLaoc5MQzRl1rRoE+Tq6LACAkyD8oFb5cO1hTf1mhyyG1L9tQ/3nvm4K8OHXHABwEWcF1AqGYehfP+zVrBUHJEnDe0Tp73d0lBcDmwEAv0P4gcsrLLbo2S+36avNJTM2J9zcWo/3b8nEhQCAchF+4NKy84v06EebtGp/mjw9TJo2tJPu6R7l6LIAAE6M8AOXlZqVr1Fz12tPSrYCfTz13/tjdX3rBo4uCwDg5Ag/cDmZeUVauPGo3ll5UKeyC9QgyFfzHrxWHSNDHF0aAMAFEH7gMvalZuv9pN/0ZfJx5RWZJUktGgRq/ugeiqoX4ODqAACugvADp2a2GPppzynNX/ObVu1Psy5vGxGkUb2baUjXSPn7eDqwQgCAqyH8wClduLT1QdJhHTmTK0nyMEk3tw/Xg72bq2dMPe7mAgBcEcIPnM7320/q6YVblVNYcmkrxN9b914bpft7RnN5CwBw1Qg/cCpzVx3S3xfvkmFIrcPraHSf5lzaAgDYFeEHTsFiMTTt/3br3V8OSZJG9orW1MEd5OnBpS0AgH0RfuBwBcVm/XXhNn239YQk6dlb2urP18cwpgcAUC0IP3CozLwi/enDjVp78Iy8PEx69e7OurNbE0eXBQCoxQg/cJiTmXl6cO4G7U3NVh1fL826/xr1bcUMzQCA6kX4gUP8mpqtUXPX62RmvhoG+Wre6GvVoTEzNAMAqh/hBzVu7cF0PfLBRmXlF6tFg0C9/1APNanLLewAgJpB+EGNWrjxqP721XYVmQ11j66r90Z1V2iAj6PLAgC4EcIPaoTFYujVpXs1a8UBSdJtnRrptXu6yM+b+XsAADWL8INql1tYrIQFW7VkZ4ok6fH+LfVkfGt5MIcPAMABCD+oVqlZ+Rrz/kZtP54pH08PvfLHTtzKDgBwKMIPqs2O45ka8/5GpWTlq16gj955IFbdm9VzdFkAADdH+EG1+GFnip74bIvyisxq1bCO5oy6Vk3rc0cXAMDxCD+wq6z8Ik1f+qveT/pNhiH1bRWmmSOuUbCft6NLAwBAEuEHdmIYhr7dekJ//99upZ0rkFTycNIpf2gvL08PB1cHAMBFhB9ctf2nsjX5651KOpguSYoJC9RLd3TUda3CHFwZAABlEX5wxXILi/XW8v1675eDKjIb8vXy0OP9W2psvxj5ejF/DwDAORF+cEV+2XdaE7/cruMZeZKkm9o21Au3d1BUPQY1AwCcG+EHNks+fFYPz9+oQrNFkaH+euH2Drq5fbijywIAoEocPhJ15syZatasmfz8/BQXF6f169dX2n7GjBlq06aN/P39FRUVpSeffFL5+fk1VC2OZ+TpTx+WBJ/4dg21LKEfwQcA4FJsDj+jRo3SypUr7fLhCxYsUEJCgqZOnapNmzapS5cuGjhwoE6dOlVu+08++UQTJ07U1KlTtXv3bs2ZM0cLFizQ3/72N7vUg8rlFhZr7PsblXauUG0jgvTGvd0U4EPnIQDAtdgcfjIzMxUfH69WrVrp5Zdf1vHjx6/4w6dPn66xY8dq9OjRat++vWbPnq2AgADNnTu33PZr1qxRnz59dN9996lZs2YaMGCAhg8fftneIlw9i8XQU59v1a6TWaof6KP3RnVXoC/BBwDgemwOP19//bWOHz+uRx99VAsWLFCzZs00aNAgffHFFyoqKqrydgoLC5WcnKz4+PiLxXh4KD4+XklJSeWu07t3byUnJ1vDzsGDB/X999/r1ltvrfBzCgoKlJWVVeoF272RuE//tyNF3p4mzX4gVk3qMrAZAOCarmjMT4MGDZSQkKCtW7dq3bp1atmypR544AE1btxYTz75pPbt23fZbaSlpclsNis8vPR4kfDwcKWk
pJS7zn333aeXXnpJ1113nby9vdWiRQvdcMMNlV72mjZtmkJCQqyvqKgo23YW+t+2E3ojseSY/r8hnXQtz+cCALiwqxrwfPLkSS1btkzLli2Tp6enbr31Vm3fvl3t27fX66+/bq8arVasWKGXX35Z//3vf7Vp0yYtWrRIixcv1t///vcK15k0aZIyMzOtr6NHj9q9rtpsx/FM/XXhVknSmOua655rCY8AANdm86CNoqIiffvtt5o3b56WLl2qzp0764knntB9992n4OBgSdJXX32lhx56SE8++WSF2wkLC5Onp6dSU1NLLU9NTVVERES560yePFkPPPCAxowZI0nq1KmTcnJy9Mgjj+i5556Th0fZLOfr6ytfX19bdxOSTmXla+wHG5VfZNH1rRto0q3tHF0SAABXzebw06hRI1ksFutA465du5Zpc+ONNyo0NLTS7fj4+Cg2NlaJiYkaMmSIJMlisSgxMVHjx48vd53c3NwyAcfTs2QmYcMwbN0VVGLXiSz95bPNOpmZrxYNAvXWfd3k6WFydFkAAFw1m8PP66+/rrvvvlt+fn4VtgkNDdWhQ4cuu62EhASNGjVK3bt3V48ePTRjxgzl5ORo9OjRkqSRI0cqMjJS06ZNkyQNHjxY06dPV7du3RQXF6f9+/dr8uTJGjx4sDUE4epYLIbmrj6kfy3Zq0KzRQ2CfPXeqGt5KjsAoNawOfzcfvvtys3NLRN+zpw5Iy8vL+ulr6oYNmyYTp8+rSlTpiglJUVdu3bVkiVLrIOgjxw5Uqqn5/nnn5fJZNLzzz+v48ePq0GDBho8eLD+3//7f7buBsqRmpWvvy7cql/2pUmS4ts11Ct3dVb9Olw2BADUHibDxutFgwYN0uDBg/XYY4+VWj579mx9++23+v777+1aoL1lZWUpJCREmZmZNgW12m7JjhRNWrRNZ3OL5Oftocl/aK/7ejSVycSlLgCA49nz/G3z3V7r1q3TjTfeWGb5DTfcoHXr1l1VMah5uYXFmrRom/78UbLO5hapQ+Ng/e/xvhoRF03wAQDUSjZf9iooKFBxcXGZ5UVFRcrLy7NLUagZuYXFuvedtdp2LFMmk/RIvxg9dXMb+Xg5/JFvAABUG5vPcj169NA777xTZvns2bMVGxtrl6JQ/cwWQxM+26JtxzJVN8BbHz8cp0mD2hF8AAC1ns09P//4xz8UHx+vrVu36qabbpIkJSYmasOGDVq6dKndC0T1mPb9bi3blSofTw+9O7K7ujNrMwDATdj8v/l9+vRRUlKSoqKi9Pnnn+u7775Ty5YttW3bNvXt27c6aoSdfbT2sN5bVTIVwat3dyb4AADcis13e7k6d7/ba8XeU3r4/Y0yWww9dXNrPX5TK0eXBADAZdnz/G3zZa9L5efnq7CwsNQydwwUrmJPSpbGf7JZZouhu65povH9Wzq6JAAAapzNl71yc3M1fvx4NWzYUIGBgapbt26pF5zTqax8PTRvg84VFCuueT1NG9qJW9kBAG7J5vDz9NNPa/ny5Zo1a5Z8fX313nvv6cUXX1Tjxo31wQcfVEeNuEoZuYV6+P2NOpGZr5iwQL39QCx3dQEA3JbNl72+++47ffDBB7rhhhs0evRo9e3bVy1btlR0dLQ+/vhjjRgxojrqxBU6djZXo+au14HTOaob4K25D16r0AAfR5cFAIDD2Py//2fOnFFMTIykkvE9Z86ckSRdd911WrlypX2rw1XZdSJLQ/+7RgdO56hRiJ8W/KmXmoUFOrosAAAcyubwExMTY31ie9u2bfX5559LKukRCg0NtWtxuHJr9qdp2NtJOpVdoNbhdfTlo73VOjzI0WUBAOBwNoef0aNHa+vWrZKkiRMnaubMmfLz89OTTz6pp59+2u4Fwnbfbj2hUfPWK7ugWD2a19PCP/dW41B/R5cFAIBTuOp5fg4fPqzk5GS1bNlSnTt3tldd1aa2z/Pz2fojmrhouyTp1k4Rmn5PV/l5ezq4KgAAro7D
nupeVFSkm266Sfv27bMui46O1tChQ10i+NR2m4+c1eRvdkiSRvWK1lvDryH4AADwOzbd7eXt7a1t27ZVVy24CmdzCjX+k80qMhsa1DFCL9zegXl8AAAoh81jfu6//37NmTOnOmrBFbJYDCV8vkXHM/LUrH6AXvljZ4IPAAAVsHmen+LiYs2dO1c//vijYmNjFRhY+tbp6dOn2604VM2snw/op72n5ePloZkjrlGwn7ejSwIAwGnZHH527Niha665RpL066+/lnqP3oaal3QgXa8t3StJeun2DurQOMTBFQEA4NxsDj8//fRTddSBK3AqO1+Pf7pZFkMaek2khl0b5eiSAABwejzgyUWZLYYmfLpFaedKJjH8x5CO9LwBAFAFNvf83HjjjZWeZJcvX35VBaFq/rN8v5IOpivAx1P/HRGrAB+bDyUAAG7J5jNm165dS/1cVFSkLVu2aMeOHRo1apS96kIlkg6k643EkvFW/xjSUS0b1nFwRQAAuA6bw8/rr79e7vIXXnhB586du+qCULn0cwWa8FnJOJ8/xjbR0GuaOLokAABcit3G/Nx///2aO3euvTaHclgshp5auFWnsgvUokGgXrqjg6NLAgDA5dgt/CQlJcnPz89em0M53v3loFbsPS3f8/P5MM4HAADb2Xz2HDp0aKmfDcPQyZMntXHjRk2ePNluhaG0Hccz9eoPJfP5vHB7B7WNqH0PZQUAoCbYHH5CQkpPoufh4aE2bdropZde0oABA+xWGC4yDEMvfLtTxZaS53bdy3w+AABcMZvDz7x586qjDlTi260ntPHwWfl7e2rK4PbM5wMAwFWweczPhg0btG7dujLL161bp40bN9qlKFyUU1Csad/vkSSNu7GFGoX4O7giAABcm83hZ9y4cTp69GiZ5cePH9e4cePsUhQu+u+K/UrJyldUPX+N6Rvj6HIAAHB5NoefXbt2WR9seqlu3bpp165ddikKJQ6n5+jdlYckSc/f1l5+3p4OrggAANdnc/jx9fVVampqmeUnT56Ulxe3XtvTPxbvVqHZor6twjSgfbijywEAoFawOfwMGDBAkyZNUmZmpnVZRkaG/va3v+nmm2+2a3HubONvZ7RsV6q8PEyayiBnAADsxuaumn//+9/q16+foqOj1a1bN0nSli1bFB4erg8//NDuBbqrt1celFTyCIuWDYMcXA0AALWHzeEnMjJS27Zt08cff6ytW7fK399fo0eP1vDhw+Xt7V0dNbqdA6fP6cfdJZcWGeQMAIB9XdEgncDAQD3yyCP2rgXnzVl1SIYhxbdryBPbAQCwM5vH/EybNq3cB5jOnTtXr7zyil2Kcmdp5wr0ZfIxSdIj/Vo4uBoAAGofm8PP22+/rbZt25ZZ3qFDB82ePdsuRbmzD5IOq6DYoi5Robq2WV1HlwMAQK1jc/hJSUlRo0aNyixv0KCBTp48aZei3FVeoVkfJv0mSXqkbwx3eAEAUA1sDj9RUVFavXp1meWrV69W48aN7VKUu/py0zGdzS1SVD1/DezAvD4AAFQHmwc8jx07Vk888YSKiorUv39/SVJiYqKeeeYZPfXUU3Yv0J18u+WEJGlkz2by8rQ5lwIAgCqwOfw8/fTTSk9P12OPPabCwkJJkp+fn5599llNmjTJ7gW6i7RzBdp4+IwkaVCnCAdXAwBA7WVz+DGZTHrllVc0efJk7d69W/7+/mrVqpV8fX2roz63sXz3KVkMqWNksJrUDXB0OQAA1FpX/DCuOnXq6Nprr7VnLW7th50pkqQB7en1AQCgOl1R+Nm4caM+//xzHTlyxHrp64JFixbZpTB3klNQrF/2p0mSBjDQGQCAamXzqNrPPvtMvXv31u7du/XVV1+pqKhIO3fu1PLlyxUSElIdNdZ6K389rcJii6LrB6hNOM/xAgCgOtkcfl5++WW9/vrr+u677+Tj46M33nhDe/bs0T333KOmTZtWR4213tJdJc/xGtA+nLl9AACoZjaHnwMH
Dui2226TJPn4+CgnJ0cmk0lPPvmk3nnnHbsXWNsVmS1KPP8Q0wEdGO8DAEB1szn81K1bV9nZ2ZJKnvC+Y8cOSVJGRoZyc3PtW50bWHfwjLLyi1U/0EfXNOVxFgAAVDebBzz369dPy5YtU6dOnXT33XdrwoQJWr58uZYtW6abbrqpOmqs1S7c5XVz+3B5enDJCwCA6mZz+PnPf/6j/Px8SdJzzz0nb29vrVmzRnfddZeef/55uxdYm2XnF+nrzcclSbd05JIXAAA1webwU69ePevfPTw8NHHiRLsW5E4+XX9E2QXFatmwjvq1auDocgAAcAs8QMpBCostmrPqkCTpkX4x8uCSFwAANYLw4yDfbDmu1KwChQf76o6ujR1dDgAAboPw4wAWi6F3Vh6UJD3Up7l8vTwdXBEAAO6D8OMAP/96WvtOnVMdXy8Nj2NiSAAAapLN4eenn36q8L2ZM2deVTHuYtH5O7zu7t5EwX7eDq4GAAD3YnP4GTp0qJKTk8ssf+ONNzRp0iS7FFWb5RWarTM6396FsT4AANQ0m8PPq6++qkGDBmnPnj3WZa+99pqmTJmixYsX27W42mj5nlPKLTSrSV1/dY0KdXQ5AAC4HZvn+RkzZozOnDmj+Ph4rVq1SgsWLNDLL7+s77//Xn369KmOGmuV/207IUm6rXMjHmIKAIAD2Bx+JOmZZ55Renq6unfvLrPZrB9++EE9e/a0d221Tk5BsZbvOSVJGtyZS14AADhClcLPm2++WWZZZGSkAgIC1K9fP61fv17r16+XJP3lL3+xb4W1yI+7U1VQbFF0/QB1aBzs6HIAAHBLJsMwjMs1at68edU2ZjLp4MGDNhcxc+ZMvfrqq0pJSVGXLl301ltvqUePHuW2veGGG/Tzzz+XWX7rrbdWacxRVlaWQkJClJmZqeDgmg0gI95bq9X70zX+xpb668A2NfrZAAC4Mnuev6vU83Po0KGr+pDKLFiwQAkJCZo9e7bi4uI0Y8YMDRw4UHv37lXDhg3LtF+0aJEKCwutP6enp6tLly66++67q61Ge/g1NVur96fLwyTd2yPK0eUAAOC2HD7J4fTp0zV27FiNHj1a7du31+zZsxUQEKC5c+eW275evXqKiIiwvpYtW6aAgIAKw09BQYGysrJKvRxh/prfJEkD2keoSd0Ah9QAAACuIPzcddddeuWVV8os/9e//mVz70thYaGSk5MVHx9/sSAPD8XHxyspKalK25gzZ47uvfdeBQYGlvv+tGnTFBISYn1FRdV8r0tmbpEWbTomSXqwT7Ma/3wAAHCRzeFn5cqVuvXWW8ssHzRokFauXGnTttLS0mQ2mxUeHl5qeXh4uFJSUi67/vr167Vjxw6NGTOmwjaTJk1SZmam9XX06FGbarSH/9txUvlFFrWNCFJc83o1/vkAAOAim291P3funHx8fMos9/b2rvFLSnPmzFGnTp0qHBwtSb6+vvL19a3Bqspaf+iMJCm+XThz+wAA4GA29/x06tRJCxYsKLP8s88+U/v27W3aVlhYmDw9PZWamlpqeWpqqiIiIipdNycnR5999pkefvhhmz7TEdb/VhJ+etDrAwCAw9nc8zN58mQNHTpUBw4cUP/+/SVJiYmJ+vTTT7Vw4UKbtuXj46PY2FglJiZqyJAhkiSLxaLExESNHz++0nUXLlyogoIC3X///bbuQo06kZGnY2fz5GGSromu6+hyAABwezaHn8GDB+vrr7/Wyy+/rC+++EL+/v7q3LmzfvzxR11//fU2F5CQkKBRo0ape/fu6tGjh2bMmKGcnByNHj1akjRy5EhFRkZq2rRppdabM2eOhgwZovr169v8mTVpw/len46RIarje0UTagMAADu6orPxbbfdpttuu80uBQwbNkynT5/WlClTlJKSoq5du2rJkiXWQdBHjhyRh0fpq3N79+7VqlWrtHTpUrvUUJ3WnR/v06MZl7wAAHAGVZrhuTap6RmeB7z+s35NPae3H4jV
wA6Vj2MCAADlq/EZni9lNpv1+uuv6/PPP9eRI0dKzbYsSWfOnLmqgmqT7Pwi7Tt1TpJ0TVPG+wAA4AxsvtvrxRdf1PTp0zVs2DBlZmYqISFBQ4cOlYeHh1544YVqKNF1bT+WKcOQIkP91SDIsbfbAwCAEjaHn48//ljvvvuunnrqKXl5eWn48OF67733NGXKFK1du7Y6anRZm49mSJK6Ng11aB0AAOAim8NPSkqKOnXqJEmqU6eOMjMzJUl/+MMfqvRUdXey5Xz46RYV6tA6AADARTaHnyZNmujkyZOSpBYtWljvuNqwYYPDZ1J2Nlsv9PwQfgAAcBo2h58777xTiYmJkqTHH39ckydPVqtWrTRy5Eg99NBDdi/QVZ3OLtCp7AKZTFKHxiGOLgcAAJxn891e//znP61/HzZsmKKjo7VmzRq1atVKgwcPtmtxrmxvSrYkqXn9QPn7eDq4GgAAcIHN4WflypXq3bu3vLxKVu3Zs6d69uyp4uJirVy5Uv369bN7ka5oT0rJQ17bRAQ5uBIAAHApmy973XjjjeXO5ZOZmakbb7zRLkXVBnvO9/y0jaj+iRQBAEDV2Rx+DMOQyWQqszw9PV2BgYF2Kao2oOcHAADnVOXLXkOHDpUkmUwmPfjgg6Xu7DKbzdq2bZt69+5t/wpdkNliaF9qyczOhB8AAJxLlcNPSEjJHUuGYSgoKEj+/v7W93x8fNSzZ0+NHTvW/hW6oCNnclVQbJGft4ea1gtwdDkAAOASVQ4/8+bNkyQ1a9ZMTz/9tAICOKlX5MKdXq3Dg+TpUfYSIQAAcBybx/z8/PPPZR5mKpU8bbV///52KcrV/ZpaEn5aNeSSFwAAzsZu4Sc/P1+//PKLXYpydb+l50iSYhowABwAAGdT5cte27Ztk1Qy5mfXrl1KSUmxvmc2m7VkyRJFRkbav0IXdOxMniSpSV3/y7QEAAA1rcrhp2vXrjKZTDKZTOVe3vL399dbb71l1+Jc1bGzuZKkKAY7AwDgdKocfg4dOiTDMBQTE6P169erQYMG1vd8fHzUsGFDeXryGIfCYotOZuVLkqLqEn4AAHA2VQ4/0dHRkiSLxVJtxdQGqVn5MgzJx9NDYXV8HF0OAAD4HZuf7XXBrl27dOTIkTKDn2+//farLsqVZeYVSZLqBnqXOxM2AABwLJvDz8GDB3XnnXdq+/btMplMMgxDkqwnerPZbN8KXUxGbkn4CfWn1wcAAGdk863uEyZMUPPmzXXq1CkFBARo586dWrlypbp3764VK1ZUQ4mu5WxuSU9YSIC3gysBAADlsbnnJykpScuXL1dYWJg8PDzk4eGh6667TtOmTdNf/vIXbd68uTrqdBkZFy57EX4AAHBKNvf8mM1mBQWVzFwcFhamEydOSCoZEL137177VueCMs/3/HDZCwAA52Rzz0/Hjh21detWNW/eXHFxcfrXv/4lHx8fvfPOO4qJiamOGl3KmZzzY34C6fkBAMAZ2Rx+nn/+eeXklDy+4aWXXtIf/vAH9e3bV/Xr19eCBQvsXqCrOX2uQJLUoI6vgysBAADlsTn8DBw40Pr3li1bas+ePTpz5ozq1q3Lrd2S0rJLwk8Y4QcAAKd0xfP8XKpevXr22EytkHaO8AMAgDOzecAzKncmp2TAc31mdwYAwCkRfuzIMAxl5ZcMeA7xZ8AzAADOiPBjRwXFFhWZS2a8Dib8AADglAg/dpR1foJDD5MU6MMT7gEAcEaEHzu6cMkr2J+HmgIA4KwIP3aUlV8sSarja5eb6AAAQDUg/NhR9vnwE+THeB8AAJwV4ceOss9f9gryo+cHAABnRfixo3MXen647AUAgNMi/NjRuYKS8BNI+AEAwGkRfuyooNgiSfLz5p8VAABnxVnajvKLzJIkP2/m+AEAwFkRfuzoYs8P4QcAAGdF+LEja8+PF/+sAAA4K87SdlRQVNLz40P4AQDAaXGWtqMiM+EHAABnx1najgouhB9P/lkBAHBWnKXt
qOj8gGdven4AAHBanKXt6MJlL296fgAAcFqcpe2okMteAAA4Pc7SdnQ2p+TBpiEBPNUdAABnRfixo7zz8/zwYFMAAJwX4ceOii0ll708PUwOrgQAAFSE8GNHZrMhSfLy4J8VAABnxVnajootJeGHnh8AAJwX4ceOzOfDj5cn4QcAAGdF+LGjCz0/HibCDwAAzorwY0eF52d49mWGZwAAnBZnaTtihmcAAJwfZ2k7KmbMDwAATo/wYyeGYVj/zpgfAACcF+HHTiwXs4+40x0AAOdF+LET8yXpx0TPDwAATovwYyeWUpe9HFgIAAColMPDz8yZM9WsWTP5+fkpLi5O69evr7R9RkaGxo0bp0aNGsnX11etW7fW999/X0PVVswoddmL9AMAgLNy6OPHFyxYoISEBM2ePVtxcXGaMWOGBg4cqL1796phw4Zl2hcWFurmm29Ww4YN9cUXXygyMlKHDx9WaGhozRf/OxceairxeAsAAJyZQ8PP9OnTNXbsWI0ePVqSNHv2bC1evFhz587VxIkTy7SfO3euzpw5ozVr1sjb21uS1KxZs5osuULF5otdP8zzAwCA83LYWbqwsFDJycmKj4+/WIyHh+Lj45WUlFTuOt9++6169eqlcePGKTw8XB07dtTLL78ss9lc4ecUFBQoKyur1Ks6FF8y4JmeHwAAnJfDwk9aWprMZrPCw8NLLQ8PD1dKSkq56xw8eFBffPGFzGazvv/+e02ePFmvvfaa/vGPf1T4OdOmTVNISIj1FRUVZdf9uODCPD/kHgAAnJtLXZ+xWCxq2LCh3nnnHcXGxmrYsGF67rnnNHv27ArXmTRpkjIzM62vo0ePVk9t5zt+GOwMAIBzc9iYn7CwMHl6eio1NbXU8tTUVEVERJS7TqNGjeTt7S1PT0/rsnbt2iklJUWFhYXy8fEps46vr698fX3tW3w5LtzqTvYBAMC5Oaznx8fHR7GxsUpMTLQus1gsSkxMVK9evcpdp0+fPtq/f78sl9xZ9euvv6pRo0blBp+adGGSQ3p+AABwbg697JWQkKB3331X77//vnbv3q1HH31UOTk51ru/Ro4cqUmTJlnbP/roozpz5owmTJigX3/9VYsXL9bLL7+scePGOWoXrC70/DDYGQAA5+bQW92HDRum06dPa8qUKUpJSVHXrl21ZMkS6yDoI0eOyMPjYj6LiorSDz/8oCeffFKdO3dWZGSkJkyYoGeffdZRu2DFmB8AAFyDybj0ceRuICsrSyEhIcrMzFRwcLDdtnvw9Dn1f+1nBfl5afsLA+22XQAAYN/zt0vd7eXMLiRI+n0AAHBuhB87udB/xhPdAQBwboQfu+FWdwAAXAHhx06sPT+OLQMAAFwG4cdOrGN+6PoBAMCpEX7shJ4fAABcA+HHTgzG/AAA4BIIP3ZycbYk0g8AAM6M8GMnFx5vwdMtAABwboQfO7k4z49j6wAAAJUj/NiZicteAAA4NcIPAABwK4QfO+OyFwAAzo3wYycX7/YCAADOjPBjJ9Z5fhxcBwAAqBzhx054qjsAAK6B8AMAANwK4QcAALgVwo+dMN4ZAADXQPgBAABuhfADAADcCuHHzrjZCwAA50b4AQAAboXwAwAA3Arhx04Mnm8BAIBLIPwAAAC3QvgBAABuhfADAADcCuHHzrjVHQAA50b4AQAAboXwAwAA3ArhBwAAuBXCDwAAcCuEHzthikMAAFwD4QcAALgVwg8AAHArhB8AAOBWCD8AAMCtEH4AAIBbIfwAAAC3QvgBAABuhfBjJ7kFZknS0TN5Dq4EAABUhvBjJ4u3n3B0CQAAoAoIP3Zye5dIR5cAAACqgPBjJ77eJf+UTesFOLgSAABQGcIPAABwK4QfAADgVgg/AADArRB+AACAWyH8AAAAt0L4AQAAboXwAwAA3ArhBwAAuBXCDwAAcCuEHwAA4FYIPwAAwK0QfgAAgFsh/AAAALdC+LGzrPwiR5cAAAAqQfixk19TsiVJGbmE
HwAAnBnhx04OpeU4ugQAAFAFhB87GdAhwtElAACAKiD82InJVPJn03oBji0EAABUyinCz8yZM9WsWTP5+fkpLi5O69evr7Dt/PnzZTKZSr38/PxqsNryncjIkyQdOZPr4EoAAEBlHB5+FixYoISEBE2dOlWbNm1Sly5dNHDgQJ06darCdYKDg3Xy5Enr6/DhwzVYcfnGf7LZ0SUAAIAqcHj4mT59usaOHavRo0erffv2mj17tgICAjR37twK1zGZTIqIiLC+wsPDa7Di8k0c1NbRJQAAgCpwaPgpLCxUcnKy4uPjrcs8PDwUHx+vpKSkCtc7d+6coqOjFRUVpTvuuEM7d+6ssG1BQYGysrJKvarDn69vod/+eZt+++dt1bJ9AABgHw4NP2lpaTKbzWV6bsLDw5WSklLuOm3atNHcuXP1zTff6KOPPpLFYlHv3r117NixcttPmzZNISEh1ldUVJTd9wMAALgOh1/2slWvXr00cuRIde3aVddff70WLVqkBg0a6O233y63/aRJk5SZmWl9HT16tIYrBgAAzsTLkR8eFhYmT09PpaamllqempqqiIiqzZvj7e2tbt26af/+/eW+7+vrK19f36uuFQAA1A4O7fnx8fFRbGysEhMTrcssFosSExPVq1evKm3DbDZr+/btatSoUXWVCQAAahGH9vxIUkJCgkaNGqXu3burR48emjFjhnJycjR69GhJ0siRIxUZGalp06ZJkl566SX17NlTLVu2VEZGhl599VUdPnxYY8aMceRuAAAAF+Hw8DNs2DCdPn1aU6ZMUUpKirp27aolS5ZYB0EfOXJEHh4XO6jOnj2rsWPHKiUlRXXr1lVsbKzWrFmj9u3bO2oXAACACzEZhmE4uoialJWVpZCQEGVmZio4ONjR5QAAgCqw5/nb5e72AgAAuBqEHwAA4FYIPwAAwK0QfgAAgFsh/AAAALdC+AEAAG6F8AMAANyKwyc5rGkXpjXKyspycCUAAKCqLpy37TE9oduFn+zsbElSVFSUgysBAAC2ys7OVkhIyFVtw+1meLZYLDpx4oSCgoJkMpnsuu2srCxFRUXp6NGjtXr2aPaz9nGXfWU/axd32U/Jffa1sv00DEPZ2dlq3LhxqcdeXQm36/nx8PBQkyZNqvUzgoODa/Uv5wXsZ+3jLvvKftYu7rKfkvvsa0X7ebU9Phcw4BkAALgVwg8AAHArhB878vX11dSpU+Xr6+voUqoV+1n7uMu+sp+1i7vsp+Q++1pT++l2A54BAIB7o+cHAAC4FcIPAABwK4QfAADgVgg/AADArRB+bDRz5kw1a9ZMfn5+iouL0/r16yttv3DhQrVt21Z+fn7q1KmTvv/++xqq9MpMmzZN1157rYKCgtSwYUMNGTJEe/furXSd+fPny2QylXr5+fnVUMVX5oUXXihTc9u2bStdx9WO5QXNmjUrs68mk0njxo0rt72rHM+VK1dq8ODBaty4sUwmk77++utS7xuGoSlTpqhRo0by9/dXfHy89u3bd9nt2vodr26V7WdRUZGeffZZderUSYGBgWrcuLFGjhypEydOVLrNK/n9rwmXO6YPPvhgmbpvueWWy27XlY6ppHK/ryaTSa+++mqF23TGY1qV80l+fr7GjRun+vXrq06dOrrrrruUmppa6Xav9Lt9KcKPDRYsWKCEhARNnTpVmzZtUpcuXTRw4ECdOnWq3PZr1qzR8OHD9fDDD2vz5s0aMmSIhgwZoh07dtRw5VX3888/a9y4cVq7dq2WLVumoqIiDRgwQDk5OZWuFxwcrJMnT1pfhw8frqGKr1yHDh1K1bxq1aoK27risbxgw4YNpfZz2bJlkqS77767wnVc4Xjm5OSoS5cumjlzZrnv/+tf/9Kbb76p2bNna926dQoMDNTAgQOVn59f4TZt/Y7XhMr2Mzc3V5s2bdLkyZO1adMmLVq0SHv37tXtt99+2e3a8vtfUy53TCXplltuKVX3p59+Wuk2Xe2YSiq1
fydPntTcuXNlMpl01113VbpdZzumVTmfPPnkk/ruu++0cOFC/fzzzzpx4oSGDh1a6Xav5LtdhoEq69GjhzFu3Djrz2az2WjcuLExbdq0ctvfc889xm233VZqWVxcnPGnP/2pWuu0p1OnThmSjJ9//rnCNvPmzTNCQkJqrig7mDp1qtGlS5cqt68Nx/KCCRMmGC1atDAsFku577vi8ZRkfPXVV9afLRaLERERYbz66qvWZRkZGYavr6/x6aefVrgdW7/jNe33+1me9evXG5KMw4cPV9jG1t9/RyhvX0eNGmXccccdNm2nNhzTO+64w+jfv3+lbVzhmP7+fJKRkWF4e3sbCxcutLbZvXu3IclISkoqdxtX+t3+PXp+qqiwsFDJycmKj4+3LvPw8FB8fLySkpLKXScpKalUe0kaOHBghe2dUWZmpiSpXr16lbY7d+6coqOjFRUVpTvuuEM7d+6sifKuyr59+9S4cWPFxMRoxIgROnLkSIVta8OxlEp+jz/66CM99NBDlT7Y1xWP56UOHTqklJSUUscsJCREcXFxFR6zK/mOO6PMzEyZTCaFhoZW2s6W339nsmLFCjVs2FBt2rTRo48+qvT09Arb1oZjmpqaqsWLF+vhhx++bFtnP6a/P58kJyerqKio1PFp27atmjZtWuHxuZLvdnkIP1WUlpYms9ms8PDwUsvDw8OVkpJS7jopKSk2tXc2FotFTzzxhPr06aOOHTtW2K5NmzaaO3euvvnmG3300UeyWCzq3bu3jh07VoPV2iYuLk7z58/XkiVLNGvWLB06dEh9+/ZVdnZ2ue1d/Vhe8PXXXysjI0MPPvhghW1c8Xj+3oXjYssxu5LvuLPJz8/Xs88+q+HDh1f68Etbf/+dxS233KIPPvhAiYmJeuWVV/Tzzz9r0KBBMpvN5bavDcf0/fffV1BQ0GUvBTn7MS3vfJKSkiIfH58yQf1y59ULbaq6Tnnc7qnuqLpx48Zpx44dl71u3KtXL/Xq1cv6c+/evdWuXTu9/fbb+vvf/17dZV6RQYMGWf/euXNnxcXFKTo6Wp9//nmV/g/LVc2ZM0eDBg1S48aNK2zjiscTJYOf77nnHhmGoVmzZlXa1lV//++9917r3zt16qTOnTurRYsWWrFihW666SYHVlZ95s6dqxEjRlz2pgNnP6ZVPZ/UFHp+qigsLEyenp5lRqGnpqYqIiKi3HUiIiJsau9Mxo8fr//973/66aef1KRJE5vW9fb2Vrdu3bR///5qqs7+QkND1bp16wprduVjecHhw4f1448/asyYMTat54rH88JxseWYXcl33FlcCD6HDx/WsmXLKu31Kc/lfv+dVUxMjMLCwiqs25WPqST98ssv2rt3r83fWcm5jmlF55OIiAgVFhYqIyOjVPvLnVcvtKnqOuUh/FSRj4+PYmNjlZiYaF1msViUmJhY6v+SL9WrV69S7SVp2bJlFbZ3BoZhaPz48frqq6+0fPlyNW/e3OZtmM1mbd++XY0aNaqGCqvHuXPndODAgQprdsVj+Xvz5s1Tw4YNddttt9m0nisez+bNmysiIqLUMcvKytK6desqPGZX8h13BheCz759+/Tjjz+qfv36Nm/jcr//zurYsWNKT0+vsG5XPaYXzJkzR7GxserSpYvN6zrDMb3c+SQ2Nlbe3t6ljs/evXt15MiRCo/PlXy3KyoOVfTZZ58Zvr6+xvz5841du3YZjzzyiBEaGmqkpKQYhmEYDzzwgDFx4kRr+9WrVxteXl7Gv//9b2P37t3G1KlTDW9vb2P79u2O2oXLevTRR42QkBBjxYoVxsmTJ62v3Nxca5vf7+eLL75o/PDDD8aBAweM5ORk49577zX8/PyMnTt3OmIXquSpp54yVqxYYRw6dMhYvXq1ER8fb4SFhRmnTp0yDKN2HMtLmc1mo2nTpsazzz5b5j1XPZ7Z2dnG5s2bjc2bNxuSjOnTpxubN2+23uX0z3/+0wgNDTW++eYbY9u2bcYd
d9xhNG/e3MjLy7Nuo3///sZbb71l/fly33FHqGw/CwsLjdtvv91o0qSJsWXLllLf2YKCAus2fr+fl/v9d5TK9jU7O9v461//aiQlJRmHDh0yfvzxR+Oaa64xWrVqZeTn51u34erH9ILMzEwjICDAmDVrVrnbcIVjWpXzyZ///GejadOmxvLly42NGzcavXr1Mnr16lVqO23atDEWLVpk/bkq3+3LIfzY6K233jKaNm1q+Pj4GD169DDWrl1rfe/66683Ro0aVar9559/brRu3drw8fExOnToYCxevLiGK7aNpHJf8+bNs7b5/X4+8cQT1n+T8PBw49ZbbzU2bdpU88XbYNiwYUajRo0MHx8fIzIy0hg2bJixf/9+6/u14Vhe6ocffjAkGXv37i3znqsez59++qnc39UL+2KxWIzJkycb4eHhhq+vr3HTTTeV2f/o6Ghj6tSppZZV9h13hMr289ChQxV+Z3/66SfrNn6/n5f7/XeUyvY1NzfXGDBggNGgQQPD29vbiI6ONsaOHVsmxLj6Mb3g7bffNvz9/Y2MjIxyt+EKx7Qq55O8vDzjscceM+rWrWsEBAQYd955p3Hy5Mky27l0nap8ty/HdH7DAAAAboExPwAAwK0QfgAAgFsh/AAAALdC+AEAAG6F8AMAANwK4QcAALgVwg8AAHArhB8AAOBWCD8Aasxvv/0mk8mkLVu2OLoUm82fP1+hoaF2bwug5hF+ANSYqKgonTx5Uh07dnR0KTYbNmyYfv31V0eXAcAOvBxdAAD3UFhYKB8fH0VERDi6FJsVFRXJ399f/v7+ji4FgB3Q8wPAZjfccIPGjx+v8ePHKyQkRGFhYZo8ebIufVRgs2bN9Pe//10jR45UcHCwHnnkkVKXvSwWi5o0aaJZs2aV2vbmzZvl4eGhw4cPS5KmT5+uTp06KTAwUFFRUXrsscd07ty5UuusXr1aN9xwgwICAlS3bl0NHDhQZ8+e1QcffKD69euroKCgVPshQ4bogQceKHffLtS4YMECXX/99fLz89PHH39c5lLW1q1bdeONNyooKEjBwcGKjY3Vxo0by93m6dOn1b17d915551lagFQ8wg/AK7I+++/Ly8vL61fv15vvPGGpk+frvfee69Um3//+9/q0qWLNm/erMmTJ5d6z8PDQ8OHD9cnn3xSavnHH3+sPn36KDo62truzTff1M6dO/X+++9r+fLleuaZZ6ztt2zZoptuuknt27dXUlKSVq1apcGDB8tsNuvuu++W2WzWt99+a21/6tQpLV68WA899FCl+zdx4kRNmDBBu3fv1sCBA8u8P2LECDVp0kQbNmxQcnKyJk6cKG9v7zLtjh49qr59+6pjx4764osv5OvrW+nnAqgBV/ysegBu6/rrrzfatWtnWCwW67Jnn33WaNeunfXn6OhoY8iQIaXWO3TokCHJ2Lx5s2EYhrF582bDZDIZhw8fNgzDMMxmsxEZGWnMmjWrws9euHChUb9+fevPw4cPN/r06VNh+0cffdQYNGiQ9efXXnvNiImJKVV7eTXOmDGj1PJ58+YZISEh1p+DgoKM+fPnl7uNC2337NljREVFGX/5y18q/DwANY+eHwBXpGfPnjKZTNafe/XqpX379slsNluXde/evdJtdO3aVe3atbP2/vz88886deqU7r77bmubH3/8UTfddJMiIyMVFBSkBx54QOnp6crNzZV0seenImPHjtXSpUt1/PhxSSV3Yj344IOlai/P5WpPSEjQmDFjFB8fr3/+8586cOBAqffz8vLUt29fDR06VG+88cZlPw9AzSH8AKg2gYGBl20zYsQIa/j55JNPdMstt6h+/fqSSsbf/OEPf1Dnzp315ZdfKjk5WTNnzpRUMoBa0mUHIXfr1k1dunTRBx98oOTkZO3cuVMPPvjgVdf+wgsvaOfOnbrtttu0fPlytW/fXl999ZX1fV9fX8XHx+t///ufNXgBcA6EHwBXZN26daV+Xrt2rVq1aiVP
T0+btnPfffdpx44dSk5O1hdffKERI0ZY30tOTpbFYtFrr72mnj17qnXr1jpx4kSp9Tt37qzExMRKP2PMmDGaP3++5s2bp/j4eEVFRdlUY0Vat26tJ598UkuXLtXQoUM1b94863seHh768MMPFRsbqxtvvLFM3QAch/AD4IocOXJECQkJ2rt3rz799FO99dZbmjBhgs3badasmXr37q2HH35YZrNZt99+u/W9li1bqqioSG+99ZYOHjyoDz/8ULNnzy61/qRJk7RhwwY99thj2rZtm/bs2aNZs2YpLS3N2ua+++7TsWPH9O677152oHNV5OXlafz48VqxYoUOHz6s1atXa8OGDWrXrl2pdp6envr444/VpUsX9e/fXykpKVf92QCuHuEHwBUZOXKk8vLy1KNHD40bN04TJkzQI488ckXbGjFihLZu3ao777yz1GWsLl26aPr06XrllVfUsWNHffzxx5o2bVqpdVu3bq2lS5dq69at6tGjh3r16qVvvvlGXl4XpzELCQnRXXfdpTp16mjIkCFXVOOlPD09lZ6erpEjR6p169a65557NGjQIL344otl2np5eenTTz9Vhw4d1L9/f506deqqPx/A1TEZxiUTcwBAFdxwww3q2rWrZsyY4ehSquymm25Shw4d9Oabbzq6FAAOxgzPAGq1s2fPasWKFVqxYoX++9//OrocAE6A8AOgVuvWrZvOnj2rV155RW3atHF0OQCcAJe9AACAW2HAMwAAcCuEHwAA4FYIPwAAwK0QfgAAgFsh/AAAALdC+AEAAG6F8AMAANwK4QcAALiV/w+aIdaL3oII1AAAAABJRU5ErkJggg==",
      "text/plain": [
       "<Figure size 640x480 with 1 Axes>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# Plot base attack accuracy as a function of privacy risk.\n",
    "# NOTE: pandas is already imported in the notebook's top import cell, so the\n",
    "# redundant in-cell `import pandas as pd` was removed.\n",
    "# X_axi / Y_axi are assumed to be defined by an earlier cell — presumably the\n",
    "# risk thresholds and the corresponding attack accuracies; confirm upstream.\n",
    "df = pd.DataFrame({'xvalues': X_axi, 'yvalues': Y_axi})\n",
    "\n",
    "# Column names are resolved against `data=df` by matplotlib.\n",
    "plt.plot('xvalues', 'yvalues', data=df)\n",
    "plt.xlabel(\"privacy risk\")\n",
    "plt.ylabel(\"attack accuracy\")\n",
    "# Render the figure.\n",
    "plt.show()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "opacus",
   "language": "python",
   "name": "opacus"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
