{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Model-building and training utilities (TensorFlow 1.x tf.keras API).\n",
    "from tensorflow.keras.models import Sequential\n",
    "from tensorflow.keras.layers import Dense, Activation, Flatten, Dropout, BatchNormalization\n",
    "from tensorflow.keras.layers import Conv2D, MaxPooling2D\n",
    "from tensorflow.keras import regularizers\n",
    "from tensorflow.keras.optimizers import Adam,Adadelta\n",
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "# allow_growth stops TF from grabbing all GPU memory up front.\n",
    "# NOTE(review): tf.ConfigProto/tf.Session are TF1-only and were removed in\n",
    "# TF 2.x; under TF2 use tf.config.experimental.set_memory_growth instead.\n",
    "config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))\n",
    "sess = tf.Session(config=config)\n",
    "\n",
    "import random\n",
    "import matplotlib.pyplot as plt\n",
    "import cv2\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import albumentations\n",
    "from scipy.fftpack import dct, idct\n",
    "\n",
    "def dct2 (block):\n",
    "    return dct(dct(block.T, norm = 'ortho').T, norm = 'ortho')\n",
    "\n",
    "def idct2(block):\n",
    "    return idct(idct(block.T, norm = 'ortho').T, norm = 'ortho')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def valnear0(dct_ori,rmin = -1.5,rmax = 1.5):\n",
    "    return len(dct_ori[dct_ori<rmax][dct_ori[dct_ori<rmax]>rmin])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "def addnoise(img):\n",
    "    \"\"\"Add Gaussian noise to a float image in [0, 1]; returns a float image.\"\"\"\n",
    "    aug = albumentations.GaussNoise(p=1,mean=25,var_limit=(10,70))\n",
    "    # albumentations expects uint8 input, so scale to [0, 255] and back.\n",
    "    augmented = aug(image=(img*255).astype(np.uint8))\n",
    "    auged = augmented['image']/255\n",
    "    return auged\n",
    "\n",
    "def randshadow(img):\n",
    "    \"\"\"Cast a random shadow on a float image in [0, 1]; output is 32x32.\"\"\"\n",
    "    aug = albumentations.RandomShadow(p=1)\n",
    "    test = (img*255).astype(np.uint8)\n",
    "    # Shadow is applied on a 32x32 resize (CIFAR-10 resolution).\n",
    "    augmented = aug(image=cv2.resize(test,(32,32)))\n",
    "    auged = augmented['image']/255\n",
    "    return auged"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "def normalization(data):\n",
    "    _range = np.max(data) - np.min(data)\n",
    "    return (data - np.min(data)) / _range"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "import math\n",
    "import torch\n",
    "from torch.nn import functional as F\n",
    "\n",
    "\n",
    "def tensor2img(t):\n",
    "    t_np = t.detach().cpu().numpy().transpose(1, 2, 0)\n",
    "    return t_np\n",
    "\n",
    "\n",
    "def gauss_smooth(image, sig=6):\n",
    "    \"\"\"Gaussian-blur an HxWx3 float image via two separable depthwise convs.\n",
    "\n",
    "    image: channels-last numpy array. sig: blur strength; the kernel has\n",
    "    5*sig taps. Returns an HWC numpy array (spatial size may differ by one\n",
    "    pixel from the input when the kernel length is even).\n",
    "    \"\"\"\n",
    "    size_denom = 5.\n",
    "    sigma = sig * size_denom\n",
    "    kernel_size = sigma\n",
    "    # Centered tap grid; scaling by size_denom cancels against sigma so the\n",
    "    # exponent reduces to (offset/sig)^2 / 2 in tap units.\n",
    "    mgrid = np.arange(kernel_size, dtype=np.float32)\n",
    "    mean = (kernel_size - 1.) / 2.\n",
    "    mgrid = mgrid - mean\n",
    "    mgrid = mgrid * size_denom\n",
    "    kernel = 1. / (sigma * math.sqrt(2. * math.pi)) * \\\n",
    "             np.exp(-(((mgrid - 0.) / (sigma)) ** 2) * 0.5)\n",
    "    # Normalize so blurring preserves overall intensity.\n",
    "    kernel = kernel / np.sum(kernel)\n",
    "\n",
    "    # Reshape to depthwise convolutional weight\n",
    "    kernelx = np.tile(np.reshape(kernel, (1, 1, int(kernel_size), 1)), (3, 1, 1, 1))\n",
    "    kernely = np.tile(np.reshape(kernel, (1, 1, 1, int(kernel_size))), (3, 1, 1, 1))\n",
    "\n",
    "    # Pad so the valid convolutions return (about) the original spatial size;\n",
    "    # evenorodd compensates for even kernel lengths.\n",
    "    padd0 = int(kernel_size // 2)\n",
    "    evenorodd = int(1 - kernel_size % 2)\n",
    "\n",
    "    pad = torch.nn.ConstantPad2d((padd0 - evenorodd, padd0, padd0 - evenorodd, padd0), 0.)\n",
    "    # HWC numpy -> NCHW float tensor for torch convolution.\n",
    "    in_put = torch.from_numpy(np.expand_dims(np.transpose(image.astype(np.float32), (2, 0, 1)), axis=0))\n",
    "    output = pad(in_put)\n",
    "\n",
    "    weightx = torch.from_numpy(kernelx)\n",
    "    weighty = torch.from_numpy(kernely)\n",
    "    conv = F.conv2d\n",
    "    # groups=3 applies the 1-D kernel to each RGB channel independently:\n",
    "    # first vertically, then horizontally (separable 2-D Gaussian).\n",
    "    output = conv(output, weightx, groups=3)\n",
    "    output = conv(output, weighty, groups=3)\n",
    "    output = tensor2img(output[0])\n",
    "\n",
    "    return output"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "def patching_train(clean_sample):\n",
    "    '''\n",
    "    this code conducts a patching procedure with random white blocks or random noise block\n",
    "\n",
    "    clean_sample: float image in [0, 1], assumed 32x32x3 (CIFAR-10).\n",
    "    One of five equally likely corruptions is applied: white patch, uniform-\n",
    "    noise patch, Gaussian noise, random shadow, or blending with another\n",
    "    training image. Relies on the global x_train for the blending case.\n",
    "    '''\n",
    "    attack = np.random.randint(0,5)\n",
    "    pat_size_x = np.random.randint(2,8)\n",
    "    pat_size_y = np.random.randint(2,8)\n",
    "    output = np.copy(clean_sample)\n",
    "    if attack == 0:\n",
    "        # Solid white patch.\n",
    "        block = np.ones((pat_size_x,pat_size_y,3))\n",
    "    elif attack == 1:\n",
    "        # Uniform-noise patch.\n",
    "        block = np.random.rand(pat_size_x,pat_size_y,3)\n",
    "    elif attack ==2:\n",
    "        return addnoise(output)\n",
    "    elif attack ==3:\n",
    "        return randshadow(output)\n",
    "    if attack ==4:\n",
    "        # Blend a random training image in at 30% strength, clipped to 1.\n",
    "        randind = np.random.randint(x_train.shape[0])\n",
    "        tri = x_train[randind]\n",
    "        mid = output+0.3*tri\n",
    "        mid[mid>1]=1\n",
    "        return mid\n",
    "\n",
    "        \n",
    "    # Patch attacks (0/1): paste the block into one of the four corners\n",
    "    # with a random margin from the image border.\n",
    "    margin = np.random.randint(0,6)\n",
    "    rand_loc = np.random.randint(0,4)\n",
    "    if rand_loc==0:\n",
    "        output[margin:margin+pat_size_x,margin:margin+pat_size_y,:] = block #upper left\n",
    "    elif rand_loc==1:\n",
    "        output[margin:margin+pat_size_x,32-margin-pat_size_y:32-margin,:] = block\n",
    "    elif rand_loc==2:\n",
    "        output[32-margin-pat_size_x:32-margin,margin:margin+pat_size_y,:] = block\n",
    "    elif rand_loc==3:\n",
    "        output[32-margin-pat_size_x:32-margin,32-margin-pat_size_y:32-margin,:] = block #right bottom\n",
    "\n",
    "    output[output > 1] = 1\n",
    "    return output "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "def patching_test(clean_sample,attack_name):\n",
    "    '''\n",
    "    this code conducts a patching procedure to generate backdoor data\n",
    "    **please make sure the input sample's label is different from the target label\n",
    "    \n",
    "    clean_sample: clean input\n",
    "    attack_name: trigger's file name\n",
    "    '''\n",
    "\n",
    "    if attack_name == 'badnets':\n",
    "        # BadNets: 4x4 white square near the bottom-right corner.\n",
    "        output = np.copy(clean_sample)\n",
    "        pat_size = 4\n",
    "        output[32-1-pat_size:32-1,32-1-pat_size:32-1,:] = 1\n",
    "\n",
    "    else:\n",
    "        if attack_name == 'l0_inv':\n",
    "            # l0-invisible trigger: zero out masked pixels, then add trigger.\n",
    "            trimg = plt.imread('./triggers/'+ attack_name + '.png')\n",
    "            mask = 1-np.transpose(np.load('./triggers/mask.npy'),(1,2,0))\n",
    "            output = clean_sample*mask+trimg\n",
    "        elif attack_name == 'smooth':\n",
    "            # Smooth universal perturbation; rescale result back to [0, 1].\n",
    "            trimg = np.load('./triggers/best_universal.npy')[0]\n",
    "            output = clean_sample+trimg\n",
    "            output = normalization(output)\n",
    "        else:\n",
    "            # Generic additive trigger loaded from ./triggers/<name>.png.\n",
    "            trimg = plt.imread('./triggers/'+ attack_name + '.png')\n",
    "            output = clean_sample+trimg\n",
    "    output[output > 1] = 1\n",
    "    return output "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Using TensorFlow backend.\n"
     ]
    }
   ],
   "source": [
    "# Use tf.keras's bundled CIFAR-10 loader, consistent with the tf.keras\n",
    "# imports above (the standalone `keras` package is a separate install and\n",
    "# mixing the two APIs is a common source of incompatibilities).\n",
    "from tensorflow.keras.datasets import cifar10\n",
    "(x_train, y_train), (x_test, y_test) = cifar10.load_data()\n",
    "# Scale pixels to [0, 1] floats.\n",
    "x_train = x_train.astype('float32')/255\n",
    "x_test = x_test.astype('float32')/255"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(50000, 32, 32, 3)"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "x_train.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "WARNING:tensorflow:From /home/eason/anaconda3/envs/untitled/lib/python3.7/site-packages/tensorflow_core/python/ops/resource_variable_ops.py:1630: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version.\n",
      "Instructions for updating:\n",
      "If using Keras pass *_constraint arguments to layers.\n",
      "Model: \"sequential\"\n",
      "_________________________________________________________________\n",
      "Layer (type)                 Output Shape              Param #   \n",
      "=================================================================\n",
      "conv2d (Conv2D)              (None, 32, 32, 32)        896       \n",
      "_________________________________________________________________\n",
      "activation (Activation)      (None, 32, 32, 32)        0         \n",
      "_________________________________________________________________\n",
      "batch_normalization (BatchNo (None, 32, 32, 32)        128       \n",
      "_________________________________________________________________\n",
      "conv2d_1 (Conv2D)            (None, 32, 32, 32)        9248      \n",
      "_________________________________________________________________\n",
      "activation_1 (Activation)    (None, 32, 32, 32)        0         \n",
      "_________________________________________________________________\n",
      "batch_normalization_1 (Batch (None, 32, 32, 32)        128       \n",
      "_________________________________________________________________\n",
      "max_pooling2d (MaxPooling2D) (None, 16, 16, 32)        0         \n",
      "_________________________________________________________________\n",
      "dropout (Dropout)            (None, 16, 16, 32)        0         \n",
      "_________________________________________________________________\n",
      "conv2d_2 (Conv2D)            (None, 16, 16, 64)        18496     \n",
      "_________________________________________________________________\n",
      "activation_2 (Activation)    (None, 16, 16, 64)        0         \n",
      "_________________________________________________________________\n",
      "batch_normalization_2 (Batch (None, 16, 16, 64)        256       \n",
      "_________________________________________________________________\n",
      "conv2d_3 (Conv2D)            (None, 16, 16, 64)        36928     \n",
      "_________________________________________________________________\n",
      "activation_3 (Activation)    (None, 16, 16, 64)        0         \n",
      "_________________________________________________________________\n",
      "batch_normalization_3 (Batch (None, 16, 16, 64)        256       \n",
      "_________________________________________________________________\n",
      "max_pooling2d_1 (MaxPooling2 (None, 8, 8, 64)          0         \n",
      "_________________________________________________________________\n",
      "dropout_1 (Dropout)          (None, 8, 8, 64)          0         \n",
      "_________________________________________________________________\n",
      "conv2d_4 (Conv2D)            (None, 8, 8, 128)         73856     \n",
      "_________________________________________________________________\n",
      "activation_4 (Activation)    (None, 8, 8, 128)         0         \n",
      "_________________________________________________________________\n",
      "batch_normalization_4 (Batch (None, 8, 8, 128)         512       \n",
      "_________________________________________________________________\n",
      "last_conv (Conv2D)           (None, 8, 8, 128)         147584    \n",
      "_________________________________________________________________\n",
      "activation_5 (Activation)    (None, 8, 8, 128)         0         \n",
      "_________________________________________________________________\n",
      "batch_normalization_5 (Batch (None, 8, 8, 128)         512       \n",
      "_________________________________________________________________\n",
      "max_pooling2d_2 (MaxPooling2 (None, 4, 4, 128)         0         \n",
      "_________________________________________________________________\n",
      "dropout_2 (Dropout)          (None, 4, 4, 128)         0         \n",
      "_________________________________________________________________\n",
      "flatten (Flatten)            (None, 2048)              0         \n",
      "_________________________________________________________________\n",
      "dense (Dense)                (None, 2)                 4098      \n",
      "=================================================================\n",
      "Total params: 292,898\n",
      "Trainable params: 292,002\n",
      "Non-trainable params: 896\n",
      "_________________________________________________________________\n"
     ]
    }
   ],
   "source": [
    "#Simple 6-layer CNN \n",
    "# Binary detector: classifies a DCT-transformed image as clean (0) vs poisoned (1).\n",
    "# Three conv stages (32 -> 64 -> 128 filters), each with two Conv-ELU-BN\n",
    "# pairs, 2x2 max-pooling and increasing dropout, then a 2-way softmax head.\n",
    "# NOTE: layer names 'last_conv' and 'dense' are referenced by saved weights;\n",
    "# do not rename them.\n",
    "weight_decay = 1e-4\n",
    "num_classes = 2\n",
    "model = Sequential()\n",
    "model.add(Conv2D(32, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay), input_shape=x_test.shape[1:]))\n",
    "model.add(Activation('elu'))\n",
    "model.add(BatchNormalization())\n",
    "model.add(Conv2D(32, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))\n",
    "model.add(Activation('elu'))\n",
    "model.add(BatchNormalization())\n",
    "model.add(MaxPooling2D(pool_size=(2,2)))\n",
    "model.add(Dropout(0.2))\n",
    "\n",
    "model.add(Conv2D(64, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))\n",
    "model.add(Activation('elu'))\n",
    "model.add(BatchNormalization())\n",
    "model.add(Conv2D(64, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))\n",
    "model.add(Activation('elu'))\n",
    "model.add(BatchNormalization())\n",
    "model.add(MaxPooling2D(pool_size=(2,2)))\n",
    "model.add(Dropout(0.3))\n",
    "\n",
    "model.add(Conv2D(128, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay)))\n",
    "model.add(Activation('elu'))\n",
    "model.add(BatchNormalization())\n",
    "model.add(Conv2D(128, (3,3), padding='same', kernel_regularizer=regularizers.l2(weight_decay),name='last_conv'))\n",
    "model.add(Activation('elu'))\n",
    "model.add(BatchNormalization())\n",
    "model.add(MaxPooling2D(pool_size=(2,2)))\n",
    "model.add(Dropout(0.4))\n",
    "\n",
    "model.add(Flatten())\n",
    "model.add(Dense(num_classes, activation='softmax',name='dense'))\n",
    "\n",
    "model.summary()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Train a new detector from scratch"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Adadelta with a small base lr; 2-class softmax -> categorical crossentropy.\n",
    "# NOTE(review): `lr` is the TF1-era argument name (`learning_rate` in TF2).\n",
    "opt = Adadelta(lr = 0.05)\n",
    "model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "bc733d902d2a4d57a3b13e6f60d10576",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(IntProgress(value=0, max=5), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train on 100000 samples\n",
      "Epoch 1/10\n",
      "100000/100000 [==============================] - 13s 133us/sample - loss: 0.1249 - acc: 0.9612\n",
      "Epoch 2/10\n",
      "100000/100000 [==============================] - 13s 130us/sample - loss: 0.1195 - acc: 0.9630\n",
      "Epoch 3/10\n",
      "100000/100000 [==============================] - 13s 129us/sample - loss: 0.1191 - acc: 0.9633\n",
      "Epoch 4/10\n",
      "100000/100000 [==============================] - 13s 130us/sample - loss: 0.1172 - acc: 0.9643\n",
      "Epoch 5/10\n",
      "100000/100000 [==============================] - 13s 130us/sample - loss: 0.1171 - acc: 0.9635\n",
      "Epoch 6/10\n",
      "100000/100000 [==============================] - 13s 130us/sample - loss: 0.1158 - acc: 0.9645\n",
      "Epoch 7/10\n",
      "100000/100000 [==============================] - 13s 130us/sample - loss: 0.1139 - acc: 0.9650\n",
      "Epoch 8/10\n",
      "100000/100000 [==============================] - 13s 130us/sample - loss: 0.1140 - acc: 0.9646\n",
      "Epoch 9/10\n",
      "100000/100000 [==============================] - 13s 130us/sample - loss: 0.1128 - acc: 0.9650\n",
      "Epoch 10/10\n",
      "100000/100000 [==============================] - 13s 130us/sample - loss: 0.1124 - acc: 0.9652\n",
      "Train on 100000 samples\n",
      "Epoch 1/10\n",
      "100000/100000 [==============================] - 13s 130us/sample - loss: 0.1187 - acc: 0.9635\n",
      "Epoch 2/10\n",
      "100000/100000 [==============================] - 13s 130us/sample - loss: 0.1156 - acc: 0.9645\n",
      "Epoch 3/10\n",
      "100000/100000 [==============================] - 13s 130us/sample - loss: 0.1146 - acc: 0.9645\n",
      "Epoch 4/10\n",
      "100000/100000 [==============================] - 13s 130us/sample - loss: 0.1132 - acc: 0.9652\n",
      "Epoch 5/10\n",
      "100000/100000 [==============================] - 13s 130us/sample - loss: 0.1117 - acc: 0.9659\n",
      "Epoch 6/10\n",
      "100000/100000 [==============================] - 13s 131us/sample - loss: 0.1112 - acc: 0.9662\n",
      "Epoch 7/10\n",
      "100000/100000 [==============================] - 13s 130us/sample - loss: 0.1103 - acc: 0.9661\n",
      "Epoch 8/10\n",
      "100000/100000 [==============================] - 13s 131us/sample - loss: 0.1101 - acc: 0.9659\n",
      "Epoch 9/10\n",
      "100000/100000 [==============================] - 13s 130us/sample - loss: 0.1084 - acc: 0.9667\n",
      "Epoch 10/10\n",
      "100000/100000 [==============================] - 13s 130us/sample - loss: 0.1070 - acc: 0.9674\n",
      "Train on 100000 samples\n",
      "Epoch 1/10\n",
      "100000/100000 [==============================] - 13s 130us/sample - loss: 0.1177 - acc: 0.9640\n",
      "Epoch 2/10\n",
      "100000/100000 [==============================] - 13s 131us/sample - loss: 0.1164 - acc: 0.9645\n",
      "Epoch 3/10\n",
      "100000/100000 [==============================] - 13s 130us/sample - loss: 0.1150 - acc: 0.9648\n",
      "Epoch 4/10\n",
      "100000/100000 [==============================] - 13s 131us/sample - loss: 0.1139 - acc: 0.9648\n",
      "Epoch 5/10\n",
      "100000/100000 [==============================] - 13s 131us/sample - loss: 0.1128 - acc: 0.9651\n",
      "Epoch 6/10\n",
      "100000/100000 [==============================] - 13s 130us/sample - loss: 0.1118 - acc: 0.9653\n",
      "Epoch 7/10\n",
      "100000/100000 [==============================] - 13s 130us/sample - loss: 0.1100 - acc: 0.9668\n",
      "Epoch 8/10\n",
      "100000/100000 [==============================] - 13s 131us/sample - loss: 0.1102 - acc: 0.9659\n",
      "Epoch 9/10\n",
      "100000/100000 [==============================] - 13s 131us/sample - loss: 0.1091 - acc: 0.9663\n",
      "Epoch 10/10\n",
      "100000/100000 [==============================] - 13s 130us/sample - loss: 0.1091 - acc: 0.9665\n",
      "Train on 100000 samples\n",
      "Epoch 1/10\n",
      "100000/100000 [==============================] - 13s 131us/sample - loss: 0.1174 - acc: 0.9642\n",
      "Epoch 2/10\n",
      "100000/100000 [==============================] - 13s 130us/sample - loss: 0.1149 - acc: 0.9652\n",
      "Epoch 3/10\n",
      "100000/100000 [==============================] - 13s 131us/sample - loss: 0.1142 - acc: 0.9657\n",
      "Epoch 4/10\n",
      "100000/100000 [==============================] - 13s 130us/sample - loss: 0.1136 - acc: 0.9651\n",
      "Epoch 5/10\n",
      "100000/100000 [==============================] - 13s 131us/sample - loss: 0.1123 - acc: 0.9658\n",
      "Epoch 6/10\n",
      "100000/100000 [==============================] - 13s 131us/sample - loss: 0.1105 - acc: 0.9664\n",
      "Epoch 7/10\n",
      "100000/100000 [==============================] - 13s 131us/sample - loss: 0.1101 - acc: 0.9663\n",
      "Epoch 8/10\n",
      "100000/100000 [==============================] - 13s 131us/sample - loss: 0.1082 - acc: 0.9673\n",
      "Epoch 9/10\n",
      "100000/100000 [==============================] - 13s 130us/sample - loss: 0.1075 - acc: 0.9671\n",
      "Epoch 10/10\n",
      "100000/100000 [==============================] - 13s 130us/sample - loss: 0.1072 - acc: 0.9672\n",
      "Train on 100000 samples\n",
      "Epoch 1/10\n",
      "100000/100000 [==============================] - 13s 130us/sample - loss: 0.1134 - acc: 0.9660\n",
      "Epoch 2/10\n",
      "100000/100000 [==============================] - 13s 132us/sample - loss: 0.1123 - acc: 0.9653\n",
      "Epoch 3/10\n",
      "100000/100000 [==============================] - 13s 130us/sample - loss: 0.1102 - acc: 0.9661\n",
      "Epoch 4/10\n",
      "100000/100000 [==============================] - 13s 130us/sample - loss: 0.1086 - acc: 0.9669\n",
      "Epoch 5/10\n",
      "100000/100000 [==============================] - 13s 129us/sample - loss: 0.1077 - acc: 0.9672\n",
      "Epoch 6/10\n",
      "100000/100000 [==============================] - 13s 130us/sample - loss: 0.1071 - acc: 0.9669\n",
      "Epoch 7/10\n",
      "100000/100000 [==============================] - 13s 130us/sample - loss: 0.1067 - acc: 0.9676\n",
      "Epoch 8/10\n",
      "100000/100000 [==============================] - 13s 130us/sample - loss: 0.1056 - acc: 0.9679\n",
      "Epoch 9/10\n",
      "100000/100000 [==============================] - 13s 130us/sample - loss: 0.1059 - acc: 0.9676\n",
      "Epoch 10/10\n",
      "100000/100000 [==============================] - 13s 129us/sample - loss: 0.1030 - acc: 0.9685\n",
      "\n"
     ]
    }
   ],
   "source": [
    "from tqdm.auto import tqdm  # tqdm.tqdm_notebook is deprecated\n",
    "\n",
    "# Each round builds a fresh poisoned copy of the training set, trains the\n",
    "# detector for 10 epochs on clean-vs-poisoned DCT features, and checkpoints.\n",
    "# (Outer loop variable renamed: it previously shadowed the inner index `i`.)\n",
    "for rnd in tqdm(range(5)):\n",
    "    poi_train = np.zeros_like(x_train)\n",
    "    for i in range(x_train.shape[0]):\n",
    "        poi_train[i] = patching_train(x_train[i])\n",
    "\n",
    "    # 3-channel DCT features; label 0 = clean, 1 = poisoned.\n",
    "    # np.int was removed in NumPy 1.24 -> use the builtin int dtype.\n",
    "    x_dct_train = np.vstack((x_train,poi_train))\n",
    "    y_dct_train = (np.vstack((np.zeros((x_train.shape[0],1)),np.ones((x_train.shape[0],1))))).astype(int)\n",
    "    for i in range(x_dct_train.shape[0]):\n",
    "        for channel in range(3):\n",
    "            x_dct_train[i][:,:,channel] = dct2((x_dct_train[i][:,:,channel]*255).astype(np.uint8))\n",
    "\n",
    "    # Shuffle clean and poisoned samples together, then one-hot the labels.\n",
    "    idx = np.arange(x_dct_train.shape[0])\n",
    "    random.shuffle(idx)\n",
    "    x_final_train = x_dct_train[idx]\n",
    "    y_final_train = y_dct_train[idx]\n",
    "    hot_lab = np.squeeze(np.eye(2)[y_final_train])\n",
    "    \n",
    "    model.fit(x_final_train,hot_lab,epochs=10, batch_size=64)\n",
    "    model.save('./detector/6_CNN_CIF1R10.h5py')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Load pre-trained detectors"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Load the detector that does not consider the smooth trigger"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "model.load_weights('./detector/6_CNN_CIF1R10.h5py')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Load the detector fine-tuned to also consider the smooth trigger"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "model.load_weights('./detector/Tuned_CIFAR10.h5py')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Re-compile after loading weights so evaluate() has a loss/metric bound.\n",
    "# NOTE(review): `lr` is the TF1-era argument name (`learning_rate` in TF2).\n",
    "opt = Adadelta(lr = 0.05)\n",
    "model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Test on BadNets"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build a poisoned copy of the test set with the BadNets patch trigger.\n",
    "# random.sample() on a set raises TypeError on Python 3.11+; use a list\n",
    "# with random.choice instead (identical behavior).\n",
    "poi_test = np.zeros_like(x_test)\n",
    "attack_list = ['badnets']\n",
    "for i in range(x_test.shape[0]):\n",
    "    attack_name = random.choice(attack_list)\n",
    "    poi_test[i] = patching_test(x_test[i],attack_name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Stack clean + poisoned test sets and move each channel to DCT space.\n",
    "# Label 0 = clean, 1 = poisoned; np.int was removed in NumPy 1.24.\n",
    "x_dct_test = np.vstack((x_test,poi_test))#[:,:,:,0]\n",
    "y_dct_test = (np.vstack((np.zeros((x_test.shape[0],1)),np.ones((x_test.shape[0],1))))).astype(int)\n",
    "for i in range(x_dct_test.shape[0]):\n",
    "    for channel in range(3):\n",
    "        x_dct_test[i][:,:,channel] = dct2((x_dct_test[i][:,:,channel]*255).astype(np.uint8))\n",
    "hot_test_lab = np.squeeze(np.eye(2)[y_dct_test])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "20000/20000 [==============================] - 2s 123us/sample - loss: 0.1706 - acc: 0.9470\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[0.17064756087064742, 0.94705]"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model.evaluate(x_dct_test,hot_test_lab, batch_size=64)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "10000/10000 [==============================] - 1s 53us/sample - loss: 0.2206 - acc: 0.9272\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[0.22062041883468628, 0.9272]"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model.evaluate(x_dct_test[10000:],hot_test_lab[10000:], batch_size=64)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Test on l2_inv (not seen before)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build a poisoned test set with the l2_inv trigger (unseen in training).\n",
    "# random.sample() on a set raises TypeError on Python 3.11+; use a list\n",
    "# with random.choice instead (identical behavior).\n",
    "poi_test = np.zeros_like(x_test)\n",
    "attack_list = ['l2_inv']\n",
    "for i in range(x_test.shape[0]):\n",
    "    attack_name = random.choice(attack_list)\n",
    "    poi_test[i] = patching_test(x_test[i],attack_name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Stack clean + poisoned test sets and move each channel to DCT space.\n",
    "# Label 0 = clean, 1 = poisoned; np.int was removed in NumPy 1.24.\n",
    "x_dct_test = np.vstack((x_test,poi_test))#[:,:,:,0]\n",
    "y_dct_test = (np.vstack((np.zeros((x_test.shape[0],1)),np.ones((x_test.shape[0],1))))).astype(int)\n",
    "for i in range(x_dct_test.shape[0]):\n",
    "    for channel in range(3):\n",
    "        x_dct_test[i][:,:,channel] = dct2((x_dct_test[i][:,:,channel]*255).astype(np.uint8))\n",
    "hot_test_lab = np.squeeze(np.eye(2)[y_dct_test])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "20000/20000 [==============================] - 1s 49us/sample - loss: 0.0697 - acc: 0.9834\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[0.06966441652476787, 0.9834]"
      ]
     },
     "execution_count": 20,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model.evaluate(x_dct_test,hot_test_lab, batch_size=64)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "10000/10000 [==============================] - 0s 48us/sample - loss: 0.0187 - acc: 0.9999\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[0.018654128322005272, 0.9999]"
      ]
     },
     "execution_count": 21,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model.evaluate(x_dct_test[10000:],hot_test_lab[10000:], batch_size=64)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Test on l0_inv (not seen before)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build a poisoned test set with the l0_inv trigger (unseen in training).\n",
    "# random.sample() on a set raises TypeError on Python 3.11+; use a list\n",
    "# with random.choice instead (identical behavior).\n",
    "poi_test = np.zeros_like(x_test)\n",
    "attack_list = ['l0_inv']\n",
    "for i in range(x_test.shape[0]):\n",
    "    attack_name = random.choice(attack_list)\n",
    "    poi_test[i] = patching_test(x_test[i],attack_name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Stack clean + poisoned test sets and move each channel to DCT space.\n",
    "# Label 0 = clean, 1 = poisoned; np.int was removed in NumPy 1.24.\n",
    "x_dct_test = np.vstack((x_test,poi_test))#[:,:,:,0]\n",
    "y_dct_test = (np.vstack((np.zeros((x_test.shape[0],1)),np.ones((x_test.shape[0],1))))).astype(int)\n",
    "for i in range(x_dct_test.shape[0]):\n",
    "    for channel in range(3):\n",
    "        x_dct_test[i][:,:,channel] = dct2((x_dct_test[i][:,:,channel]*255).astype(np.uint8))\n",
    "hot_test_lab = np.squeeze(np.eye(2)[y_dct_test])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "20000/20000 [==============================] - 1s 51us/sample - loss: 0.0694 - acc: 0.9834\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[0.06942718822658062, 0.98345]"
      ]
     },
     "execution_count": 24,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model.evaluate(x_dct_test,hot_test_lab, batch_size=64)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "10000/10000 [==============================] - 1s 52us/sample - loss: 0.0182 - acc: 1.0000\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[0.018179671725630762, 1.0]"
      ]
     },
     "execution_count": 25,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "model.evaluate(x_dct_test[10000:],hot_test_lab[10000:], batch_size=64)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Test on trojan_wm (not seen before)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Poison every test image with the 'trojan_wm' trigger (not seen during training).\n",
    "poi_test = np.zeros_like(x_test)\n",
    "attack_list = ['trojan_wm']\n",
    "for i in range(x_test.shape[0]):\n",
    "    # random.choice needs a sequence; random.sample on a set raises TypeError on Python 3.11+.\n",
    "    attack_name = random.choice(attack_list)\n",
    "    poi_test[i] = patching_test(x_test[i],attack_name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build detector test set: first half clean (label 0), second half poisoned (label 1).\n",
    "x_dct_test = np.vstack((x_test,poi_test))#[:,:,:,0]\n",
    "# Builtin int: np.int was deprecated in NumPy 1.20 and removed in 1.24.\n",
    "y_dct_test = (np.vstack((np.zeros((x_test.shape[0],1)),np.ones((x_test.shape[0],1))))).astype(int)\n",
    "# Replace each image channel in place by the 2-D DCT of its 0-255 pixel values.\n",
    "for i in range(x_dct_test.shape[0]):\n",
    "    for channel in range(3):\n",
    "        x_dct_test[i][:,:,channel] = dct2((x_dct_test[i][:,:,channel]*255).astype(np.uint8))\n",
    "# One-hot (N, 2) labels for the binary clean/poisoned classifier.\n",
    "hot_test_lab = np.squeeze(np.eye(2)[y_dct_test])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "20000/20000 [==============================] - 1s 50us/sample - loss: 0.0698 - acc: 0.9834\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[0.06975040566027164, 0.9834]"
      ]
     },
     "execution_count": 28,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Overall detector accuracy on the mixed clean+poisoned set (20k samples).\n",
    "model.evaluate(x_dct_test,hot_test_lab, batch_size=64)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "10000/10000 [==============================] - 0s 48us/sample - loss: 0.0188 - acc: 0.9999\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[0.01882610659301281, 0.9999]"
      ]
     },
     "execution_count": 29,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Accuracy on the poisoned half only (rows 10000:), i.e. the trigger detection rate.\n",
    "model.evaluate(x_dct_test[10000:],hot_test_lab[10000:], batch_size=64)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Test on trojan_sq (not seen before)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Poison every test image with the 'trojan_sq' trigger (not seen during training).\n",
    "poi_test = np.zeros_like(x_test)\n",
    "attack_list = ['trojan_sq']\n",
    "for i in range(x_test.shape[0]):\n",
    "    # random.choice needs a sequence; random.sample on a set raises TypeError on Python 3.11+.\n",
    "    attack_name = random.choice(attack_list)\n",
    "    poi_test[i] = patching_test(x_test[i],attack_name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build detector test set: first half clean (label 0), second half poisoned (label 1).\n",
    "x_dct_test = np.vstack((x_test,poi_test))#[:,:,:,0]\n",
    "# Builtin int: np.int was deprecated in NumPy 1.20 and removed in 1.24.\n",
    "y_dct_test = (np.vstack((np.zeros((x_test.shape[0],1)),np.ones((x_test.shape[0],1))))).astype(int)\n",
    "# Replace each image channel in place by the 2-D DCT of its 0-255 pixel values.\n",
    "for i in range(x_dct_test.shape[0]):\n",
    "    for channel in range(3):\n",
    "        x_dct_test[i][:,:,channel] = dct2((x_dct_test[i][:,:,channel]*255).astype(np.uint8))\n",
    "# One-hot (N, 2) labels for the binary clean/poisoned classifier.\n",
    "hot_test_lab = np.squeeze(np.eye(2)[y_dct_test])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "20000/20000 [==============================] - 1s 48us/sample - loss: 0.0729 - acc: 0.9826\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[0.07285819304287433, 0.98255]"
      ]
     },
     "execution_count": 32,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Overall detector accuracy on the mixed clean+poisoned set (20k samples).\n",
    "model.evaluate(x_dct_test,hot_test_lab, batch_size=64)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "10000/10000 [==============================] - 0s 49us/sample - loss: 0.0250 - acc: 0.9982\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[0.025041681441664694, 0.9982]"
      ]
     },
     "execution_count": 33,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Accuracy on the poisoned half only (rows 10000:), i.e. the trigger detection rate.\n",
    "model.evaluate(x_dct_test[10000:],hot_test_lab[10000:], batch_size=64)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Test on blending trigger"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Poison every test image with the 'blend' (blending) trigger.\n",
    "poi_test = np.zeros_like(x_test)\n",
    "attack_list = ['blend']\n",
    "for i in range(x_test.shape[0]):\n",
    "    # random.choice needs a sequence; random.sample on a set raises TypeError on Python 3.11+.\n",
    "    attack_name = random.choice(attack_list)\n",
    "    poi_test[i] = patching_test(x_test[i],attack_name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build detector test set: first half clean (label 0), second half poisoned (label 1).\n",
    "x_dct_test = np.vstack((x_test,poi_test))#[:,:,:,0]\n",
    "# Builtin int: np.int was deprecated in NumPy 1.20 and removed in 1.24.\n",
    "y_dct_test = (np.vstack((np.zeros((x_test.shape[0],1)),np.ones((x_test.shape[0],1))))).astype(int)\n",
    "# Replace each image channel in place by the 2-D DCT of its 0-255 pixel values.\n",
    "for i in range(x_dct_test.shape[0]):\n",
    "    for channel in range(3):\n",
    "        x_dct_test[i][:,:,channel] = dct2((x_dct_test[i][:,:,channel]*255).astype(np.uint8))\n",
    "# One-hot (N, 2) labels for the binary clean/poisoned classifier.\n",
    "hot_test_lab = np.squeeze(np.eye(2)[y_dct_test])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "20000/20000 [==============================] - 1s 50us/sample - loss: 0.0939 - acc: 0.9755\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[0.09392687987089157, 0.9755]"
      ]
     },
     "execution_count": 36,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Overall detector accuracy on the mixed clean+poisoned set (20k samples).\n",
    "model.evaluate(x_dct_test,hot_test_lab, batch_size=64)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "10000/10000 [==============================] - 1s 51us/sample - loss: 0.0672 - acc: 0.9841\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[0.06717905504703521, 0.9841]"
      ]
     },
     "execution_count": 37,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Accuracy on the poisoned half only (rows 10000:), i.e. the trigger detection rate.\n",
    "model.evaluate(x_dct_test[10000:],hot_test_lab[10000:], batch_size=64)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Test on smooth triggers"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Poison every test image with the 'smooth' (low-frequency) trigger.\n",
    "poi_test = np.zeros_like(x_test)\n",
    "attack_list = ['smooth']\n",
    "for i in range(x_test.shape[0]):\n",
    "    # random.choice needs a sequence; random.sample on a set raises TypeError on Python 3.11+.\n",
    "    attack_name = random.choice(attack_list)\n",
    "    poi_test[i] = patching_test(x_test[i],attack_name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<matplotlib.image.AxesImage at 0x7fe0f6683b50>"
      ]
     },
     "execution_count": 39,
     "metadata": {},
     "output_type": "execute_result"
    },
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAOcAAADnCAYAAADl9EEgAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAU4klEQVR4nO2d2Y9k91XHz92rq7q6qvfume4ez7SXkOAQx2BMiBFisZDgDZ54z9/Cn4EQGPEAUhBKEObFRBCU2Hgbx/HYM+3pWXqZXqqrqmu5VXfhgdfzPZJfnCP0/Tzeo1/VrXvvt650vr9zTlDXtRBC/BH+qk+AEKJDcRLiFIqTEKdQnIQ4heIkxCmxFfyrH9yBqdxAKrguTSN9TYT/C2bFDMaKam58VwpjZa2fo5WhDsISxkLras2b+DMFf2ac5urxSPRrKCISBPg0qqqAsXmB71lVoQ/FP7os8YnkRiwwDIIK3LNA8OfNcvx8lAU+/6DC1yOs8T2bVfoPGOPTkMkMf95f/+uF+uP45iTEKRQnIU6hOAlxCsVJiFMoTkKcQnES4hTTSpkFWLu1TIyViXo0FWw3hCH+rjjGMdPeAJnyIDIsgDlOeZcVtjci6/zxMon1SyWBYYlIqdsvIiJBgM+/CvCJzKOG/lUhtqpmht0zL43//QqfY1Dqv7thPAOxYdGFIfZtypnhfRj2Vw1sltrwuELjHOGar7yCEPK1QHES4hSKkxCnUJyEOIXiJMQpFCchTjGtlFqMdL7gKpK61tcFJU69VyX+vLBp/IdERmVBrKfRraqUNMOXpAAWkYhIVeBzrAx7owBp+UDwOYaGxRVE+BzrQLdLREQmZaYef9bHdsNohs9xdI3XRYaVsggqmlKjCqrdwL+rkeBnuIqMZ86onImArZMYlTNzUMliwTcnIU6hOAlxCsVJiFMoTkKcQnES4hQzWxtXeIO1JEYfHpCtzWIj+xsajXFiY0OxsSEaUYDN1SJ2n6NGirOCmxvPw9iwfwFjl72xejyO8Ibz0NiMPq/wLZ0KPv97R5fq8TpZhmuKEBcyzNr4u0b9HowdXfbV44uJ0cvomb5GRGRnDWevV1p6hlpEJIus3kP685MYj3Bm9CRC8M1JiFMoTkKcQnES4hSKkxCnUJyEOIXiJMQpppVi9f0Pkg6OAckXgbFJ3RiDMLPGMRi2QlmiXi9GWtuwdNIG/i979Q9/H8Y++vm7MHZyrVsYI8MSKUtsYTw+Ooexw6MjGEuXttTjNzf34Jo6bcPYzLgvSXsVxorJSD1++ewErmkuY7vn6eQZjE2NcQwbLXz9FyJ9c35Z6LaYiL2RHq756ksIIV8HFCchTqE4CXEKxUmIUyhOQpxCcRLiFNNKyaNFGBtMcDq/rPVqlm4bV4O0oymMxUY/narCfWCQpVMbVSnWWIhxfgVjP/n3H8PYWR9X9zyb6tbN42NcufH45AmMRVkLxsoEWx8b3RX1eLyIPy82qnRS438/i/CzcznXn4PNnR24Jh/r9ouIyOGX2ErpGfcl2sS/e29FP/+kxNZMYPbj0uGbkxCnUJyEOIXiJMQpFCchTqE4CXEKxUmIU0wr5TzH4xN6Ja5K+el7/6kef/EFnJ5+45t6Kl9EpGuMXKiMxklhop9/khhjFQRXwATG1Tp8eghjvSmu0KibXfV42MbXKpxdw1ijswRj8ym2DmbgGreBbSAi0m7iczw/PYWx4QDbRIsN/SJn6QJc8+TKsOGW1mDswrCknp7ja7zR1s9lIcR6KWprirYO35yEOIXiJMQpFCchTqE4CXEKxUmIUyhOQpxiz0rp3IKxyRXW9TzT09e9KU41Twpc4dCOceVJJUazLjDBOkpwWj6f4/M4xxl7uRzj81jo4gZUS2t6tcUoGMI1owifY5Th3zZL8XXMR7p1MJ3g89jdxPbX5Ao/WmelcSFFn18y6E+MNfjaTydG060M20Rn17gC
6XSon/9uFz/fIXbv8JqvvoQQ8nVAcRLiFIqTEKdQnIQ4heIkxClmtvb5b70KY0/f/wLGWh09W/vqa9+FaxaSxzA2m+KMYZgaE4hB5rIUvDl8ceUmjN399ADGWqAHj4jI9q1vwFgNxhYkPSNDPcebqGeFMfIiwinDCExy/uzTT+CaNtikLiKyYGzcb7Zw7PhE7/ljtH2SyBj90F3Cme1Bia/j1SWOHT4bqMe31jbgmrjG9xPBNychTqE4CXEKxUmIUyhOQpxCcRLiFIqTEKeYVkqzg+2BvdvPw9gUTE/Yee4OXLNa4ZEL/cfYZimMNvdlqVsp3/3+n8I1u3degbFbLz+CsQ/v3oWxbhun2E/OL9TjUYBtjzQzdlFjJ0VGxtiCQU/f6N1dwI9Ibfy1l6DoQERkZR1Pts7n+v28uNTtCxHBczdEZHERb26PjKZQswneaP8QTAhfNWyb/S08CgPBNychTqE4CXEKxUmIUyhOQpxCcRLiFIqTEKeYVoo1Jfn47B6Mvfwdvfqk2cH9bcLxMYyVJU7LRwn+CQ+P9L44ry/j3kjSvAFDiy3cjyZL8LVqGL1qsgy1/cd9cbZvbMLYvYMvYSzJjAqNsV75s3tzH67Zf+klGOtd4B48rTYe5XFyeq4eDyLcn6dj9GgaXOHziGL8bmo08TlOUt1mOXiKRzgsGN+F4JuTEKdQnIQ4heIkxCkUJyFOoTgJcQrFSYhT7HEMDbyTPs9xA6R8pseSFFcjNAP8Xc0GtmDSFFeltBJ9kvPf/83fwjV/8hc/gLFkojefEhFJG/h/LozwOd66va0eP7vC1tJ0hC2djQ18jXsDXGmRz/UGVM/t40qi23dwZdJg+BGMDQdGdQwYn1BUuNxmOsXjHTpd3MytLLH10e7iyp9yqt/PSHATr6enZzCG4JuTEKdQnIQ4heIkxCkUJyFOoTgJcQrFSYhTTCslMGZrjK9xOjyf6KntJMEzLa77xoTqGFdTJIIbP2129EqGB/dx5cbJEY7JFNsbj49w869vb+EZMdt7eoXJ1jluCjb+Ejc8W85wNcViB9sshw/189/c1q0eEZH+NZ5hMzesj2cXlzBWSaAeD2JclTKe6paZiEhgdDwL9K8SEZFWC1cSyZpeBZNW2NKZn5/izwPwzUmIUyhOQpxCcRLiFIqTEKdQnIQ4heIkxCmmlSLG/JJIcGxzVZ+xspBlcM1PfvEQxro1Tofvr2K7p5Hp9kwa45T3+Tm2RKo5bha1cxs3DQsb+HcvLHXV46sbuNFY7wpXUwyMypPScKtWwfyS2LC/pmCuiYjIvMCxaY6tjxKcZFHiZwBZdyIi5Qy/f5a76zAW1vi5Smr9+7LKmttjWDPoHL7yCkLI1wLFSYhTKE5CnEJxEuIUipMQp5jZ2sRogb/UwpvRO4t6z5/AyGYNa5zNurjC/yGrS/gnNBM941YG+DweHeNs7cYK7kezewePJgAtZ0RE5P0P9bEWR6c4M7zYwuMHYqNI4LODJ/hEwP90Zfx/z4xs7fXI6OuzjM+/KPXvOz3BPXhaTXxfYsNVaDbxM5dGOEsteU89XA77cMnG+iL+PADfnIQ4heIkxCkUJyFOoTgJcQrFSYhTKE5CnGJPtjaarGyu4enKcQ3S8lPcrn7rBt44/sEzbG/0A9wXp4701v5Lq3gH+FIbb3iOM5wO37v9DRhrdfRCABGRt976B/X4xNgcPpzoqXwRkckUj2pIsDMmmx39d+c93K9olODN6EttbFN88fkBjJ2BydbDIe5Z1VnEj3G7je9ZVOORIvEMX8cITGFfa2JraSkxGhYB+OYkxCkUJyFOoTgJcQrFSYhTKE5CnEJxEuIU00pJE9z7pt3FVkpZ6h+bxfjz9vd2YeyDj/HU62F8G8Yq0XvtbGzjn33v8/dg7LXv/yWMvfszvG40wSMjipk+muDsFFeQBMZ/6vUcp+zjGqf6u6FeBbPdMPoVnT+AsTLQeyOJiGys4Vg5089xOsLWUj7BfZPG
IX7mygLbM3Ngl4iIrIV6xc12C09gz2e4SgfBNychTqE4CXEKxUmIUyhOQpxCcRLiFIqTEKeYVkrTmO7bXcHVIEWolz/kAW6a1Ghhu6SzhBs4PXnyDMZe/029UiSf4GqKhdYFjJ0eHcHYwYP7MFZWuBonAHdgPMT2S3tlC8YGV7iaotPCtsLenW+qxz+4+zlc89E9XC30+u/+MYwlEbYcDg/0yeKDPrZLqgq/Y6YtbJfsreJnbmEBn+NyR19XG43jStxnDMI3JyFOoTgJcQrFSYhTKE5CnEJxEuIUipMQp5hWSl3g9HWn24Kx0UxvoDUpcD45TPH/xO4NPOX5vtEsajDSLZPF1g5cs/McDMmjA9zs6vjkBMZe++1XYGw80VP97U38m5e3cDO0JxfY+phM8fVPm/r8kvYqvlbffuUmjF2c6dU2IiKPDrElNR7rTbf6fWyJrK2swVinOoWx3SZu8LXewt3Q4lq3ueY5rjxpGs3yEHxzEuIUipMQp1CchDiF4iTEKRQnIU4xs7XXFzjT1YjxJvZ8pmfBggqPQQiNncGrXTzO4H5wCGPnl/om8N7QGiOAeyO9+BLegH/4CPf8meOkoPTBhu79/Ttwzf4e7pv0+BhvmP/sl5/C2OWqXuSQJjgr323gjeNHxzhr/OxiCGOB6M9VZIzC2NrG2etdY8P5ziKeAt4QvIk9n+gfWhd4lMfcmAKO4JuTEKdQnIQ4heIkxCkUJyFOoTgJcQrFSYhTTCvl4cFDGNt54UUYywO9Z05lbAyOI9zfJstwyrvdwqn+xUXd+njhpefhmnfeeRvGJte4X1Gzsw5jXz7RpzWLiNy8qW8sv7X/G3BNFuHb9twNvFG9f6GPXBARufeZPlqhMuyv4yvsEQ3HeF0+x/d6ONCtpbV1XAjw5AIXaCxvY/urZ4wbkQL3feoX+m+rQ/yc5kYfKQTfnIQ4heIkxCkUJyFOoTgJcQrFSYhTKE5CnGJaKXcfnMHYzrdehbGq1qtBAmtnfomrXIYDPF25f4V71ax0XlaPv/kHb8A1L/86toh++KN/gTEBIyhERDpLeJLz9rZuESwaIyiiAo9cWF7Ht3RrF1//QaLbAB/f/QSuOR3ivjh1hM9/aV3vVyQisrrXUY9H1oTqEp/HFxUeKXJwiquTUsGfOZ3oluDYcEuKAj8fCL45CXEKxUmIUyhOQpxCcRLiFIqTEKdQnIQ4xbRSHgzwdN/LAjdcqmM91RwUuPlUbaSaI8GxrXXciv93fkuv7MginEK/tbMNY2/+2Z/D2D//CFezXJ7g333S188ln+K8fGo0n+pNcezgEa6qkRysW34BLuku4+ejqnFnraDGjbCqVP/MCjT+EhEp5vi7BkbTrUaCPzMLsZUyFt3Kmsf4u2oxurwB+OYkxCkUJyFOoTgJcQrFSYhTKE5CnEJxEuIU00q5f4W1++P/xnM3Xr61qh7fyHAzrmaM7ZLNdTy/ZHMVz+u4vadPXg5KbFOcnOMql7f+8d9g7KNP7sFYnuPvK6DzYVR8lDgtX6X4epQBvt2x6BZGYfx/FyG2UhrWk2VUkeS5HquN6xEbjbWiAttmNb74UtZ4XVLo1ySq8bWazTnZmpD/N1CchDiF4iTEKRQnIU6hOAlxipmtHQV4I+9/fHgfxh48fKge/6NXXoJrnruh944REXn0pT4qQETke6/+Gow1wNiC6xxnhn/49vsw9vGnJzA2nhmt/QMcCzP9/7Gu8GbuMDV6MQU4K1hWOAOZl/p5FEa2M4hw1nhWGpvAjSnmEbhnUYDfI80G3sCeGOMkjJCUNZZGKfo1KVDxgIikLfx8I/jmJMQpFCchTqE4CXEKxUmIUyhOQpxCcRLiFNNKWVnG/Xl6Azyq4fRS75nzs7tfwDVlsYtPpMKp8lVj4nEALIz/+fiXcM3b77wLY3mNN3oHAT7HMDT+A4HzMZvjzfK10Z+nEuwPWPZMVesnkoT4EQkCa8QAvh5xgteF4DPbLdyzKjTso7DE
9kZl3JeqsqShWymbq3i6OZqybsE3JyFOoTgJcQrFSYhTKE5CnEJxEuIUipMQp5hWSmxMa05iXGlR5HpFwqPTIVwzG38OY9/7zj6MLbS3YGww1lPe//XzD+CaqZF6L4yeM2mGr0dt9KMZj/GUakRk9AL66p1q/o8UVIOYdol1HmCsgojIQoZjMTiP+Qxf+3yCr2FpVMDkM3xfljp6HywRkY0NPdbK8PWYDvCzj+CbkxCnUJyEOIXiJMQpFCchTqE4CXEKxUmIU0wrpSqMDkiVsaM/1G2FeYXT8mfDHMY++hw31npzhFPl19W1evz4Uj8uIpIt4JER5QSffz7B57/QNKwDUPVhjXAIjGqK0LK/jAqTGkwPr40RA0mCxyCMcmxTzHNsfTQy8JlGRY1liYynuAlZq43tks4qHgEyK/R7c/QQ24FJaWgJwDcnIU6hOAlxCsVJiFMoTkKcQnES4hSKkxCnmFaKGDv6xZi7EYEZK5VR4VCFeLaGVc3yd//0Noz93huvqMcPn57DNRPLIjJiSYIbWkUR/m3Npt64KjXmoUyGIxgrjOqNOsD3LM6a6vEowvdsbswGiQxLpzJskelI/22VNV/F+K7O0jKMrRgT0y/OezDWvzjVjz8+gGvu7O3BGIJvTkKcQnES4hSKkxCnUJyEOIXiJMQpFCchTjGtlOUlPCo7n2J7YwTmfKQxrs4oSlzVEUa4edZP3/sFjB0e69Usg2tcqdAbTWGswMuk1cLVLE1jdHsGGoPFhv3SSHE1SNTAtkIc488swf90YdgegWW1GVUY5cwYVw9iC0YDtdXuCox1V4zqEsMayxOjWRewzWqj6mc8wc8Vgm9OQpxCcRLiFIqTEKdQnIQ4heIkxClmtjafTGAsNfr+5yCtmUR4c3hh/E3URs+cIMNZ0sdH+gb3MMYZzXKGM5DlHGddpwHOxo2v8UZ11A8oS3F2spni27bQwJncAPQJEhFpgGzogtFTaWZssr+4xBvH4wpncuNIvx7dRX1jvojIege7CpvreON7f4QdguveFYyN+n31+FIXf9fF+QWMIfjmJMQpFCchTqE4CXEKxUmIUyhOQpxCcRLiFNNKmRmbdbMQeykFyNjXM2zNWAOUq8DaYI3tjQqMfyhy/Hm1sbm9NjZ6W7HKOMcIWAdXlziV35vj69g2NuAvdbt4XUc/D2SxiIiUYCyBiEhcY7skSvA7YTYFRRPG8xYL/q5iNDBihpVydQljFSjsyFJcWJAbfY4QfHMS4hSKkxCnUJyEOIXiJMQpFCchTqE4CXFKUNeGTUEI+ZXBNychTqE4CXEKxUmIUyhOQpxCcRLiFIqTEKf8L/uotDM4cYoAAAAAAElFTkSuQmCC\n",
      "text/plain": [
       "<Figure size 432x288 with 1 Axes>"
      ]
     },
     "metadata": {
      "needs_background": "light"
     },
     "output_type": "display_data"
    }
   ],
   "source": [
    "# Visual check: show one example of the smooth-trigger poisoned image.\n",
    "plt.axis('off')\n",
    "plt.imshow(poi_test[0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build detector test set: first half clean (label 0), second half poisoned (label 1).\n",
    "x_dct_test = np.vstack((x_test,poi_test))#[:,:,:,0]\n",
    "# Builtin int: np.int was deprecated in NumPy 1.20 and removed in 1.24.\n",
    "y_dct_test = (np.vstack((np.zeros((x_test.shape[0],1)),np.ones((x_test.shape[0],1))))).astype(int)\n",
    "# Replace each image channel in place by the 2-D DCT of its 0-255 pixel values.\n",
    "for i in range(x_dct_test.shape[0]):\n",
    "    for channel in range(3):\n",
    "        x_dct_test[i][:,:,channel] = dct2((x_dct_test[i][:,:,channel]*255).astype(np.uint8))\n",
    "# One-hot (N, 2) labels for the binary clean/poisoned classifier.\n",
    "hot_test_lab = np.squeeze(np.eye(2)[y_dct_test])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "20000/20000 [==============================] - 1s 49us/sample - loss: 0.7419 - acc: 0.7516\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[0.7419371724247933, 0.75155]"
      ]
     },
     "execution_count": 41,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Overall detector accuracy on the mixed clean+poisoned set (20k samples).\n",
    "model.evaluate(x_dct_test,hot_test_lab, batch_size=64)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "10000/10000 [==============================] - 0s 49us/sample - loss: 1.3632 - acc: 0.5362\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[1.3631996425628663, 0.5362]"
      ]
     },
     "execution_count": 42,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Accuracy on the poisoned half only (rows 10000:), i.e. the trigger detection rate.\n",
    "model.evaluate(x_dct_test[10000:],hot_test_lab[10000:], batch_size=64)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "10000/10000 [==============================] - 0s 47us/sample - loss: 1.3632 - acc: 0.5362\n",
      "10000/10000 [==============================] - 0s 46us/sample - loss: 2.7255 - acc: 0.4638\n",
      "10000/10000 [==============================] - 0s 48us/sample - loss: 4.7279 - acc: 0.0331\n",
      "0.6833619936733921\n"
     ]
    }
   ],
   "source": [
    "# F1 of the poison detector, computed from accuracy *rates* rather than counts:\n",
    "#   TP = accuracy on poisoned samples with true labels    (= recall)\n",
    "#   FN = accuracy on poisoned samples with flipped labels  (= 1 - recall)\n",
    "#   FP = accuracy on clean samples with flipped labels     (= false-positive rate)\n",
    "# NOTE(review): using rates in place of counts cancels correctly in the F1\n",
    "# formula only because both halves contain exactly 10000 samples each.\n",
    "_,TP = model.evaluate(x_dct_test[10000:],hot_test_lab[10000:], batch_size=64)\n",
    "_,FN = model.evaluate(x_dct_test[10000:],1-hot_test_lab[10000:], batch_size=64)\n",
    "_,FP = model.evaluate(x_dct_test[:10000],1-hot_test_lab[:10000], batch_size=64)\n",
    "F1_score = TP/(TP+0.5*(FN+FP))\n",
    "print(F1_score)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Test on nature image trigger"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Poison every test image with the 'nature' (natural image) trigger.\n",
    "poi_test = np.zeros_like(x_test)\n",
    "attack_list = ['nature']\n",
    "for i in range(x_test.shape[0]):\n",
    "    # random.choice needs a sequence; random.sample on a set raises TypeError on Python 3.11+.\n",
    "    attack_name = random.choice(attack_list)\n",
    "    poi_test[i] = patching_test(x_test[i],attack_name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build detector test set: first half clean (label 0), second half poisoned (label 1).\n",
    "x_dct_test = np.vstack((x_test,poi_test))#[:,:,:,0]\n",
    "# Builtin int: np.int was deprecated in NumPy 1.20 and removed in 1.24.\n",
    "y_dct_test = (np.vstack((np.zeros((x_test.shape[0],1)),np.ones((x_test.shape[0],1))))).astype(int)\n",
    "# Replace each image channel in place by the 2-D DCT of its 0-255 pixel values.\n",
    "for i in range(x_dct_test.shape[0]):\n",
    "    for channel in range(3):\n",
    "        x_dct_test[i][:,:,channel] = dct2((x_dct_test[i][:,:,channel]*255).astype(np.uint8))\n",
    "# One-hot (N, 2) labels for the binary clean/poisoned classifier.\n",
    "hot_test_lab = np.squeeze(np.eye(2)[y_dct_test])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "20000/20000 [==============================] - 1s 49us/sample - loss: 0.0722 - acc: 0.9825\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[0.07219738628268242, 0.98245]"
      ]
     },
     "execution_count": 46,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Overall detector accuracy on the mixed clean+poisoned set (20k samples).\n",
    "model.evaluate(x_dct_test,hot_test_lab, batch_size=64)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "10000/10000 [==============================] - 0s 47us/sample - loss: 0.0237 - acc: 0.9980\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[0.02372006796300411, 0.998]"
      ]
     },
     "execution_count": 47,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Accuracy on the poisoned half only (rows 10000:), i.e. the trigger detection rate.\n",
    "model.evaluate(x_dct_test[10000:],hot_test_lab[10000:], batch_size=64)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.4"
  },
  "pycharm": {
   "stem_cell": {
    "cell_type": "raw",
    "metadata": {
     "collapsed": false
    },
    "source": []
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
