{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Deep Learning: VAE + Softmax Classifier on NSL-KDD"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2017-05-09T21:32:27.219037Z",
     "start_time": "2017-05-09T21:32:24.707877Z"
    }
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Using TensorFlow backend.\n"
     ]
    }
   ],
   "source": [
    "# Imports: Keras (TensorFlow backend) for the VAE, pandas/numpy for data handling\n",
    "from keras.layers import Input, Dense, Lambda, Layer\n",
    "from keras.models import Model\n",
    "from keras.layers.core import Dropout\n",
    "from keras import regularizers\n",
    "import keras\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "from keras import backend as K\n",
    "from keras import metrics\n",
    "from collections import namedtuple\n",
    "pd.set_option(\"display.max_rows\",35)\n",
    "%matplotlib inline"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2017-05-09T21:32:28.394130Z",
     "start_time": "2017-05-09T21:32:27.221774Z"
    },
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Load preprocessed NSL-KDD train/test frames (binary labels: attack vs. normal)\n",
    "kdd_train_2labels = pd.read_pickle(\"dataset/kdd_train_2labels.pkl\")\n",
    "kdd_test_2labels = pd.read_pickle(\"dataset/kdd_test_2labels.pkl\")\n",
    "\n",
    "#y_train_labels = pd.read_pickle(\"dataset/kdd_train_2labels_y.pkl\")\n",
    "#y_train_labels = pd.read_pickle(\"dataset/kdd_train_2labels.pkl\")\n",
    "#y_test_labels = pd.read_pickle(\"dataset/kdd_test_2labels_y.pkl\")\n",
    "\n",
    "# One-hot label columns present in both frames\n",
    "output_columns_2labels = ['is_Attack','is_Normal']\n",
    "\n",
    "from sklearn import model_selection as ms\n",
    "from sklearn import preprocessing as pp\n",
    "\n",
    "# Split features from labels\n",
    "x_input = kdd_train_2labels.drop(output_columns_2labels, axis = 1)\n",
    "y_output = kdd_train_2labels.loc[:,output_columns_2labels]\n",
    "\n",
    "# Standardize features; the scaler is fit on train data only and reused for test below\n",
    "ss = pp.StandardScaler()\n",
    "x_input = ss.fit_transform(x_input)\n",
    "\n",
    "#le = pp.LabelEncoder()\n",
    "#y_train = le.fit_transform(y_train_labels).reshape(-1, 1)\n",
    "#y_test = le.transform(y_test_labels).reshape(-1, 1)\n",
    "\n",
    "y_train = kdd_train_2labels.loc[:,output_columns_2labels].values\n",
    "\n",
    "# Hold out 10% of the training data as a validation split\n",
    "x_train, x_valid, y_train, y_valid = ms.train_test_split(x_input, \n",
    "                              y_train, \n",
    "                              test_size=0.1)\n",
    "#x_valid, x_test, y_valid, y_test = ms.train_test_split(x_valid, y_valid, test_size = 0.4)\n",
    "\n",
    "x_test = kdd_test_2labels.drop(output_columns_2labels, axis = 1)\n",
    "y_test = kdd_test_2labels.loc[:,output_columns_2labels].values\n",
    "\n",
    "x_test = ss.transform(x_test)\n",
    "\n",
    "#x_train = np.hstack((x_train, y_train))\n",
    "#x_valid = np.hstack((x_valid, y_valid))\n",
    "\n",
    "#x_test = np.hstack((x_test, np.random.normal(loc = 0, scale = 0.01, size = y_test.shape)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2017-05-09T21:32:26.158Z"
    },
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "input_dim = 122\n",
    "intermediate_dim = 10\n",
    "latent_dim = 32\n",
    "batch_size = 1409\n",
    "hidden_layers = 8\n",
    "classes = 2\n",
    "drop_prob = 0.1\n",
    "\n",
    "class Train:\n",
    "    \"\"\"Namespace holding the Keras tensors shared across cells.\"\"\"\n",
    "    def build_vae_model():\n",
    "        # Encoder: stack of L2/L1-regularized dense layers with dropout\n",
    "        Train.x = Input(shape=(input_dim,))\n",
    "        \n",
    "        hidden_encoder = Train.x\n",
    "        for i in range(hidden_layers):\n",
    "            hidden_encoder = Dense(intermediate_dim, activation='relu', \n",
    "                      kernel_regularizer=keras.regularizers.l2(0.0001),\n",
    "                      activity_regularizer=keras.regularizers.l1(0.0001))(hidden_encoder)\n",
    "            \n",
    "            hidden_encoder = Dropout(rate=drop_prob)(hidden_encoder)\n",
    "\n",
    "        # Latent Gaussian parameters (mean and log-variance)\n",
    "        Train.mean_encoder = Dense(latent_dim, activation=None)(hidden_encoder)\n",
    "        Train.logvar_encoder = Dense(latent_dim, activation=None)(hidden_encoder)\n",
    "\n",
    "        def get_distrib(args):\n",
    "            # Reparameterization trick: z = mu + sigma * epsilon\n",
    "            m_e, l_e = args\n",
    "\n",
    "            # BUG FIX: sample epsilon symbolically with K.random_normal so a\n",
    "            # fresh draw happens on every forward pass; the previous\n",
    "            # np.random.normal call baked a single constant noise matrix\n",
    "            # into the graph at build time.\n",
    "            epsilon = K.random_normal(shape=(batch_size, latent_dim), mean=0.0, stddev=0.05)\n",
    "\n",
    "            # Sample latent variable\n",
    "            z = m_e + K.exp(l_e / 2) * epsilon\n",
    "            return z\n",
    "\n",
    "        z = Lambda(get_distrib,name='z_dist')([Train.mean_encoder, Train.logvar_encoder])\n",
    "\n",
    "        # Decoder mirrors the encoder stack\n",
    "        hidden_decoder = z\n",
    "        for i in range(hidden_layers):\n",
    "            hidden_decoder = Dense(intermediate_dim, activation=\"relu\", \n",
    "                      kernel_regularizer=keras.regularizers.l2(0.0001),\n",
    "                      activity_regularizer=keras.regularizers.l1(0.0001))(hidden_decoder)\n",
    "            hidden_decoder = Dropout(rate=drop_prob)(hidden_decoder)\n",
    "\n",
    "        Train.x_ = Dense(input_dim, activation=None, name='vae_output')(hidden_decoder)\n",
    "        \n",
    "    def build_softmax_model():\n",
    "        # Small classifier mapping latent codes to the 2 output classes\n",
    "        Train.z_ = Input(shape=(latent_dim,))\n",
    "        hidden_y = Dense(latent_dim, activation='relu', name='softmax_hidden')(Train.z_)\n",
    "        Train.y = Dense(classes, activation='softmax', name='softmax_output')(hidden_y)\n",
    "        \n",
    "def vae_loss(x, x_decoded_mean):\n",
    "    # Reconstruction term + KL divergence of the latent posterior from N(0, I).\n",
    "    # NOTE(review): binary_crossentropy assumes targets in [0, 1], but x is\n",
    "    # standardized (unbounded) -- the K.abs below masks the resulting negative\n",
    "    # losses; an MSE reconstruction term may be more appropriate. TODO confirm.\n",
    "    xent_loss = input_dim * keras.losses.binary_crossentropy(x, x_decoded_mean)\n",
    "    kl_loss = - 0.5 * K.sum(1 + Train.logvar_encoder - K.square(Train.mean_encoder) - K.exp(Train.logvar_encoder), axis=-1)\n",
    "    return K.abs(K.mean(xent_loss + kl_loss))\n",
    "\n",
    "\n",
    "Train.build_vae_model()\n",
    "Train.build_softmax_model()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2017-05-09T21:32:26.161Z"
    },
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " \n",
      " Current Layer Attributes - epochs:50 hidden layers:2 features count:4\n",
      "Train on 112720 samples, validate on 22544 samples\n",
      "Epoch 1/50\n",
      "112720/112720 [==============================] - 6s - loss: 4.6226 - val_loss: 9.6583\n",
      "Epoch 2/50\n",
      "112720/112720 [==============================] - 2s - loss: 4.2963 - val_loss: 17.3227\n",
      "Epoch 3/50\n",
      "112720/112720 [==============================] - 2s - loss: 4.1337 - val_loss: 14.8529\n",
      "Epoch 4/50\n",
      "112720/112720 [==============================] - 2s - loss: 3.8748 - val_loss: 17.1547\n",
      "Epoch 5/50\n",
      "112720/112720 [==============================] - 2s - loss: 3.9973 - val_loss: 15.5178\n",
      "Epoch 6/50\n",
      "112720/112720 [==============================] - 2s - loss: 4.1346 - val_loss: 14.2069s: 4.2\n",
      "Epoch 7/50\n",
      "112720/112720 [==============================] - 2s - loss: 4.0482 - val_loss: 13.7357\n",
      "Epoch 8/50\n",
      "112720/112720 [==============================] - 2s - loss: 3.9395 - val_loss: 14.0685\n",
      "Epoch 9/50\n",
      "112720/112720 [==============================] - 2s - loss: 4.1044 - val_loss: 13.1505\n",
      "Epoch 10/50\n",
      "112720/112720 [==============================] - 2s - loss: 3.9713 - val_loss: 13.8277\n",
      "Epoch 11/50\n",
      "112720/112720 [==============================] - 2s - loss: 4.2111 - val_loss: 13.9066\n",
      "Epoch 12/50\n",
      "112720/112720 [==============================] - 2s - loss: 4.5084 - val_loss: 13.9243\n",
      "Epoch 13/50\n",
      "112720/112720 [==============================] - 2s - loss: 4.1508 - val_loss: 14.1523\n",
      "Epoch 14/50\n",
      "112720/112720 [==============================] - 2s - loss: 4.1922 - val_loss: 14.2097\n",
      "Epoch 15/50\n",
      "112720/112720 [==============================] - 2s - loss: 4.2093 - val_loss: 14.4072\n",
      "Epoch 16/50\n",
      "112720/112720 [==============================] - 2s - loss: 3.6314 - val_loss: 15.0328\n",
      "Epoch 17/50\n",
      "112720/112720 [==============================] - 2s - loss: 4.1264 - val_loss: 15.5602\n",
      "Epoch 18/50\n",
      "112720/112720 [==============================] - 2s - loss: 3.8379 - val_loss: 14.6384\n",
      "Epoch 19/50\n",
      "112720/112720 [==============================] - 2s - loss: 4.2659 - val_loss: 14.5063\n",
      "Epoch 20/50\n",
      "112720/112720 [==============================] - 2s - loss: 4.1164 - val_loss: 14.4707\n",
      "Epoch 21/50\n",
      "112720/112720 [==============================] - 2s - loss: 3.5644 - val_loss: 14.4117\n",
      "Epoch 22/50\n",
      "112720/112720 [==============================] - 2s - loss: 3.7793 - val_loss: 14.4359\n",
      "Epoch 23/50\n",
      "112720/112720 [==============================] - 2s - loss: 3.6917 - val_loss: 14.5727\n",
      "Epoch 24/50\n",
      "112720/112720 [==============================] - 2s - loss: 3.9447 - val_loss: 14.5018\n",
      "Epoch 25/50\n",
      "112720/112720 [==============================] - 2s - loss: 3.8455 - val_loss: 14.4073\n",
      "Epoch 26/50\n",
      "112720/112720 [==============================] - 2s - loss: 3.8824 - val_loss: 13.7346\n",
      "Epoch 27/50\n",
      "112720/112720 [==============================] - 2s - loss: 4.3100 - val_loss: 13.8440\n",
      "Epoch 28/50\n",
      "112720/112720 [==============================] - 2s - loss: 3.7885 - val_loss: 13.6830\n",
      "Epoch 29/50\n",
      "112720/112720 [==============================] - 2s - loss: 4.1136 - val_loss: 13.9428\n",
      "Epoch 30/50\n",
      "112720/112720 [==============================] - 2s - loss: 3.9885 - val_loss: 14.0731\n",
      "Epoch 31/50\n",
      "112720/112720 [==============================] - 2s - loss: 3.9292 - val_loss: 13.9118\n",
      "Epoch 32/50\n",
      "112720/112720 [==============================] - 2s - loss: 3.5951 - val_loss: 14.7792\n",
      "Epoch 33/50\n",
      "112720/112720 [==============================] - 2s - loss: 3.6852 - val_loss: 14.5258\n",
      "Epoch 34/50\n",
      "112720/112720 [==============================] - 2s - loss: 3.9464 - val_loss: 15.2211\n",
      "Epoch 35/50\n",
      "112720/112720 [==============================] - 2s - loss: 3.4028 - val_loss: 17.8194\n",
      "Epoch 36/50\n",
      "112720/112720 [==============================] - 2s - loss: 3.9603 - val_loss: 18.6963\n",
      "Epoch 37/50\n",
      "112720/112720 [==============================] - 2s - loss: 3.8230 - val_loss: 18.7384\n",
      "Epoch 38/50\n",
      "112720/112720 [==============================] - 2s - loss: 4.3460 - val_loss: 18.6917\n",
      "Epoch 39/50\n",
      "112720/112720 [==============================] - 2s - loss: 4.1801 - val_loss: 18.5150\n",
      "Epoch 40/50\n",
      "112720/112720 [==============================] - 2s - loss: 4.0841 - val_loss: 18.0313\n",
      "Epoch 41/50\n",
      "112720/112720 [==============================] - 2s - loss: 3.9201 - val_loss: 17.8257\n",
      "Epoch 42/50\n",
      "112720/112720 [==============================] - 2s - loss: 4.0949 - val_loss: 17.8907\n",
      "Epoch 43/50\n",
      "112720/112720 [==============================] - 2s - loss: 3.9769 - val_loss: 17.9631\n",
      "Epoch 44/50\n",
      "112720/112720 [==============================] - 3s - loss: 4.0877 - val_loss: 18.1321\n",
      "Epoch 45/50\n",
      "112720/112720 [==============================] - 2s - loss: 4.4087 - val_loss: 18.1931\n",
      "Epoch 46/50\n",
      "112720/112720 [==============================] - 2s - loss: 4.1270 - val_loss: 18.1334\n",
      "Epoch 47/50\n",
      "112720/112720 [==============================] - 2s - loss: 4.4365 - val_loss: 18.0913\n",
      "Epoch 48/50\n",
      "112720/112720 [==============================] - 3s - loss: 3.9260 - val_loss: 18.0975\n",
      "Epoch 49/50\n",
      "112720/112720 [==============================] - 3s - loss: 4.0452 - val_loss: 18.1243\n",
      "Epoch 50/50\n",
      "112720/112720 [==============================] - 3s - loss: 3.8065 - val_loss: 18.2694\n",
      "Train on 112720 samples, validate on 22544 samples\n",
      "Epoch 1/5\n",
      "112720/112720 [==============================] - 1s - loss: 0.6913 - acc: 0.5338 - val_loss: 0.7057 - val_acc: 0.4303\n",
      "Epoch 2/5\n",
      "112720/112720 [==============================] - 0s - loss: 0.6913 - acc: 0.5348 - val_loss: 0.7061 - val_acc: 0.4301\n",
      "Epoch 3/5\n",
      "112720/112720 [==============================] - 0s - loss: 0.6912 - acc: 0.5351 - val_loss: 0.7062 - val_acc: 0.4301\n",
      "Epoch 4/5\n",
      "112720/112720 [==============================] - 0s - loss: 0.6912 - acc: 0.5351 - val_loss: 0.7061 - val_acc: 0.4301\n",
      "Epoch 5/5\n",
      "112720/112720 [==============================] - 0s - loss: 0.6912 - acc: 0.5352 - val_loss: 0.7062 - val_acc: 0.4303\n",
      " 1409/22544 [>.............................] - ETA: 0s\n",
      " Train Acc: 0.5308729633688927, Test Acc: 0.4302696958184242\n",
      " \n",
      " Current Layer Attributes - epochs:50 hidden layers:2 features count:16\n",
      "Train on 112720 samples, validate on 22544 samples\n",
      "Epoch 1/50\n",
      "112720/112720 [==============================] - 7s - loss: 3.6495 - val_loss: 6.2388\n",
      "Epoch 2/50\n",
      "112720/112720 [==============================] - 3s - loss: 3.0892 - val_loss: 9.2299\n",
      "Epoch 3/50\n",
      "112720/112720 [==============================] - 3s - loss: 3.4179 - val_loss: 9.6655\n",
      "Epoch 4/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.8032 - val_loss: 10.0836\n",
      "Epoch 5/50\n",
      "112720/112720 [==============================] - 3s - loss: 3.1915 - val_loss: 14.0896\n",
      "Epoch 6/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.7326 - val_loss: 13.4261\n",
      "Epoch 7/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.7104 - val_loss: 13.0943\n",
      "Epoch 8/50\n",
      "112720/112720 [==============================] - 3s - loss: 3.3168 - val_loss: 13.1281\n",
      "Epoch 9/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.9920 - val_loss: 12.2821\n",
      "Epoch 10/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.9959 - val_loss: 12.2116\n",
      "Epoch 11/50\n",
      "112720/112720 [==============================] - 3s - loss: 3.2765 - val_loss: 12.3960\n",
      "Epoch 12/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.9445 - val_loss: 12.4079\n",
      "Epoch 13/50\n",
      "112720/112720 [==============================] - 3s - loss: 3.1459 - val_loss: 12.5645\n",
      "Epoch 14/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.9850 - val_loss: 12.6982\n",
      "Epoch 15/50\n",
      "112720/112720 [==============================] - 3s - loss: 3.6318 - val_loss: 12.6987\n",
      "Epoch 16/50\n",
      "112720/112720 [==============================] - 3s - loss: 3.0783 - val_loss: 12.5225\n",
      "Epoch 17/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.8146 - val_loss: 12.3031\n",
      "Epoch 18/50\n",
      "112720/112720 [==============================] - 2s - loss: 2.7078 - val_loss: 12.1945\n",
      "Epoch 19/50\n",
      "112720/112720 [==============================] - 2s - loss: 3.1515 - val_loss: 11.6738\n",
      "Epoch 20/50\n",
      "112720/112720 [==============================] - 2s - loss: 3.2896 - val_loss: 11.6794\n",
      "Epoch 21/50\n",
      "112720/112720 [==============================] - 2s - loss: 3.0679 - val_loss: 11.5044\n",
      "Epoch 22/50\n",
      "112720/112720 [==============================] - 2s - loss: 3.3226 - val_loss: 12.1108\n",
      "Epoch 23/50\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "112720/112720 [==============================] - 2s - loss: 3.2274 - val_loss: 11.8361\n",
      "Epoch 24/50\n",
      "112720/112720 [==============================] - 3s - loss: 3.0953 - val_loss: 11.8267\n",
      "Epoch 25/50\n",
      "112720/112720 [==============================] - 3s - loss: 3.1074 - val_loss: 11.8097\n",
      "Epoch 26/50\n",
      "112720/112720 [==============================] - 3s - loss: 3.2979 - val_loss: 12.2779\n",
      "Epoch 27/50\n",
      "112720/112720 [==============================] - 3s - loss: 3.2541 - val_loss: 11.9326\n",
      "Epoch 28/50\n",
      "112720/112720 [==============================] - 3s - loss: 3.0018 - val_loss: 11.9822\n",
      "Epoch 29/50\n",
      "112720/112720 [==============================] - 3s - loss: 3.0226 - val_loss: 11.9825\n",
      "Epoch 30/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.9763 - val_loss: 11.9292\n",
      "Epoch 31/50\n",
      "112720/112720 [==============================] - 3s - loss: 3.0620 - val_loss: 11.9390\n",
      "Epoch 32/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.8541 - val_loss: 11.8941\n",
      "Epoch 33/50\n",
      "112720/112720 [==============================] - 3s - loss: 3.0832 - val_loss: 11.9300\n",
      "Epoch 34/50\n",
      "112720/112720 [==============================] - 3s - loss: 3.0755 - val_loss: 11.9480\n",
      "Epoch 35/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.8983 - val_loss: 11.8686\n",
      "Epoch 36/50\n",
      "112720/112720 [==============================] - 3s - loss: 3.5553 - val_loss: 11.8753\n",
      "Epoch 37/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.7190 - val_loss: 12.1316\n",
      "Epoch 38/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.9486 - val_loss: 12.0800\n",
      "Epoch 39/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.9564 - val_loss: 11.7788\n",
      "Epoch 40/50\n",
      "112720/112720 [==============================] - 3s - loss: 3.3303 - val_loss: 11.8428\n",
      "Epoch 41/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.7543 - val_loss: 11.8531\n",
      "Epoch 42/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.9762 - val_loss: 11.8256\n",
      "Epoch 43/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.7665 - val_loss: 11.8301\n",
      "Epoch 44/50\n",
      "112720/112720 [==============================] - 3s - loss: 3.1074 - val_loss: 11.8319\n",
      "Epoch 45/50\n",
      "112720/112720 [==============================] - 3s - loss: 3.3360 - val_loss: 11.8000\n",
      "Epoch 46/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.7823 - val_loss: 11.8053\n",
      "Epoch 47/50\n",
      "112720/112720 [==============================] - 3s - loss: 3.1303 - val_loss: 11.7966\n",
      "Epoch 48/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.9273 - val_loss: 11.8028\n",
      "Epoch 49/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.6084 - val_loss: 11.8159\n",
      "Epoch 50/50\n",
      "112720/112720 [==============================] - 3s - loss: 3.5789 - val_loss: 11.8159\n",
      "Train on 112720 samples, validate on 22544 samples\n",
      "Epoch 1/5\n",
      "112720/112720 [==============================] - 1s - loss: 0.6912 - acc: 0.5348 - val_loss: 0.7045 - val_acc: 0.4316\n",
      "Epoch 2/5\n",
      "112720/112720 [==============================] - 0s - loss: 0.6911 - acc: 0.5353 - val_loss: 0.7051 - val_acc: 0.4310\n",
      "Epoch 3/5\n",
      "112720/112720 [==============================] - 0s - loss: 0.6910 - acc: 0.5353 - val_loss: 0.7053 - val_acc: 0.4309\n",
      "Epoch 4/5\n",
      "112720/112720 [==============================] - 0s - loss: 0.6910 - acc: 0.5353 - val_loss: 0.7055 - val_acc: 0.4309\n",
      "Epoch 5/5\n",
      "112720/112720 [==============================] - 0s - loss: 0.6910 - acc: 0.5353 - val_loss: 0.7055 - val_acc: 0.4309\n",
      "22544/22544 [==============================] - 0s     \n",
      "\n",
      " Train Acc: 0.5305180996656418, Test Acc: 0.43093506060540676\n",
      " \n",
      " Current Layer Attributes - epochs:50 hidden layers:2 features count:32\n",
      "Train on 112720 samples, validate on 22544 samples\n",
      "Epoch 1/50\n",
      "112720/112720 [==============================] - 7s - loss: 3.0327 - val_loss: 31.9546\n",
      "Epoch 2/50\n",
      "112720/112720 [==============================] - 3s - loss: 3.3371 - val_loss: 31.6824\n",
      "Epoch 3/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.7587 - val_loss: 30.9835\n",
      "Epoch 4/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.7138 - val_loss: 31.2903\n",
      "Epoch 5/50\n",
      "112720/112720 [==============================] - 3s - loss: 3.0735 - val_loss: 31.2605\n",
      "Epoch 6/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.6826 - val_loss: 31.1338\n",
      "Epoch 7/50\n",
      "112720/112720 [==============================] - 3s - loss: 3.0646 - val_loss: 31.1164\n",
      "Epoch 8/50\n",
      "112720/112720 [==============================] - 3s - loss: 3.1550 - val_loss: 31.1472\n",
      "Epoch 9/50\n",
      "112720/112720 [==============================] - 3s - loss: 3.1541 - val_loss: 30.9361\n",
      "Epoch 10/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.6742 - val_loss: 30.8595\n",
      "Epoch 11/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.6590 - val_loss: 30.7422\n",
      "Epoch 12/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.5802 - val_loss: 30.7457\n",
      "Epoch 13/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.7282 - val_loss: 30.7225\n",
      "Epoch 14/50\n",
      "112720/112720 [==============================] - 4s - loss: 2.4876 - val_loss: 29.5197\n",
      "Epoch 15/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.6989 - val_loss: 29.0333\n",
      "Epoch 16/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.9888 - val_loss: 28.9541\n",
      "Epoch 17/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.2129 - val_loss: 28.9514\n",
      "Epoch 18/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.7947 - val_loss: 28.8805\n",
      "Epoch 19/50\n",
      "112720/112720 [==============================] - 3s - loss: 3.2235 - val_loss: 28.9014\n",
      "Epoch 20/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.7726 - val_loss: 28.8350\n",
      "Epoch 21/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.5837 - val_loss: 28.8403\n",
      "Epoch 22/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.6415 - val_loss: 28.8355\n",
      "Epoch 23/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.7274 - val_loss: 28.8814\n",
      "Epoch 24/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.5344 - val_loss: 28.8808s: - ETA\n",
      "Epoch 25/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.9916 - val_loss: 28.8275\n",
      "Epoch 26/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.5410 - val_loss: 28.8770\n",
      "Epoch 27/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.5570 - val_loss: 28.8381\n",
      "Epoch 28/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.5853 - val_loss: 28.8260\n",
      "Epoch 29/50\n",
      "112720/112720 [==============================] - 3s - loss: 3.0930 - val_loss: 28.8242\n",
      "Epoch 30/50\n",
      "112720/112720 [==============================] - 3s - loss: 3.2902 - val_loss: 28.8064\n",
      "Epoch 31/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.9229 - val_loss: 28.8018\n",
      "Epoch 32/50\n",
      "112720/112720 [==============================] - 3s - loss: 2.8565 - val_loss: 28.7804\n",
      "Epoch 33/50\n",
      "111311/112720 [============================>.] - ETA: 0s - loss: 2.8026"
     ]
    }
   ],
   "source": [
    "import itertools\n",
    "\n",
    "# Hyper-parameter grid: latent feature sizes and encoder/decoder depths\n",
    "features_arr = [4, 16, 32]\n",
    "hidden_layers_arr = [2, 4, 6]\n",
    "\n",
    "epoch_arr = [50]\n",
    "\n",
    "score = namedtuple(\"score\", ['epoch', 'no_of_features','hidden_layers','train_score', 'test_score'])\n",
    "scores = []\n",
    "predictions = {}\n",
    "\n",
    "for e, h, f in itertools.product(epoch_arr, hidden_layers_arr, features_arr):\n",
    "    \n",
    "    print(\" \\n Current Layer Attributes - epochs:{} hidden layers:{} features count:{}\".format(e,h,f))\n",
    "    latent_dim = f\n",
    "    epochs = e\n",
    "    hidden_layers = h\n",
    "    \n",
    "    # BUG FIX: rebuild the graphs for every combination; previously the models\n",
    "    # were built once before the loop, so the latent_dim / hidden_layers values\n",
    "    # set above never changed the architecture actually being trained.\n",
    "    Train.build_vae_model()\n",
    "    Train.build_softmax_model()\n",
    "    \n",
    "    # Trim each split to a multiple of batch_size: the sampling Lambda draws\n",
    "    # epsilon with a fixed (batch_size, latent_dim) shape.\n",
    "    train_size = x_train.shape[0] - x_train.shape[0]%batch_size\n",
    "    valid_size = x_valid.shape[0] - x_valid.shape[0]%batch_size\n",
    "\n",
    "    \n",
    "    optimizer = keras.optimizers.Adam(lr=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-04, decay=0.1)\n",
    "    \n",
    "    # Stage 1: train the autoencoder to reconstruct its inputs.\n",
    "    # NOTE(review): the test set is used as validation data here (information\n",
    "    # leakage); validating on (x_valid, x_valid) would be cleaner -- TODO confirm.\n",
    "    vae_model = Model(inputs = Train.x, outputs = Train.x_)\n",
    "    vae_model.compile(optimizer = optimizer, \n",
    "                      loss = vae_loss)    \n",
    "    vae_model.fit(x = x_train[:train_size,:], y = x_train[:train_size,:], \n",
    "                  shuffle=True, epochs=epochs, \n",
    "                  batch_size = batch_size, \n",
    "                  validation_data = (x_test, x_test),\n",
    "                  verbose = 1)\n",
    "    \n",
    "    # Encode every split into the latent space via the sampling layer\n",
    "    z_model = Model(inputs = Train.x, outputs = vae_model.get_layer(\"z_dist\").output)\n",
    "    z_train = z_model.predict(x_train[:train_size,:], batch_size=batch_size)\n",
    "    z_valid = z_model.predict(x_valid[:valid_size,:], batch_size=batch_size)\n",
    "    z_test = z_model.predict(x_test, batch_size=batch_size)\n",
    "    \n",
    "    # Stage 2: train the softmax classifier on the latent codes\n",
    "    sm_model = Model(inputs = Train.z_, outputs = Train.y)\n",
    "    sm_model.compile(optimizer = optimizer, \n",
    "                      loss = keras.losses.categorical_crossentropy, \n",
    "                      metrics = ['accuracy'])\n",
    "    \n",
    "    sm_model.fit(x = z_train, y = y_train[:train_size,:],\n",
    "                 shuffle=True, epochs=5, \n",
    "                  batch_size = batch_size, \n",
    "                  validation_data = (z_test, y_test),\n",
    "                  verbose = 1)\n",
    "\n",
    "    \n",
    "    # Accuracy on the held-out validation split and on the test set\n",
    "    score_train = sm_model.evaluate(z_valid, y = y_valid[:valid_size,:],\n",
    "                               batch_size = batch_size,\n",
    "                               verbose = 1)\n",
    "    \n",
    "    score_test = sm_model.evaluate(z_test, y = y_test,\n",
    "                           batch_size = batch_size,\n",
    "                           verbose = 1)\n",
    "    \n",
    "    # Keep per-configuration class probabilities for later inspection\n",
    "    y_pred = sm_model.predict(z_test, batch_size=batch_size)\n",
    "    \n",
    "    curr_pred = pd.DataFrame({\"Attack_prob\":y_pred[:,0], \"Normal_prob\":y_pred[:,1]})\n",
    "    predictions.update({\"{}_{}_{}\".format(e,f,h):curr_pred})\n",
    "    \n",
    "    scores.append(score(e,f,h,score_train[-1], score_test[-1]))\n",
    "    \n",
    "    print(\"\\n Train Acc: {}, Test Acc: {}\".format(score_train[-1], \n",
    "                                                  score_test[-1])  )\n",
    "    \n",
    "scores = pd.DataFrame(scores)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2017-05-09T21:32:26.165Z"
    }
   },
   "outputs": [],
   "source": [
    "scores.sort_values(\"test_score\", ascending=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2017-05-09T21:32:26.167Z"
    },
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Persist per-configuration predictions and the score table.\n",
    "# NOTE(review): pd.Panel is deprecated (removed in pandas >= 1.0); a dict of\n",
    "# DataFrames or a MultiIndex frame is the modern equivalent.\n",
    "pd.Panel(predictions).to_pickle(\"dataset/keras_vae_dense_trained_seperately_nsl_kdd_predictions.pkl\")\n",
    "scores.to_pickle(\"dataset/keras_vae_dense_trained_seperately_nsl_kdd_scores.pkl\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "start_time": "2017-05-09T21:32:26.170Z"
    }
   },
   "outputs": [],
   "source": [
    "pd.Panel(predictions)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "_draft": {
   "nbviewer_url": "https://gist.github.com/33dcb1bcf3ca4a3461c4405a003a7591"
  },
  "anaconda-cloud": {},
  "gist": {
   "data": {
    "description": "Final Hyper parameter tuning",
    "public": false
   },
   "id": "33dcb1bcf3ca4a3461c4405a003a7591"
  },
  "kernelspec": {
   "display_name": "Python [conda env:p3]",
   "language": "python",
   "name": "conda-env-p3-py"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
