{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2017-05-08T19:46:43.821388Z",
     "start_time": "2017-05-08T19:46:41.933885Z"
    }
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Using TensorFlow backend.\n"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "from IPython.display import SVG\n",
    "from keras.utils.vis_utils import model_to_dot\n",
    "pd.set_option(\"display.max_rows\",40)\n",
    "%matplotlib inline"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2017-05-08T19:46:43.884534Z",
     "start_time": "2017-05-08T19:46:43.823238Z"
    },
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "class dataset:\n",
    "    # Namespace holding the NSL-KDD train/test DataFrames. The\n",
    "    # read_pickle calls run once, when this cell (class body) executes.\n",
    "    # The last two columns appear to be the 2-class labels -- the\n",
    "    # preprocessing cell slices iloc[:, -2:] as y. TODO: confirm schema.\n",
    "    kdd_train_2labels = pd.read_pickle(\"dataset/kdd_train_2labels.pkl\")\n",
    "    #kdd_train_2labels_y = pd.read_pickle(\"dataset/kdd_train_2labels_y.pkl\")\n",
    "    \n",
    "    kdd_test_2labels = pd.read_pickle(\"dataset/kdd_test_2labels.pkl\")\n",
    "    #kdd_test_2labels_y = pd.read_pickle(\"dataset/kdd_test_2labels_y.pkl\")\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2017-05-08T19:46:44.213103Z",
     "start_time": "2017-05-08T19:46:43.886385Z"
    },
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "from sklearn.preprocessing import LabelEncoder\n",
    "\n",
    "# NOTE(review): le_2labels is instantiated but never used -- the encoding\n",
    "# lines below are commented out, presumably because the pickles already\n",
    "# contain the labels in the needed form. Confirm, then drop this cell.\n",
    "le_2labels = LabelEncoder()\n",
    "#dataset.y_train_2labels = le_2labels.fit_transform(dataset.kdd_train_2labels_y)\n",
    "#dataset.y_test_2labels = le_2labels.transform(dataset.kdd_test_2labels_y)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2017-05-08T19:46:44.363258Z",
     "start_time": "2017-05-08T19:46:44.215056Z"
    },
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "from itertools import product\n",
    "from sklearn.model_selection import train_test_split\n",
    "\n",
    "class preprocessing:\n",
    "    \"\"\"Feature/label split of the pickled frames.\n",
    "\n",
    "    Every column except the last two is a feature; the final two columns\n",
    "    are taken as the 2-class targets.\n",
    "    \"\"\"\n",
    "    x_train = dataset.kdd_train_2labels.iloc[:, :-2].values\n",
    "    y_train = dataset.kdd_train_2labels.iloc[:, -2:].values\n",
    "\n",
    "    x_test = dataset.kdd_test_2labels.iloc[:, :-2].values\n",
    "    y_test = dataset.kdd_test_2labels.iloc[:, -2:].values\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2017-05-08T19:46:44.492268Z",
     "start_time": "2017-05-08T19:46:44.364977Z"
    }
   },
   "outputs": [],
   "source": [
    "from collections import namedtuple\n",
    "import numpy as np\n",
    "from keras.models import Sequential\n",
    "from keras.layers import Dense, Dropout\n",
    "from keras.layers.normalization import BatchNormalization\n",
    "from keras import optimizers\n",
    "from keras import regularizers\n",
    "\n",
    "class Train:\n",
    "    \"\"\"Builds, trains and scores one dense network per execute() call.\n",
    "\n",
    "    Results accumulate across calls in the class-level `scores` list and\n",
    "    `predictions` dict (keyed \"epochs_features_layers\"); the most recent\n",
    "    model is kept in Train.model.\n",
    "    \"\"\"\n",
    "    score = namedtuple(\"score\", ['epoch', 'no_of_features', 'hidden_layers', 'train_score', 'test_score'])\n",
    "    scores = []\n",
    "    predictions = {}\n",
    "\n",
    "    # Deliberately has no `self`: always invoked as Train.execute(...).\n",
    "    def execute(x_train, x_test,\n",
    "                y_train, y_test,\n",
    "                input_dim, no_of_features, hidden_layers,\n",
    "                epochs = 5, keep_prob = 0.9):\n",
    "        \"\"\"Train a `hidden_layers`-deep ReLU net, `no_of_features` units per\n",
    "        layer, and record validation/test accuracy.\n",
    "\n",
    "        keep_prob is the fraction of activations to KEEP at each Dropout\n",
    "        layer; Keras' Dropout argument is the fraction to DROP, hence the\n",
    "        1 - keep_prob below.\n",
    "        \"\"\"\n",
    "        print(\"Training for no_of_features: {}, hidden_layer: {}\".format(no_of_features, hidden_layers))\n",
    "\n",
    "        model = Sequential()\n",
    "        model.add(Dense(no_of_features, input_dim=input_dim, activation='relu'))\n",
    "        # BUGFIX: Dropout() takes the drop fraction; passing keep_prob=0.9\n",
    "        # directly dropped 90% of activations instead of the intended 10%.\n",
    "        model.add(Dropout(1 - keep_prob))\n",
    "\n",
    "        for i in range(hidden_layers - 1):\n",
    "            model.add(Dense(no_of_features, activation='relu'))\n",
    "            model.add(Dropout(1 - keep_prob))\n",
    "\n",
    "        model.add(Dense(2, activation='softmax'))\n",
    "\n",
    "        # TODO(review): MSE on a softmax output trains poorly; consider\n",
    "        # 'categorical_crossentropy' for this 2-class one-hot target.\n",
    "        optimizer = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-04, decay=0.1)\n",
    "        model.compile(loss='mean_squared_error',\n",
    "                      optimizer=optimizer,\n",
    "                      metrics=['accuracy'])\n",
    "\n",
    "        # Hold out 10% of training data for monitoring. BUGFIX: fit()\n",
    "        # previously monitored on (x_test, y_test), leaking the test set\n",
    "        # into training-time decisions while x_valid went unused.\n",
    "        x_train, x_valid, y_train, y_valid = train_test_split(x_train, y_train, test_size=.1)\n",
    "\n",
    "        model.fit(x_train, y_train,\n",
    "                  validation_data=(x_valid, y_valid),\n",
    "                  epochs=epochs,\n",
    "                  batch_size=128,\n",
    "                  verbose = 1)\n",
    "\n",
    "        curr_score_valid = model.evaluate(x_valid, y_valid)\n",
    "        curr_score_test = model.evaluate(x_test, y_test)\n",
    "        pred_value = model.predict(x_test)\n",
    "\n",
    "        print(\"\\n Train Accuracy: {}, Test Accuracy: {}\".format(curr_score_valid[-1], curr_score_test[-1])  )\n",
    "\n",
    "        Train.scores.append(Train.score(epochs, no_of_features, hidden_layers, curr_score_valid[1], curr_score_test[1]))\n",
    "\n",
    "        y_pred = pred_value\n",
    "\n",
    "        # Columns follow the label layout: [:, -2] attack, [:, -1] normal.\n",
    "        curr_pred = pd.DataFrame({\"Attack_prob\": y_pred[:, -2], \"Normal_prob\": y_pred[:, -1]})\n",
    "        # BUGFIX: the key previously used globals `f` and `h` leaked from the\n",
    "        # driver loop; use the parameters so execute() is self-contained.\n",
    "        Train.predictions.update({\"{}_{}_{}\".format(epochs, no_of_features, hidden_layers): curr_pred})\n",
    "        Train.model = model\n",
    "                "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2017-05-08T19:50:31.350676Z",
     "start_time": "2017-05-08T19:46:44.493975Z"
    },
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training for no_of_features: 2, hidden_layer: 2\n",
      "Train on 113375 samples, validate on 22544 samples\n",
      "Epoch 1/5\n",
      "113375/113375 [==============================] - 1s - loss: 0.2538 - acc: 0.5349 - val_loss: 0.5448 - val_acc: 0.4308\n",
      "Epoch 2/5\n",
      "113375/113375 [==============================] - 1s - loss: 0.2537 - acc: 0.5349 - val_loss: 0.5441 - val_acc: 0.4308\n",
      "Epoch 3/5\n",
      "113375/113375 [==============================] - 1s - loss: 0.2535 - acc: 0.5349 - val_loss: 0.5434 - val_acc: 0.4308\n",
      "Epoch 4/5\n",
      "113375/113375 [==============================] - 1s - loss: 0.2532 - acc: 0.5349 - val_loss: 0.5432 - val_acc: 0.4308\n",
      "Epoch 5/5\n",
      "113375/113375 [==============================] - 1s - loss: 0.2535 - acc: 0.5349 - val_loss: 0.5431 - val_acc: 0.4308\n",
      "20672/22544 [==========================>...] - ETA: 0s\n",
      " Train Accuracy: 0.5319098269566598, Test Accuracy: 0.43075762952448543\n",
      "Training for no_of_features: 2, hidden_layer: 6\n",
      "Train on 113375 samples, validate on 22544 samples\n",
      "Epoch 1/5\n",
      "113375/113375 [==============================] - 2s - loss: 0.2495 - acc: 0.5338 - val_loss: 0.2517 - val_acc: 0.4308\n",
      "Epoch 2/5\n",
      "113375/113375 [==============================] - 1s - loss: 0.2493 - acc: 0.5341 - val_loss: 0.2520 - val_acc: 0.4308\n",
      "Epoch 3/5\n",
      "113375/113375 [==============================] - 1s - loss: 0.2493 - acc: 0.5341 - val_loss: 0.2521 - val_acc: 0.4308\n",
      "Epoch 4/5\n",
      "113375/113375 [==============================] - 1s - loss: 0.2492 - acc: 0.5341 - val_loss: 0.2522 - val_acc: 0.4308\n",
      "Epoch 5/5\n",
      "113375/113375 [==============================] - 1s - loss: 0.2492 - acc: 0.5341 - val_loss: 0.2523 - val_acc: 0.4308\n",
      "21024/22544 [==========================>...] - ETA: 0s\n",
      " Train Accuracy: 0.5388156850577575, Test Accuracy: 0.43075762952448543\n",
      "Training for no_of_features: 2, hidden_layer: 10\n",
      "Train on 113375 samples, validate on 22544 samples\n",
      "Epoch 1/5\n",
      "113375/113375 [==============================] - 3s - loss: 0.2494 - acc: 0.5341 - val_loss: 0.2518 - val_acc: 0.4308\n",
      "Epoch 2/5\n",
      "113375/113375 [==============================] - 2s - loss: 0.2493 - acc: 0.5342 - val_loss: 0.2521 - val_acc: 0.4308\n",
      "Epoch 3/5\n",
      "113375/113375 [==============================] - 2s - loss: 0.2492 - acc: 0.5342 - val_loss: 0.2522 - val_acc: 0.4308\n",
      "Epoch 4/5\n",
      "113375/113375 [==============================] - 2s - loss: 0.2492 - acc: 0.5342 - val_loss: 0.2523 - val_acc: 0.4308\n",
      "Epoch 5/5\n",
      "113375/113375 [==============================] - 2s - loss: 0.2492 - acc: 0.5342 - val_loss: 0.2524 - val_acc: 0.4308\n",
      "21216/22544 [===========================>..] - ETA: 0s\n",
      " Train Accuracy: 0.5378631528814097, Test Accuracy: 0.43075762952448543\n",
      "Training for no_of_features: 4, hidden_layer: 2\n",
      "Train on 113375 samples, validate on 22544 samples\n",
      "Epoch 1/5\n",
      "113375/113375 [==============================] - 1s - loss: 0.2535 - acc: 0.5372 - val_loss: 0.4815 - val_acc: 0.3704\n",
      "Epoch 2/5\n",
      "113375/113375 [==============================] - 1s - loss: 0.2533 - acc: 0.5365 - val_loss: 0.4820 - val_acc: 0.3699\n",
      "Epoch 3/5\n",
      "113375/113375 [==============================] - 1s - loss: 0.2531 - acc: 0.5364 - val_loss: 0.4823 - val_acc: 0.3699\n",
      "Epoch 4/5\n",
      "113375/113375 [==============================] - 1s - loss: 0.2529 - acc: 0.5371 - val_loss: 0.4825 - val_acc: 0.3696\n",
      "Epoch 5/5\n",
      "113375/113375 [==============================] - 1s - loss: 0.2536 - acc: 0.5355 - val_loss: 0.4827 - val_acc: 0.3694\n",
      "20512/22544 [==========================>...] - ETA: 0s\n",
      " Train Accuracy: 0.2563899031686942, Test Accuracy: 0.3694109297374024\n",
      "Training for no_of_features: 4, hidden_layer: 6\n",
      "Train on 113375 samples, validate on 22544 samples\n",
      "Epoch 1/5\n",
      "113375/113375 [==============================] - 2s - loss: 0.2495 - acc: 0.5347 - val_loss: 0.4262 - val_acc: 0.5467\n",
      "Epoch 2/5\n",
      "113375/113375 [==============================] - 1s - loss: 0.2493 - acc: 0.5351 - val_loss: 0.4262 - val_acc: 0.5465\n",
      "Epoch 3/5\n",
      "113375/113375 [==============================] - 1s - loss: 0.2493 - acc: 0.5351 - val_loss: 0.4262 - val_acc: 0.5465\n",
      "Epoch 4/5\n",
      "113375/113375 [==============================] - 1s - loss: 0.2492 - acc: 0.5351 - val_loss: 0.4262 - val_acc: 0.5464\n",
      "Epoch 5/5\n",
      "113375/113375 [==============================] - 1s - loss: 0.2492 - acc: 0.5351 - val_loss: 0.4263 - val_acc: 0.5464\n",
      "21920/22544 [============================>.] - ETA: 0s\n",
      " Train Accuracy: 0.4786474043309717, Test Accuracy: 0.5464425124201562\n",
      "Training for no_of_features: 4, hidden_layer: 10\n",
      "Train on 113375 samples, validate on 22544 samples\n",
      "Epoch 1/5\n",
      "113375/113375 [==============================] - 3s - loss: 0.2495 - acc: 0.5346 - val_loss: 0.2851 - val_acc: 0.4308\n",
      "Epoch 2/5\n",
      "113375/113375 [==============================] - 2s - loss: 0.2493 - acc: 0.5348 - val_loss: 0.2853 - val_acc: 0.4308\n",
      "Epoch 3/5\n",
      "113375/113375 [==============================] - 2s - loss: 0.2492 - acc: 0.5348 - val_loss: 0.2854 - val_acc: 0.4308\n",
      "Epoch 4/5\n",
      "113375/113375 [==============================] - 2s - loss: 0.2492 - acc: 0.5348 - val_loss: 0.2855 - val_acc: 0.4308\n",
      "Epoch 5/5\n",
      "113375/113375 [==============================] - 2s - loss: 0.2492 - acc: 0.5348 - val_loss: 0.2856 - val_acc: 0.4308\n",
      "22272/22544 [============================>.] - ETA: 0s\n",
      " Train Accuracy: 0.5327829814492795, Test Accuracy: 0.43075762952448543\n",
      "Training for no_of_features: 8, hidden_layer: 2\n",
      "Train on 113375 samples, validate on 22544 samples\n",
      "Epoch 1/5\n",
      "113375/113375 [==============================] - 2s - loss: 0.2782 - acc: 0.5296 - val_loss: 0.7041 - val_acc: 0.2791\n",
      "Epoch 2/5\n",
      "113375/113375 [==============================] - 1s - loss: 0.2793 - acc: 0.5284 - val_loss: 0.7044 - val_acc: 0.2786\n",
      "Epoch 3/5\n",
      "113375/113375 [==============================] - 1s - loss: 0.2787 - acc: 0.5283 - val_loss: 0.7049 - val_acc: 0.2781\n",
      "Epoch 4/5\n",
      "113375/113375 [==============================] - 1s - loss: 0.2780 - acc: 0.5286 - val_loss: 0.7051 - val_acc: 0.2779\n",
      "Epoch 5/5\n",
      "113375/113375 [==============================] - 1s - loss: 0.2786 - acc: 0.5285 - val_loss: 0.7053 - val_acc: 0.2777\n",
      "21824/22544 [============================>.] - ETA: 0s\n",
      " Train Accuracy: 0.21773297348785522, Test Accuracy: 0.2777235628105039\n",
      "Training for no_of_features: 8, hidden_layer: 6\n",
      "Train on 113375 samples, validate on 22544 samples\n",
      "Epoch 1/5\n",
      "113375/113375 [==============================] - 2s - loss: 0.2498 - acc: 0.5345 - val_loss: 0.4045 - val_acc: 0.5687\n",
      "Epoch 2/5\n",
      "113375/113375 [==============================] - 2s - loss: 0.2496 - acc: 0.5349 - val_loss: 0.4043 - val_acc: 0.5685\n",
      "Epoch 3/5\n",
      "113375/113375 [==============================] - 2s - loss: 0.2495 - acc: 0.5347 - val_loss: 0.4042 - val_acc: 0.5684\n",
      "Epoch 4/5\n",
      "113375/113375 [==============================] - 2s - loss: 0.2496 - acc: 0.5348 - val_loss: 0.4042 - val_acc: 0.5684\n",
      "Epoch 5/5\n",
      "113375/113375 [==============================] - 1s - loss: 0.2495 - acc: 0.5348 - val_loss: 0.4041 - val_acc: 0.5683\n",
      "22048/22544 [============================>.] - ETA: 0s\n",
      " Train Accuracy: 0.46713764089064896, Test Accuracy: 0.5683108587650816\n",
      "Training for no_of_features: 8, hidden_layer: 10\n",
      "Train on 113375 samples, validate on 22544 samples\n",
      "Epoch 1/5\n",
      "113375/113375 [==============================] - 3s - loss: 0.2495 - acc: 0.5337 - val_loss: 0.2601 - val_acc: 0.4308\n",
      "Epoch 2/5\n",
      "113375/113375 [==============================] - 2s - loss: 0.2493 - acc: 0.5339 - val_loss: 0.2603 - val_acc: 0.4308\n",
      "Epoch 3/5\n",
      "113375/113375 [==============================] - 2s - loss: 0.2493 - acc: 0.5338 - val_loss: 0.2605 - val_acc: 0.4308\n",
      "Epoch 4/5\n",
      "113375/113375 [==============================] - 2s - loss: 0.2492 - acc: 0.5338 - val_loss: 0.2606 - val_acc: 0.4308\n",
      "Epoch 5/5\n",
      "113375/113375 [==============================] - 2s - loss: 0.2492 - acc: 0.5339 - val_loss: 0.2607 - val_acc: 0.4308\n",
      "22272/22544 [============================>.] - ETA: 0s\n",
      " Train Accuracy: 0.5411970153945385, Test Accuracy: 0.43075762952448543\n",
      "Training for no_of_features: 16, hidden_layer: 2\n",
      "Train on 113375 samples, validate on 22544 samples\n",
      "Epoch 1/5\n",
      "113375/113375 [==============================] - 2s - loss: 0.3239 - acc: 0.5115 - val_loss: 0.6708 - val_acc: 0.3054\n",
      "Epoch 2/5\n",
      "113375/113375 [==============================] - 1s - loss: 0.3185 - acc: 0.5118 - val_loss: 0.6763 - val_acc: 0.2987\n",
      "Epoch 3/5\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "113375/113375 [==============================] - 1s - loss: 0.3176 - acc: 0.5130 - val_loss: 0.6755 - val_acc: 0.2999\n",
      "Epoch 4/5\n",
      "113375/113375 [==============================] - 1s - loss: 0.3169 - acc: 0.5119 - val_loss: 0.6793 - val_acc: 0.2956\n",
      "Epoch 5/5\n",
      "113375/113375 [==============================] - 1s - loss: 0.3161 - acc: 0.5123 - val_loss: 0.6761 - val_acc: 0.2991\n",
      "21536/22544 [===========================>..] - ETA: 0s\n",
      " Train Accuracy: 0.36537545642165425, Test Accuracy: 0.2991483321504613\n",
      "Training for no_of_features: 16, hidden_layer: 6\n",
      "Train on 113375 samples, validate on 22544 samples\n",
      "Epoch 1/5\n",
      "113375/113375 [==============================] - 3s - loss: 0.2571 - acc: 0.5335 - val_loss: 0.5589 - val_acc: 0.4308\n",
      "Epoch 2/5\n",
      "113375/113375 [==============================] - 2s - loss: 0.2568 - acc: 0.5333 - val_loss: 0.5589 - val_acc: 0.4308\n",
      "Epoch 3/5\n",
      "113375/113375 [==============================] - 2s - loss: 0.2571 - acc: 0.5330 - val_loss: 0.5589 - val_acc: 0.4308\n",
      "Epoch 4/5\n",
      "113375/113375 [==============================] - 2s - loss: 0.2570 - acc: 0.5333 - val_loss: 0.5589 - val_acc: 0.4308\n",
      "Epoch 5/5\n",
      "113375/113375 [==============================] - 2s - loss: 0.2573 - acc: 0.5331 - val_loss: 0.5589 - val_acc: 0.4308\n",
      "21472/22544 [===========================>..] - ETA: 0s\n",
      " Train Accuracy: 0.5387363073030603, Test Accuracy: 0.43075762952448543\n",
      "Training for no_of_features: 16, hidden_layer: 10\n",
      "Train on 113375 samples, validate on 22544 samples\n",
      "Epoch 1/5\n",
      "113375/113375 [==============================] - 4s - loss: 0.2503 - acc: 0.5328 - val_loss: 0.3608 - val_acc: 0.2257\n",
      "Epoch 2/5\n",
      "113375/113375 [==============================] - 2s - loss: 0.2502 - acc: 0.5339 - val_loss: 0.3608 - val_acc: 0.2322\n",
      "Epoch 3/5\n",
      "113375/113375 [==============================] - 2s - loss: 0.2498 - acc: 0.5342 - val_loss: 0.3607 - val_acc: 0.2352\n",
      "Epoch 4/5\n",
      "113375/113375 [==============================] - 2s - loss: 0.2499 - acc: 0.5340 - val_loss: 0.3608 - val_acc: 0.2365\n",
      "Epoch 5/5\n",
      "113375/113375 [==============================] - 2s - loss: 0.2499 - acc: 0.5337 - val_loss: 0.3609 - val_acc: 0.2378\n",
      "21472/22544 [===========================>..] - ETA: 0s\n",
      " Train Accuracy: 0.3305286553373865, Test Accuracy: 0.23784599006387508\n",
      "Training for no_of_features: 32, hidden_layer: 2\n",
      "Train on 113375 samples, validate on 22544 samples\n",
      "Epoch 1/5\n",
      "113375/113375 [==============================] - 2s - loss: 0.4121 - acc: 0.5102 - val_loss: 0.5852 - val_acc: 0.3400\n",
      "Epoch 2/5\n",
      "113375/113375 [==============================] - 2s - loss: 0.4123 - acc: 0.5106 - val_loss: 0.5696 - val_acc: 0.3698\n",
      "Epoch 3/5\n",
      "113375/113375 [==============================] - 1s - loss: 0.4115 - acc: 0.5111 - val_loss: 0.5624 - val_acc: 0.3886\n",
      "Epoch 4/5\n",
      "113375/113375 [==============================] - 1s - loss: 0.4119 - acc: 0.5104 - val_loss: 0.5600 - val_acc: 0.3942\n",
      "Epoch 5/5\n",
      "113375/113375 [==============================] - 1s - loss: 0.4122 - acc: 0.5110 - val_loss: 0.5551 - val_acc: 0.4068\n",
      "22336/22544 [============================>.] - ETA: 0s\n",
      " Train Accuracy: 0.2925861248006365, Test Accuracy: 0.40680447125621005\n",
      "Training for no_of_features: 32, hidden_layer: 6\n",
      "Train on 113375 samples, validate on 22544 samples\n",
      "Epoch 1/5\n",
      "113375/113375 [==============================] - 4s - loss: 0.3145 - acc: 0.5191 - val_loss: 0.5507 - val_acc: 0.3450\n",
      "Epoch 2/5\n",
      "113375/113375 [==============================] - 2s - loss: 0.3136 - acc: 0.5189 - val_loss: 0.5497 - val_acc: 0.3439\n",
      "Epoch 3/5\n",
      "113375/113375 [==============================] - 2s - loss: 0.3141 - acc: 0.5197 - val_loss: 0.5492 - val_acc: 0.3437\n",
      "Epoch 4/5\n",
      "113375/113375 [==============================] - 2s - loss: 0.3141 - acc: 0.5184 - val_loss: 0.5494 - val_acc: 0.3436\n",
      "Epoch 5/5\n",
      "113375/113375 [==============================] - 2s - loss: 0.3133 - acc: 0.5193 - val_loss: 0.5492 - val_acc: 0.3436\n",
      "21920/22544 [============================>.] - ETA: 0s\n",
      " Train Accuracy: 0.28599777742025684, Test Accuracy: 0.3435947480482612\n",
      "Training for no_of_features: 32, hidden_layer: 10\n",
      "Train on 113375 samples, validate on 22544 samples\n",
      "Epoch 1/5\n",
      "113375/113375 [==============================] - 5s - loss: 0.2771 - acc: 0.5308 - val_loss: 0.4092 - val_acc: 0.5642\n",
      "Epoch 2/5\n",
      "113375/113375 [==============================] - 3s - loss: 0.2771 - acc: 0.5314 - val_loss: 0.4092 - val_acc: 0.5632\n",
      "Epoch 3/5\n",
      "113375/113375 [==============================] - 4s - loss: 0.2771 - acc: 0.5312 - val_loss: 0.4092 - val_acc: 0.5628\n",
      "Epoch 4/5\n",
      "113375/113375 [==============================] - 4s - loss: 0.2759 - acc: 0.5329 - val_loss: 0.4091 - val_acc: 0.5624\n",
      "Epoch 5/5\n",
      "113375/113375 [==============================] - 3s - loss: 0.2770 - acc: 0.5312 - val_loss: 0.4090 - val_acc: 0.5621\n",
      "21312/22544 [===========================>..] - ETA: 0s\n",
      " Train Accuracy: 0.4657088426970966, Test Accuracy: 0.5621451383960255\n"
     ]
    }
   ],
   "source": [
    "#features_arr = [4, 8, 16, 32, 64, 128, 256, 1024]\n",
    "#hidden_layers_arr = [2, 4, 6, 50, 100]\n",
    "\n",
    "# Grid of (units per layer, depth) combinations to try.\n",
    "features_arr = [2, 4, 8, 16, 32]\n",
    "hidden_layers_arr = [2, 6, 10]\n",
    "\n",
    "# Derive the input dimensionality from the data instead of hard-coding 122.\n",
    "input_dim = preprocessing.x_train.shape[1]\n",
    "\n",
    "for f, h in product(features_arr, hidden_layers_arr):\n",
    "    Train.execute(preprocessing.x_train, preprocessing.x_test,\n",
    "                  preprocessing.y_train, preprocessing.y_test,\n",
    "                  input_dim, f, h)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2017-05-08T19:50:31.367124Z",
     "start_time": "2017-05-08T19:50:31.352400Z"
    },
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/ritesh_malaiya/anaconda3/envs/p3/lib/python3.6/site-packages/ipykernel/__main__.py:1: FutureWarning: sort(columns=....) is deprecated, use sort_values(by=.....)\n",
      "  if __name__ == '__main__':\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>epoch</th>\n",
       "      <th>no_of_features</th>\n",
       "      <th>hidden_layers</th>\n",
       "      <th>train_score</th>\n",
       "      <th>test_score</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>7</th>\n",
       "      <td>5</td>\n",
       "      <td>8</td>\n",
       "      <td>6</td>\n",
       "      <td>0.467138</td>\n",
       "      <td>0.568311</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>14</th>\n",
       "      <td>5</td>\n",
       "      <td>32</td>\n",
       "      <td>10</td>\n",
       "      <td>0.465709</td>\n",
       "      <td>0.562145</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>5</td>\n",
       "      <td>4</td>\n",
       "      <td>6</td>\n",
       "      <td>0.478647</td>\n",
       "      <td>0.546443</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>5</td>\n",
       "      <td>2</td>\n",
       "      <td>2</td>\n",
       "      <td>0.531910</td>\n",
       "      <td>0.430758</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>5</td>\n",
       "      <td>2</td>\n",
       "      <td>6</td>\n",
       "      <td>0.538816</td>\n",
       "      <td>0.430758</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>5</td>\n",
       "      <td>2</td>\n",
       "      <td>10</td>\n",
       "      <td>0.537863</td>\n",
       "      <td>0.430758</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5</th>\n",
       "      <td>5</td>\n",
       "      <td>4</td>\n",
       "      <td>10</td>\n",
       "      <td>0.532783</td>\n",
       "      <td>0.430758</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>8</th>\n",
       "      <td>5</td>\n",
       "      <td>8</td>\n",
       "      <td>10</td>\n",
       "      <td>0.541197</td>\n",
       "      <td>0.430758</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>10</th>\n",
       "      <td>5</td>\n",
       "      <td>16</td>\n",
       "      <td>6</td>\n",
       "      <td>0.538736</td>\n",
       "      <td>0.430758</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>12</th>\n",
       "      <td>5</td>\n",
       "      <td>32</td>\n",
       "      <td>2</td>\n",
       "      <td>0.292586</td>\n",
       "      <td>0.406804</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>5</td>\n",
       "      <td>4</td>\n",
       "      <td>2</td>\n",
       "      <td>0.256390</td>\n",
       "      <td>0.369411</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>13</th>\n",
       "      <td>5</td>\n",
       "      <td>32</td>\n",
       "      <td>6</td>\n",
       "      <td>0.285998</td>\n",
       "      <td>0.343595</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>9</th>\n",
       "      <td>5</td>\n",
       "      <td>16</td>\n",
       "      <td>2</td>\n",
       "      <td>0.365375</td>\n",
       "      <td>0.299148</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>6</th>\n",
       "      <td>5</td>\n",
       "      <td>8</td>\n",
       "      <td>2</td>\n",
       "      <td>0.217733</td>\n",
       "      <td>0.277724</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>11</th>\n",
       "      <td>5</td>\n",
       "      <td>16</td>\n",
       "      <td>10</td>\n",
       "      <td>0.330529</td>\n",
       "      <td>0.237846</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "    epoch  no_of_features  hidden_layers  train_score  test_score\n",
       "7       5               8              6     0.467138    0.568311\n",
       "14      5              32             10     0.465709    0.562145\n",
       "4       5               4              6     0.478647    0.546443\n",
       "0       5               2              2     0.531910    0.430758\n",
       "1       5               2              6     0.538816    0.430758\n",
       "2       5               2             10     0.537863    0.430758\n",
       "5       5               4             10     0.532783    0.430758\n",
       "8       5               8             10     0.541197    0.430758\n",
       "10      5              16              6     0.538736    0.430758\n",
       "12      5              32              2     0.292586    0.406804\n",
       "3       5               4              2     0.256390    0.369411\n",
       "13      5              32              6     0.285998    0.343595\n",
       "9       5              16              2     0.365375    0.299148\n",
       "6       5               8              2     0.217733    0.277724\n",
       "11      5              16             10     0.330529    0.237846"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# BUGFIX: DataFrame.sort() is deprecated (see the FutureWarning in this\n",
    "# cell's recorded stderr) and removed in later pandas; use sort_values.\n",
    "pd.DataFrame(Train.scores).sort_values(by='test_score', ascending=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2017-05-08T19:50:31.563294Z",
     "start_time": "2017-05-08T19:50:31.368669Z"
    }
   },
   "outputs": [
    {
     "ename": "ImportError",
     "evalue": "Failed to import pydot. You must install pydot and graphviz for `pydotprint` to work.",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mImportError\u001b[0m                               Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-8-e5082b1f9333>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m      4\u001b[0m \u001b[0;31m#                                                                                         m.hidden_layers))\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m      5\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 6\u001b[0;31m \u001b[0mSVG\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodel_to_dot\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mTrain\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcreate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mprog\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'dot'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mformat\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'svg'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[0;32m/home/ritesh_malaiya/anaconda3/envs/p3/lib/python3.6/site-packages/keras/utils/vis_utils.py\u001b[0m in \u001b[0;36mmodel_to_dot\u001b[0;34m(model, show_shapes, show_layer_names)\u001b[0m\n\u001b[1;32m     33\u001b[0m     \u001b[0;32mfrom\u001b[0m \u001b[0;34m.\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodels\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mSequential\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     34\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 35\u001b[0;31m     \u001b[0m_check_pydot\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     36\u001b[0m     \u001b[0mdot\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mpydot\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mDot\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     37\u001b[0m     \u001b[0mdot\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mset\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'rankdir'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'TB'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m/home/ritesh_malaiya/anaconda3/envs/p3/lib/python3.6/site-packages/keras/utils/vis_utils.py\u001b[0m in \u001b[0;36m_check_pydot\u001b[0;34m()\u001b[0m\n\u001b[1;32m     15\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_check_pydot\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     16\u001b[0m     \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mpydot\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mpydot\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfind_graphviz\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 17\u001b[0;31m         raise ImportError('Failed to import pydot. You must install pydot'\n\u001b[0m\u001b[1;32m     18\u001b[0m                           ' and graphviz for `pydotprint` to work.')\n\u001b[1;32m     19\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mImportError\u001b[0m: Failed to import pydot. You must install pydot and graphviz for `pydotprint` to work."
     ]
    }
   ],
   "source": [
    "# Optional artifacts, disabled by default:\n",
    "#for m in Train.models:\n",
    "#    m.model.save(\"dataset/keras_model_epoch_{}_no_of_features_{}_hidden_layers_{}\".format(m.epoch,\n",
    "#                                                                                         m.no_of_features,\n",
    "#                                                                                         m.hidden_layers))\n",
    "\n",
    "# Rendering the architecture needs pydot + graphviz installed (see the\n",
    "# ImportError in this cell's recorded output).\n",
    "#SVG(model_to_dot(Train.model).create(prog='dot', format='svg'))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2017-05-08T19:50:31.563874Z",
     "start_time": "2017-05-08T19:46:43.298Z"
    },
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Persist the per-configuration predictions and the score table for the\n",
    "# downstream comparison notebooks.\n",
    "# NOTE(review): pd.Panel is deprecated (removed in pandas 1.0); on upgrade,\n",
    "# persist Train.predictions as a dict of DataFrames or a multi-indexed\n",
    "# DataFrame instead -- confirm what the consumer of this pickle expects.\n",
    "pd.Panel(Train.predictions).to_pickle(\"dataset/keras_dense_nsl_kdd_predictions.pkl\")\n",
    "pd.DataFrame(Train.scores).to_pickle(\"dataset/keras_dense_nsl_kdd_scores.pkl\")"
   ]
  }
 ],
 "metadata": {
  "_draft": {
   "nbviewer_url": "https://gist.github.com/0f6c9677d5f316c57d9d8bd6e0fe8850"
  },
  "anaconda-cloud": {},
  "gist": {
   "data": {
    "description": "Final Hyper parameter tuning",
    "public": false
   },
   "id": "0f6c9677d5f316c57d9d8bd6e0fe8850"
  },
  "kernelspec": {
   "display_name": "Python [conda env:p3]",
   "language": "python",
   "name": "conda-env-p3-py"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
