{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Lab 2: Network Intrusion Detection"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# Imports\n",
    "from matplotlib import pyplot as plt\n",
    "from sklearn.model_selection import cross_val_score\n",
    "from sklearn.cross_validation import ShuffleSplit\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "from sklearn.grid_search import GridSearchCV\n",
    "from sklearn.pipeline import Pipeline\n",
    "from sklearn import metrics as mt\n",
    "from sklearn.svm import SVC\n",
    "\n",
    "from sklearn.neighbors import KNeighborsClassifier\n",
    "from sklearn.model_selection import StratifiedKFold\n",
    "from sklearn.model_selection import cross_val_score\n",
    "from sklearn.cross_validation import KFold\n",
    "from sklearn.cross_validation import StratifiedKFold\n",
    "import time\n",
    "\n",
    "from sklearn import cross_validation\n",
    "from sklearn.ensemble import RandomForestClassifier\n",
    "\n",
    "from sklearn.naive_bayes import GaussianNB\n",
    "\n",
    "from sklearn.decomposition import RandomizedPCA \n",
    "from sklearn.decomposition import PCA\n",
    "from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA\n",
    "\n",
    "from sklearn.pipeline import Pipeline\n",
    "from sklearn import metrics as mt\n",
    "\n",
    "import seaborn as sns\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "\n",
    "\n",
    "%matplotlib inline"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Data Preparation:\n",
    "\n",
    "* Define and prepare your class variables. Use proper variable representations (int, float, one-hot, etc.). Use pre-processing methods (as needed) for dimensionality reduction, scaling, etc. Remove variables that are not needed/useful for the analysis."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "duplicate record deleted successfully: 82328 observations remaining\n"
     ]
    }
   ],
   "source": [
    "# Load UNSW_NB15 into a Pandas dataframe\n",
    "df = pd.read_csv('UNSW_NB15_training_set.csv', encoding='utf-8-sig')\n",
    "\n",
    "# Lets remove attributes that are not useful to us during this first analysis pass\n",
    "non_useful_features_list = ['id', 'attack_cat']\n",
    "# id: n internal variable to just ref an obseration. deemed not usefl\n",
    "# attack_cat: first try and just predict the label. \n",
    "#             It will obviously 1:1 correlate with label\n",
    "#             We can circle back and swap it out with label \n",
    "#             to see if we get any better accuracy on an \n",
    "#             on an attack type level\n",
    "for feature in non_useful_features_list:\n",
    "    if feature in df:\n",
    "        df.drop(feature, axis=1, inplace=True)  # Lets drop id as it is an internal variable to just ref an obseratio\n",
    "        \n",
    "# Overwrite the existing dataframe with the new dataframe that does not contain the \n",
    "# four unwanted records and confirm we have 4 less records (shold have 82328 observations)\n",
    "if \"is_ftp_login\" in df:\n",
    "    df = df[df.is_ftp_login != 2]\n",
    "    if len(df) == 82328:\n",
    "        print (\"duplicate record deleted successfully: \" + str(len(df)) + \" observations remaining\" )\n",
    "        \n",
    "# Check to see if non useful features still exist in dataframe, if so, we did something wrong\n",
    "for feature in non_useful_features_list:\n",
    "    if feature in df:\n",
    "        print (\"[\" + feature + \"]\" + \"still found, check removal code. (Should not see this)\" )\n",
    "        \n",
    "df_five = df[['sttl','ct_dst_sport_ltm', 'ct_src_dport_ltm', 'swin', 'dwin', 'label' ]] "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "* Describe the final dataset that is used for classification/regression (include a description of any newly formed variables you created)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>dur</th>\n",
       "      <th>spkts</th>\n",
       "      <th>dpkts</th>\n",
       "      <th>sbytes</th>\n",
       "      <th>dbytes</th>\n",
       "      <th>rate</th>\n",
       "      <th>sttl</th>\n",
       "      <th>dttl</th>\n",
       "      <th>sload</th>\n",
       "      <th>dload</th>\n",
       "      <th>...</th>\n",
       "      <th>ct_src_dport_ltm</th>\n",
       "      <th>ct_dst_sport_ltm</th>\n",
       "      <th>ct_dst_src_ltm</th>\n",
       "      <th>is_ftp_login</th>\n",
       "      <th>ct_ftp_cmd</th>\n",
       "      <th>ct_flw_http_mthd</th>\n",
       "      <th>ct_src_ltm</th>\n",
       "      <th>ct_srv_dst</th>\n",
       "      <th>is_sm_ips_ports</th>\n",
       "      <th>label</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>count</th>\n",
       "      <td>82328.000000</td>\n",
       "      <td>82328.000000</td>\n",
       "      <td>82328.000000</td>\n",
       "      <td>8.232800e+04</td>\n",
       "      <td>8.232800e+04</td>\n",
       "      <td>8.232800e+04</td>\n",
       "      <td>82328.000000</td>\n",
       "      <td>82328.00000</td>\n",
       "      <td>8.232800e+04</td>\n",
       "      <td>8.232800e+04</td>\n",
       "      <td>...</td>\n",
       "      <td>82328.000000</td>\n",
       "      <td>82328.000000</td>\n",
       "      <td>82328.000000</td>\n",
       "      <td>82328.000000</td>\n",
       "      <td>82328.000000</td>\n",
       "      <td>82328.000000</td>\n",
       "      <td>82328.000000</td>\n",
       "      <td>82328.000000</td>\n",
       "      <td>82328.000000</td>\n",
       "      <td>82328.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>mean</th>\n",
       "      <td>1.006783</td>\n",
       "      <td>18.666893</td>\n",
       "      <td>17.546303</td>\n",
       "      <td>7.994267e+03</td>\n",
       "      <td>1.323440e+04</td>\n",
       "      <td>8.241489e+04</td>\n",
       "      <td>180.973448</td>\n",
       "      <td>95.70541</td>\n",
       "      <td>6.455215e+07</td>\n",
       "      <td>6.305771e+05</td>\n",
       "      <td>...</td>\n",
       "      <td>4.929040</td>\n",
       "      <td>3.663092</td>\n",
       "      <td>7.456528</td>\n",
       "      <td>0.008187</td>\n",
       "      <td>0.008284</td>\n",
       "      <td>0.129749</td>\n",
       "      <td>6.468480</td>\n",
       "      <td>9.164610</td>\n",
       "      <td>0.011126</td>\n",
       "      <td>0.550578</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>std</th>\n",
       "      <td>4.710557</td>\n",
       "      <td>133.919593</td>\n",
       "      <td>115.576881</td>\n",
       "      <td>1.716464e+05</td>\n",
       "      <td>1.514751e+05</td>\n",
       "      <td>1.486229e+05</td>\n",
       "      <td>101.512436</td>\n",
       "      <td>116.66547</td>\n",
       "      <td>1.798656e+08</td>\n",
       "      <td>2.393055e+06</td>\n",
       "      <td>...</td>\n",
       "      <td>8.389724</td>\n",
       "      <td>5.915518</td>\n",
       "      <td>11.415443</td>\n",
       "      <td>0.090110</td>\n",
       "      <td>0.091439</td>\n",
       "      <td>0.638697</td>\n",
       "      <td>8.544117</td>\n",
       "      <td>11.121571</td>\n",
       "      <td>0.104893</td>\n",
       "      <td>0.497438</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>min</th>\n",
       "      <td>0.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.000000</td>\n",
       "      <td>2.400000e+01</td>\n",
       "      <td>0.000000e+00</td>\n",
       "      <td>0.000000e+00</td>\n",
       "      <td>0.000000</td>\n",
       "      <td>0.00000</td>\n",
       "      <td>0.000000e+00</td>\n",
       "      <td>0.000000e+00</td>\n",
       "      <td>...</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.000000</td>\n",
       "      <td>0.000000</td>\n",
       "      <td>0.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.000000</td>\n",
       "      <td>0.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>25%</th>\n",
       "      <td>0.000008</td>\n",
       "      <td>2.000000</td>\n",
       "      <td>0.000000</td>\n",
       "      <td>1.140000e+02</td>\n",
       "      <td>0.000000e+00</td>\n",
       "      <td>2.860585e+01</td>\n",
       "      <td>62.000000</td>\n",
       "      <td>0.00000</td>\n",
       "      <td>1.120356e+04</td>\n",
       "      <td>0.000000e+00</td>\n",
       "      <td>...</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>0.000000</td>\n",
       "      <td>0.000000</td>\n",
       "      <td>0.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>2.000000</td>\n",
       "      <td>0.000000</td>\n",
       "      <td>0.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>50%</th>\n",
       "      <td>0.014120</td>\n",
       "      <td>6.000000</td>\n",
       "      <td>2.000000</td>\n",
       "      <td>5.340000e+02</td>\n",
       "      <td>1.780000e+02</td>\n",
       "      <td>2.651198e+03</td>\n",
       "      <td>254.000000</td>\n",
       "      <td>29.00000</td>\n",
       "      <td>5.770751e+05</td>\n",
       "      <td>2.112632e+03</td>\n",
       "      <td>...</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>3.000000</td>\n",
       "      <td>0.000000</td>\n",
       "      <td>0.000000</td>\n",
       "      <td>0.000000</td>\n",
       "      <td>3.000000</td>\n",
       "      <td>5.000000</td>\n",
       "      <td>0.000000</td>\n",
       "      <td>1.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>75%</th>\n",
       "      <td>0.719362</td>\n",
       "      <td>12.000000</td>\n",
       "      <td>10.000000</td>\n",
       "      <td>1.280000e+03</td>\n",
       "      <td>9.560000e+02</td>\n",
       "      <td>1.111111e+05</td>\n",
       "      <td>254.000000</td>\n",
       "      <td>252.00000</td>\n",
       "      <td>6.514286e+07</td>\n",
       "      <td>1.585818e+04</td>\n",
       "      <td>...</td>\n",
       "      <td>4.000000</td>\n",
       "      <td>3.000000</td>\n",
       "      <td>6.000000</td>\n",
       "      <td>0.000000</td>\n",
       "      <td>0.000000</td>\n",
       "      <td>0.000000</td>\n",
       "      <td>7.000000</td>\n",
       "      <td>11.000000</td>\n",
       "      <td>0.000000</td>\n",
       "      <td>1.000000</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>max</th>\n",
       "      <td>59.999989</td>\n",
       "      <td>10646.000000</td>\n",
       "      <td>11018.000000</td>\n",
       "      <td>1.435577e+07</td>\n",
       "      <td>1.465753e+07</td>\n",
       "      <td>1.000000e+06</td>\n",
       "      <td>255.000000</td>\n",
       "      <td>253.00000</td>\n",
       "      <td>5.268000e+09</td>\n",
       "      <td>2.082111e+07</td>\n",
       "      <td>...</td>\n",
       "      <td>59.000000</td>\n",
       "      <td>38.000000</td>\n",
       "      <td>63.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>2.000000</td>\n",
       "      <td>16.000000</td>\n",
       "      <td>60.000000</td>\n",
       "      <td>62.000000</td>\n",
       "      <td>1.000000</td>\n",
       "      <td>1.000000</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>8 rows × 40 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "                dur         spkts         dpkts        sbytes        dbytes  \\\n",
       "count  82328.000000  82328.000000  82328.000000  8.232800e+04  8.232800e+04   \n",
       "mean       1.006783     18.666893     17.546303  7.994267e+03  1.323440e+04   \n",
       "std        4.710557    133.919593    115.576881  1.716464e+05  1.514751e+05   \n",
       "min        0.000000      1.000000      0.000000  2.400000e+01  0.000000e+00   \n",
       "25%        0.000008      2.000000      0.000000  1.140000e+02  0.000000e+00   \n",
       "50%        0.014120      6.000000      2.000000  5.340000e+02  1.780000e+02   \n",
       "75%        0.719362     12.000000     10.000000  1.280000e+03  9.560000e+02   \n",
       "max       59.999989  10646.000000  11018.000000  1.435577e+07  1.465753e+07   \n",
       "\n",
       "               rate          sttl         dttl         sload         dload  \\\n",
       "count  8.232800e+04  82328.000000  82328.00000  8.232800e+04  8.232800e+04   \n",
       "mean   8.241489e+04    180.973448     95.70541  6.455215e+07  6.305771e+05   \n",
       "std    1.486229e+05    101.512436    116.66547  1.798656e+08  2.393055e+06   \n",
       "min    0.000000e+00      0.000000      0.00000  0.000000e+00  0.000000e+00   \n",
       "25%    2.860585e+01     62.000000      0.00000  1.120356e+04  0.000000e+00   \n",
       "50%    2.651198e+03    254.000000     29.00000  5.770751e+05  2.112632e+03   \n",
       "75%    1.111111e+05    254.000000    252.00000  6.514286e+07  1.585818e+04   \n",
       "max    1.000000e+06    255.000000    253.00000  5.268000e+09  2.082111e+07   \n",
       "\n",
       "           ...       ct_src_dport_ltm  ct_dst_sport_ltm  ct_dst_src_ltm  \\\n",
       "count      ...           82328.000000      82328.000000    82328.000000   \n",
       "mean       ...               4.929040          3.663092        7.456528   \n",
       "std        ...               8.389724          5.915518       11.415443   \n",
       "min        ...               1.000000          1.000000        1.000000   \n",
       "25%        ...               1.000000          1.000000        1.000000   \n",
       "50%        ...               1.000000          1.000000        3.000000   \n",
       "75%        ...               4.000000          3.000000        6.000000   \n",
       "max        ...              59.000000         38.000000       63.000000   \n",
       "\n",
       "       is_ftp_login    ct_ftp_cmd  ct_flw_http_mthd    ct_src_ltm  \\\n",
       "count  82328.000000  82328.000000      82328.000000  82328.000000   \n",
       "mean       0.008187      0.008284          0.129749      6.468480   \n",
       "std        0.090110      0.091439          0.638697      8.544117   \n",
       "min        0.000000      0.000000          0.000000      1.000000   \n",
       "25%        0.000000      0.000000          0.000000      1.000000   \n",
       "50%        0.000000      0.000000          0.000000      3.000000   \n",
       "75%        0.000000      0.000000          0.000000      7.000000   \n",
       "max        1.000000      2.000000         16.000000     60.000000   \n",
       "\n",
       "         ct_srv_dst  is_sm_ips_ports         label  \n",
       "count  82328.000000     82328.000000  82328.000000  \n",
       "mean       9.164610         0.011126      0.550578  \n",
       "std       11.121571         0.104893      0.497438  \n",
       "min        1.000000         0.000000      0.000000  \n",
       "25%        2.000000         0.000000      0.000000  \n",
       "50%        5.000000         0.000000      1.000000  \n",
       "75%       11.000000         0.000000      1.000000  \n",
       "max       62.000000         1.000000      1.000000  \n",
       "\n",
       "[8 rows x 40 columns]"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df.describe()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# http://stackoverflow.com/questions/19482970/get-list-from-pandas-dataframe-column-headers\n",
    "\n",
    "# Surrounding code in try/except on case where there are no object type features to one-hot encode\n",
    "try:\n",
    "    tmp_df = df.describe(include=['O'])  # creates a temporary df with just categorical features that are of object type\n",
    "    categorical_object_col_name_list = tmp_df.columns.values.tolist()\n",
    "    for col_name in categorical_object_col_name_list:\n",
    "        #print col_name\n",
    "        tmp_df = pd.get_dummies(df[col_name], prefix=col_name)\n",
    "        df = pd.concat((df,tmp_df), axis=1)\n",
    "        df.drop(col_name, axis=1, inplace=True)  # go ahead and drop original feature as it has now been one-hot encoded\n",
    "except ValueError as e:\n",
    "    print (\"Value error({0}): \".format(e) ) # Note"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<class 'pandas.core.frame.DataFrame'>\n",
      "Int64Index: 82328 entries, 0 to 82331\n",
      "Columns: 191 entries, dur to state_RST\n",
      "dtypes: float64(11), int64(29), uint8(151)\n",
      "memory usage: 37.6 MB\n"
     ]
    }
   ],
   "source": [
    "df.info()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>dur</th>\n",
       "      <th>spkts</th>\n",
       "      <th>dpkts</th>\n",
       "      <th>sbytes</th>\n",
       "      <th>dbytes</th>\n",
       "      <th>rate</th>\n",
       "      <th>sttl</th>\n",
       "      <th>dttl</th>\n",
       "      <th>sload</th>\n",
       "      <th>dload</th>\n",
       "      <th>...</th>\n",
       "      <th>service_snmp</th>\n",
       "      <th>service_ssh</th>\n",
       "      <th>service_ssl</th>\n",
       "      <th>state_ACC</th>\n",
       "      <th>state_CLO</th>\n",
       "      <th>state_CON</th>\n",
       "      <th>state_FIN</th>\n",
       "      <th>state_INT</th>\n",
       "      <th>state_REQ</th>\n",
       "      <th>state_RST</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>0.000011</td>\n",
       "      <td>2</td>\n",
       "      <td>0</td>\n",
       "      <td>496</td>\n",
       "      <td>0</td>\n",
       "      <td>90909.09020</td>\n",
       "      <td>254</td>\n",
       "      <td>0</td>\n",
       "      <td>1.803636e+08</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>0.000008</td>\n",
       "      <td>2</td>\n",
       "      <td>0</td>\n",
       "      <td>1762</td>\n",
       "      <td>0</td>\n",
       "      <td>125000.00030</td>\n",
       "      <td>254</td>\n",
       "      <td>0</td>\n",
       "      <td>8.810000e+08</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>0.000005</td>\n",
       "      <td>2</td>\n",
       "      <td>0</td>\n",
       "      <td>1068</td>\n",
       "      <td>0</td>\n",
       "      <td>200000.00510</td>\n",
       "      <td>254</td>\n",
       "      <td>0</td>\n",
       "      <td>8.544000e+08</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>0.000006</td>\n",
       "      <td>2</td>\n",
       "      <td>0</td>\n",
       "      <td>900</td>\n",
       "      <td>0</td>\n",
       "      <td>166666.66080</td>\n",
       "      <td>254</td>\n",
       "      <td>0</td>\n",
       "      <td>6.000000e+08</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>0.000010</td>\n",
       "      <td>2</td>\n",
       "      <td>0</td>\n",
       "      <td>2126</td>\n",
       "      <td>0</td>\n",
       "      <td>100000.00250</td>\n",
       "      <td>254</td>\n",
       "      <td>0</td>\n",
       "      <td>8.504000e+08</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5</th>\n",
       "      <td>0.000003</td>\n",
       "      <td>2</td>\n",
       "      <td>0</td>\n",
       "      <td>784</td>\n",
       "      <td>0</td>\n",
       "      <td>333333.32150</td>\n",
       "      <td>254</td>\n",
       "      <td>0</td>\n",
       "      <td>1.045333e+09</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>6</th>\n",
       "      <td>0.000006</td>\n",
       "      <td>2</td>\n",
       "      <td>0</td>\n",
       "      <td>1960</td>\n",
       "      <td>0</td>\n",
       "      <td>166666.66080</td>\n",
       "      <td>254</td>\n",
       "      <td>0</td>\n",
       "      <td>1.306667e+09</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>7</th>\n",
       "      <td>0.000028</td>\n",
       "      <td>2</td>\n",
       "      <td>0</td>\n",
       "      <td>1384</td>\n",
       "      <td>0</td>\n",
       "      <td>35714.28522</td>\n",
       "      <td>254</td>\n",
       "      <td>0</td>\n",
       "      <td>1.977143e+08</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>8</th>\n",
       "      <td>0.000000</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>46</td>\n",
       "      <td>0</td>\n",
       "      <td>0.00000</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0.000000e+00</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>9</th>\n",
       "      <td>0.000000</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>46</td>\n",
       "      <td>0</td>\n",
       "      <td>0.00000</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0.000000e+00</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>10 rows × 191 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "        dur  spkts  dpkts  sbytes  dbytes          rate  sttl  dttl  \\\n",
       "0  0.000011      2      0     496       0   90909.09020   254     0   \n",
       "1  0.000008      2      0    1762       0  125000.00030   254     0   \n",
       "2  0.000005      2      0    1068       0  200000.00510   254     0   \n",
       "3  0.000006      2      0     900       0  166666.66080   254     0   \n",
       "4  0.000010      2      0    2126       0  100000.00250   254     0   \n",
       "5  0.000003      2      0     784       0  333333.32150   254     0   \n",
       "6  0.000006      2      0    1960       0  166666.66080   254     0   \n",
       "7  0.000028      2      0    1384       0   35714.28522   254     0   \n",
       "8  0.000000      1      0      46       0       0.00000     0     0   \n",
       "9  0.000000      1      0      46       0       0.00000     0     0   \n",
       "\n",
       "          sload  dload    ...      service_snmp  service_ssh  service_ssl  \\\n",
       "0  1.803636e+08    0.0    ...                 0            0            0   \n",
       "1  8.810000e+08    0.0    ...                 0            0            0   \n",
       "2  8.544000e+08    0.0    ...                 0            0            0   \n",
       "3  6.000000e+08    0.0    ...                 0            0            0   \n",
       "4  8.504000e+08    0.0    ...                 0            0            0   \n",
       "5  1.045333e+09    0.0    ...                 0            0            0   \n",
       "6  1.306667e+09    0.0    ...                 0            0            0   \n",
       "7  1.977143e+08    0.0    ...                 0            0            0   \n",
       "8  0.000000e+00    0.0    ...                 0            0            0   \n",
       "9  0.000000e+00    0.0    ...                 0            0            0   \n",
       "\n",
       "   state_ACC  state_CLO  state_CON  state_FIN  state_INT  state_REQ  state_RST  \n",
       "0          0          0          0          0          1          0          0  \n",
       "1          0          0          0          0          1          0          0  \n",
       "2          0          0          0          0          1          0          0  \n",
       "3          0          0          0          0          1          0          0  \n",
       "4          0          0          0          0          1          0          0  \n",
       "5          0          0          0          0          1          0          0  \n",
       "6          0          0          0          0          1          0          0  \n",
       "7          0          0          0          0          1          0          0  \n",
       "8          0          0          0          0          1          0          0  \n",
       "9          0          0          0          0          1          0          0  \n",
       "\n",
       "[10 rows x 191 columns]"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df.head(10)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "ShuffleSplit(82328, n_iter=3, test_size=0.2, random_state=None)\n"
     ]
    }
   ],
   "source": [
    "# we want to predict the X and y data as follows for 5 feature iteration: \n",
    "if 'label' in df_five:\n",
    "    y = df_five['label'].values # get the labels we want\n",
    "    del df_five['label'] # get rid of the class label\n",
    "    X = df_five.values # use everything else to predict!\n",
    "\n",
    "    # X and y are now numpy matrices, by calling 'values' on the pandas data frames we\n",
    "    # have converted them into simple matrices to use with scikit learn\n",
    "    \n",
    "    \n",
    "# to use the cross validation object in scikit learn, we need to grab an instance\n",
    "# of the object and set it up. This object will be able to split our data into \n",
    "# training and testing splits\n",
    "num_cv_iterations = 3\n",
    "num_instances = len(y)\n",
    "cv_object = ShuffleSplit(n=num_instances,\n",
    "                         n_iter=num_cv_iterations,\n",
    "                         test_size  = 0.2)\n",
    "                         \n",
    "print (cv_object)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<class 'pandas.core.frame.DataFrame'>\n",
      "Int64Index: 82328 entries, 0 to 82331\n",
      "Columns: 191 entries, dur to state_RST\n",
      "dtypes: float64(11), int64(29), uint8(151)\n",
      "memory usage: 37.6 MB\n"
     ]
    }
   ],
   "source": [
    "dfcopy = df.copy(deep=True) # preserve original dataframe that has our dependent variable\n",
    "dfcopy.info()\n",
    "# we want to predict the X and y data as follows:\n",
    "if 'label' in dfcopy:\n",
    "    y = dfcopy['label'].values # get the labels we want\n",
    "    del dfcopy['label'] # get rid of the class label\n",
    "    X = dfcopy.values # use everything else to predict!"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Modeling and Evaluation:\n",
    "* Choose and explain your evaluation metrics that you will use (i.e., accuracy, precision, recall, F-measure, or any metric we have discussed). Why are the measure(s) appropriate for analyzing the results of your modeling? Give a detailed explanation backing up any assertions."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "*  Choose the method you will use for dividing your data into training and testing splits (i.e., are you using Stratified 10-fold cross validation? Why?). Explain why your chosen method is appropriate or use more than one method as appropriate. For example, if you are using time series data then you should be using continuous training and testing sets across time."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "*  Create three different classification/regression models for each task (e.g., random forest, KNN, and SVM for task one and the same or different algorithms for task two). Two modeling techniques must be new (but the third could be SVM or logistic regression). Adjust parameters as appropriate to increase generalization performance using your chosen metric. You must investigate different parameters of the algorithms! "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {
    "collapsed": false,
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Number of observations= 82328\n",
      "Training...\n",
      "Predicting...\n",
      "Training...\n",
      "Predicting...\n",
      "Training...\n",
      "Predicting...\n",
      "Total training and predicition time was  21.60 seconds.\n",
      "\n",
      "Evaluating accuracies by cross validation...\n",
      "Evaluating precision by cross validation...\n",
      "Evaluating recall by cross validation...\n",
      "Evaluating accuracy by cross validation...\n",
      "Evaluating f1 by cross validation...\n",
      "\n",
      "Average Accuracies across stratified 10 fold cross validation iterations = 95.66%  accuracies cv time  62.58 seconds.\n",
      "Average Precision across stratified 10 fold cross validation iterations = 96.85% precision cv time  62.04 seconds.\n",
      "Average Recall(Sensitivity) across stratified 10 fold cross validation iterations = 95.25% recall cv time  63.16 seconds.\n",
      "Average Accuracy across stratified 10 fold cross validation iterations = 95.59% accuracy cv time  62.47 seconds.\n",
      "Average F measure(F1) across stratified 10 fold cross validation iterations = 95.97% f1 cv time  63.60 seconds.\n"
     ]
    }
   ],
   "source": [
    "# Number of rows being modeled\n",
    "numObservations = len(dfcopy)\n",
    "print (\"Number of observations= \" + str(numObservations))\n",
    "\n",
    "# Stratified folds preserve the class balance of y in every fold\n",
    "num_folds = 10\n",
    "skf = StratifiedKFold(y, num_folds)\n",
    "\n",
    "# Random-forest hyper-parameters; n_jobs=-1 trains trees on all available cores\n",
    "num_trees = 100\n",
    "max_features = 5\n",
    "\n",
    "rf_clf = RandomForestClassifier(n_estimators=num_trees, max_features=max_features, n_jobs=-1)\n",
    "\n",
    "# Time fit/predict over the ShuffleSplit iterations built in an earlier cell.\n",
    "# NOTE(review): this loop iterates cv_object (ShuffleSplit), not the stratified\n",
    "# folds defined above -- confirm that is intentional.\n",
    "t_total = 0\n",
    "for train_indices, test_indices in cv_object:\n",
    "    t0 = time.perf_counter()  # time.clock() was deprecated and removed in Python 3.8\n",
    "    X_train = X[train_indices]\n",
    "    y_train = y[train_indices]\n",
    "    \n",
    "    X_test = X[test_indices]\n",
    "    y_test = y[test_indices]\n",
    "    \n",
    "    print ('Training...')\n",
    "    rf_clf.fit( X_train,y_train )\n",
    "\n",
    "    print ('Predicting...')\n",
    "    rf_clf.predict(X_test).astype(int)  # predictions are discarded; the loop only measures time\n",
    "    t1 = time.perf_counter()\n",
    "    t_total += t1 - t0\n",
    "\n",
    "print ('Total training and prediction time was % .2f' % t_total + ' seconds.')\n",
    "\n",
    "def timed_cv_score(clf, X, y, cv, scoring=None):\n",
    "    \"\"\"Run cross_val_score with the given scorer and return (scores, elapsed seconds).\"\"\"\n",
    "    t0 = time.perf_counter()\n",
    "    scores = cross_val_score(clf, X, y=y, cv=cv, scoring=scoring)\n",
    "    return scores, time.perf_counter() - t0\n",
    "\n",
    "print ('\\nEvaluating accuracies by cross validation...')\n",
    "accuracies, accuracies_time = timed_cv_score(rf_clf, X, y, skf)  # default scorer for a classifier is accuracy\n",
    "\n",
    "print ('Evaluating precision by cross validation...')\n",
    "precision, precision_time = timed_cv_score(rf_clf, X, y, skf, scoring='precision')\n",
    "\n",
    "print ('Evaluating recall by cross validation...')\n",
    "recall, recall_time = timed_cv_score(rf_clf, X, y, skf, scoring='recall')\n",
    "\n",
    "print ('Evaluating accuracy by cross validation...')\n",
    "accuracy, accuracy_time = timed_cv_score(rf_clf, X, y, skf, scoring='accuracy')  # redundant with the default-scorer run above\n",
    "\n",
    "print ('Evaluating f1 by cross validation...')\n",
    "f1, f1_time = timed_cv_score(rf_clf, X, y, skf, scoring='f1')\n",
    "\n",
    "print (\"\\nAverage Accuracies across stratified \" + str(num_folds) + \" fold cross validation iterations = {0:.2%} \".format(np.average(accuracies)) + ' accuracies cv time % .2f' % accuracies_time + ' seconds.')\n",
    "print (\"Average Precision across stratified \" + str(num_folds) + \" fold cross validation iterations = {0:.2%}\".format(np.average(precision)) + ' precision cv time % .2f' % precision_time + ' seconds.')\n",
    "print (\"Average Recall(Sensitivity) across stratified \" + str(num_folds) + \" fold cross validation iterations = {0:.2%}\".format(np.average(recall)) + ' recall cv time % .2f' % recall_time + ' seconds.')\n",
    "print (\"Average Accuracy across stratified \" + str(num_folds) + \" fold cross validation iterations = {0:.2%}\".format(np.average(accuracy)) + ' accuracy cv time % .2f' % accuracy_time + ' seconds.')\n",
    "print (\"Average F measure(F1) across stratified \" + str(num_folds) + \" fold cross validation iterations = {0:.2%}\".format(np.average(f1)) + ' f1 cv time % .2f' % f1_time + ' seconds.')\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "*  Analyze the results using your chosen method of evaluation. Use visualizations of the results to bolster the analysis. Explain any visuals and analyze why they are interesting to someone that might use this model."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "* Discuss the advantages of each model for each classification task, if any. If there are not advantages, explain why. Is any model better than another? Is the difference significant with 95% confidence? Use proper statistical comparison methods. You must use statistical comparison techniques—be sure they are appropriate for your chosen method of validation. "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": true
   },
   "source": [
    "https://www.quora.com/What-are-the-advantages-of-different-classification-algorithms\n",
    "\n",
    "### Logistic Regression ###\n",
    "Logistic Regression (LR) is a fairly well-behaved classification algorithm that works best when the features are roughly linearly related to the log-odds of the target, and as a rule of thumb it is recommended as a starting point for classification to set a baseline for other classification comparisons.  Additional advantages and disadvantages are:\n",
    "\n",
    "**Advantages**\n",
    "* Robust to noise\n",
    "* Avoid overfitting and even do feature selection by using l2 or l1 regularization.\n",
    "* Used in Big Data scenarios since it is pretty efficient\n",
    "* Training can be distributed across machines using, for example, ADMM (see the logreg library).\n",
    "* Output can be interpreted as a probability which allows for ranking instead of classification.\n",
    "* Low variance\n",
    "* Provides probabilities for outcomes\n",
    "* Works well with diagonal (feature) decision boundaries\n",
    "* NOTE: logistic regression can also be used with kernel methods\n",
    "\n",
    "**Disadvantages**\n",
    "* High bias\n",
    "* Cannot deal with missing values\n",
    "* Requiring to impute the missing values (or substitute them with a mean or median).\n",
    "\n",
    "\n",
    "https://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=5&cad=rja&uact=8&ved=0ahUKEwiJi87y243QAhXmjFQKHbEbB-cQFggvMAQ&url=http%3A%2F%2Fhome.etf.rs%2F~vm%2Fos%2Fdmsw%2FRandom%2520Forest.pptx&usg=AFQjCNEVo5hQOuo-6p2g3Tsa_snZfjlNnA&sig2=x5GDssT9R0WG1GaFb0pXYw&bvm=bv.137901846,d.cGw\n",
    "\n",
    "### Random Forest ###\n",
    "Random Forest (RF) is a tree ensemble that has several advantages over LR.  One of the main advantages of RF is linearity of features is not expected.  Tree ensembles such as RF handle categorical features well as well as high dimensional spaces and large number of training examples. Additional advantages and disadvantages are:\n",
    "\n",
    "**Advantages**\n",
    "* One of the more accurate learning algorithms available.\n",
    "* Produces a highly accurate classifier for most datasets.\n",
    "* Efficient on large databases.\n",
    "* Supports thousands of input variables without variable deletion.\n",
    "* Estimates what variables are important in the classification.\n",
    "* It has an effective method for estimating missing data and maintains accuracy when a large proportion of the data are missing.\n",
    "* Supports methods for balancing error in class population unbalanced data sets. \n",
    "\n",
    "**Disadvantages**\n",
    "* Can overfit some datasets with noisy classification/regression tasks.\n",
    "* Biased towards categorical variables with attributes containing more levels.  This can cause the variable importance scores not to be reliable.\n",
    "\n",
    "\n",
    "https://en.wikibooks.org/wiki/Data_Mining_Algorithms_In_R/Classification/kNN\n",
    "\n",
    "### KNN ###\n",
    "K-Nearest Neighbors (KNN) is a simple algorithm that stores all available cases and classifies new cases based on distance functions.  KNN can be used for both classification and regression predictive problems.\n",
    "\n",
    "**Advantages**\n",
    "* Very simple implementation.\n",
    "* Robust with regard to the search space; for instance, classes don't have to be linearly separable.\n",
    "* Classifier can be updated online at very little cost as new instances with known classes are presented.\n",
    "* Few parameters to tune: distance and k.\n",
    "* Offers noise reduction techniques that can be effective in improving the accuracy of the classifier.\n",
    "\n",
    "**Disadvantages**\n",
    "* Large storage requirements\n",
    "* Computationally intensive recall\n",
    "* Can have poor run-time performance if the training set is large\n",
    "* Highly susceptible to the curse of dimensionality\n",
    "* Expensive testing of each instance due to computing distance to all known instances. \n",
    "* Sensitive to noisy or irrelevant attributes, which can result in less meaningful distance numbers. \n",
    "* Sensitive to very unbalanced datasets, where most entities belong to one or a few classes.\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "* Which attributes from your analysis are most important? Use proper methods discussed in class to evaluate the importance of different attributes. Discuss the results and hypothesize about why certain attributes are more important than others for a given classification task."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Deployment:\n",
    "* How useful is your model for interested parties (i.e., the companies or organizations that might want to use it for prediction)? How would you measure the model's value if it was used by these parties? How would your deploy your model for interested parties? What other data should be collected? How often would the model need to be updated, etc.? "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Exceptional Work:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# Use only the numeric columns for PCA (the dummy/object columns are excluded)\n",
    "df_copy = df.select_dtypes(['float64', 'int64'])\n",
    "\n",
    "# Multi-class target for this task\n",
    "y = df.attack_cat\n",
    "\n",
    "#############################################################\n",
    "# Percentage of variance explained by the first 1, 2, and 3\n",
    "# principal components\n",
    "#############################################################\n",
    "labels = ['first component', 'first two components', 'first three components']\n",
    "for n_components, label in enumerate(labels, start=1):\n",
    "    pca = PCA(n_components=n_components)\n",
    "    x_pca = pca.fit(df_copy).transform(df_copy)\n",
    "\n",
    "    # explained_variance_ratio_ is an array with one entry per component;\n",
    "    # formatting it with %s avoids the deprecated implicit array->scalar\n",
    "    # conversion that '%3.2f' % array relied on.\n",
    "    print('explained variance ratio (%s): %s'\n",
    "          % (label, 100 * pca.explained_variance_ratio_))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "anaconda-cloud": {},
  "kernelspec": {
   "display_name": "Python [Root]",
   "language": "python",
   "name": "Python [Root]"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
