{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Lab 2: Network Intrusion Detection"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# Imports\n",
    "from matplotlib import pyplot as plt\n",
    "\n",
    "from sklearn.neighbors import KNeighborsClassifier\n",
    "from sklearn.cross_validation import StratifiedKFold\n",
    "\n",
    "\n",
    "from sklearn.naive_bayes import GaussianNB\n",
    "from sklearn import preprocessing\n",
    "#from sklearn.decomposition import RandomizedPCA \n",
    "from sklearn.decomposition import PCA\n",
    "from sklearn.lda import LDA\n",
    "\n",
    "from sklearn.pipeline import Pipeline\n",
    "from sklearn.grid_search import GridSearchCV\n",
    "from sklearn import metrics as mt\n",
    "\n",
    "import seaborn as sns\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "\n",
    "\n",
    "%matplotlib inline"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# Load UNSW_NB15 into a Pandas dataframe\n",
    "df = pd.read_csv('UNSW_NB15_training_set.csv', encoding='utf-8-sig')\n",
    "df_five = df[['sttl','ct_dst_sport_ltm', 'ct_src_dport_ltm', 'swin', 'dwin', 'label' ]] # \n",
    "\n",
    "\n",
    "# Remove the four duplicate rows with invalid value for is_ftp_login\n",
    "df = df[df.is_ftp_login != 2]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Data Preparation:\n",
    "\n",
    "* Define and prepare your class variables. Use proper variable representations (int, float, one-hot, etc.). Use pre-processing methods (as needed) for dimensionality reduction, scaling, etc. Remove variables that are not needed/useful for the analysis."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "duplicate record deleted successfully: 82328 observations remaining\n"
     ]
    }
   ],
   "source": [
    "# Lets remove attributes that are not useful to us during this first analysis pass\n",
    "non_useful_features_list = ['id', 'attack_cat']\n",
     "# id: an internal variable used only to reference an observation; deemed not useful\n",
    "# attack_cat: first try and just predict the label. \n",
    "#             It will obviously 1:1 correlate with label\n",
    "#             We can circle back and swap it out with label \n",
    "#             to see if we get any better accuracy on an \n",
    "#             on an attack type level\n",
    "for feature in non_useful_features_list:\n",
    "    if feature in df:\n",
     "        df.drop(feature, axis=1, inplace=True)  # Let's drop this feature, as it is an internal variable used only to reference an observation\n",
    "        \n",
    "# Overwrite the existing dataframe with the new dataframe that does not contain the \n",
     "# four unwanted records and confirm we have 4 fewer records (should have 82328 observations)\n",
    "if \"is_ftp_login\" in df:\n",
    "    df = df[df.is_ftp_login != 2]\n",
    "    if len(df) == 82328:\n",
    "        print \"duplicate record deleted successfully: \" + str(len(df)) + \" observations remaining\" \n",
    "        \n",
    "# Check to see if non useful features still exist in dataframe, if so, we did something wrong\n",
    "for feature in non_useful_features_list:\n",
    "    if feature in df:\n",
    "        print \"[\" + feature + \"]\" + \"still found, check removal code. (Should not see this)\"        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Value error(No objects to concatenate): \n"
     ]
    }
   ],
   "source": [
    "# http://stackoverflow.com/questions/19482970/get-list-from-pandas-dataframe-column-headers\n",
    "\n",
     "# Surrounding code in try/except for the case where there are no object-type features to one-hot encode\n",
    "try:\n",
    "    tmp_df = df.describe(include=['O'])  # creates a temporary df with just categorical features that are of object type\n",
    "    categorical_object_col_name_list = tmp_df.columns.values.tolist()\n",
    "    for col_name in categorical_object_col_name_list:\n",
    "        #print col_name\n",
    "        tmp_df = pd.get_dummies(df[col_name], prefix=col_name)\n",
    "        df = pd.concat((df,tmp_df), axis=1)\n",
    "        df.drop(col_name, axis=1, inplace=True)  # go ahead and drop original feature as it has now been one-hot encoded\n",
    "except ValueError as e:\n",
    "    print \"Value error({0}): \".format(e)  # Note"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<class 'pandas.core.frame.DataFrame'>\n",
      "Int64Index: 82328 entries, 0 to 82331\n",
      "Columns: 191 entries, dur to state_RST\n",
      "dtypes: float64(162), int64(29)\n",
      "memory usage: 120.6 MB\n"
     ]
    }
   ],
   "source": [
    "df.info()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "# Scale the features between 0 and 1 to a standardized unit size so that \n",
     "# downstream classification algorithms do not overfit due to \n",
     "# overly large-scaled features.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<class 'pandas.core.frame.DataFrame'>\n",
      "Int64Index: 82328 entries, 0 to 82331\n",
      "Columns: 191 entries, dur to state_RST\n",
      "dtypes: float64(162), int64(29)\n",
      "memory usage: 120.6 MB\n",
      "<class 'pandas.core.frame.DataFrame'>\n",
      "Int64Index: 82328 entries, 0 to 82331\n",
      "Columns: 190 entries, dur to state_RST\n",
      "dtypes: float64(162), int64(28)\n",
      "memory usage: 120.0 MB\n",
      "{0: 37000, 1: 45328}\n",
      "Percent normal(0) is 45%\n",
      "Percent attack(1) is 55%\n"
     ]
    }
   ],
   "source": [
    "dfcopy = df.copy(deep=True) # preserve original dataframe that has our dependent variable\n",
    "dfcopy.info()\n",
    "# we want to predict the X and y data as follows:\n",
    "if 'label' in dfcopy:\n",
    "    y = dfcopy['label'].values # get the labels we want\n",
    "    del dfcopy['label'] # get rid of the class label\n",
    "    X = dfcopy.values # use everything else to predict!\n",
    "dfcopy.info() # should have 190 entries\n",
    "    # X and y are now numpy matrices, by calling 'values' on the pandas data frames we\n",
    "    # have converted them into simple matrices to use with scikit learn\n",
    "\n",
    "# determine if we have roughly the same percentage of  normal vs abnormal observations\n",
    "# ref: http://stackoverflow.com/questions/28663856/how-to-count-the-occurrence-of-certain-item-in-an-ndarray-in-python\n",
    "# ref: http://www.gossamer-threads.com/lists/python/python/809232 (print percentage)\n",
    "unique, counts = np.unique(y, return_counts=True)\n",
    "uniqueClassCounts = dict(zip(unique, counts))    \n",
    "totalObservations = len(y)\n",
    "print uniqueClassCounts\n",
    "print \"Percent normal(0) is {0:.0%}\".format(float(uniqueClassCounts[0])/totalObservations)\n",
    "print \"Percent attack(1) is {0:.0%}\".format(float(uniqueClassCounts[1])/totalObservations)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "ref:\n",
    "(a) http://stats.stackexchange.com/questions/49540/understanding-stratified-cross-validation\n",
    "(b) http://machinelearningmastery.com/tactics-to-combat-imbalanced-classes-in-your-machine-learning-dataset/\n",
    "(c) http://machinelearningmastery.com/classification-accuracy-is-not-enough-more-performance-measures-you-can-use/\n",
    "\n",
    "\n",
     "Per ref (a), stratification in general seeks to ensure each fold is representative of all strata in the data. The label feature has two classes (categories) as seen above, normal(0) and attack(1), with percentages of 45% and 55%, respectively, based on the distribution of the classes with respect to the complete dataset. If it were 50%/50%, one could argue a random sampling should create evenly distributed folds and, with a large enough sample size, should be sufficient. The main argument for stratification is to address the biasing effects caused by over/under representation of classes in any one particular sample or fold for classification algorithms that do not have inherent balancing techniques, either by selection or weighting.  According to ref (a), however, it does lead to a loss of diversity (unwanted loss of variance). Basically, the more unbalanced the classes are, the more biased a classification algorithm in general would be toward the observations tending to the more popular class. Stratification seeks to correct for that artificially, to address algorithms that cleverly try and predict the class with the highest weighting based on class distribution.\n",
    "\n",
    "Since the values in the labels are not exactly equal, a stratified Kfold validation is recommended so that each set or fold contains approximately the same percentage of samples of each target class as the complete set.\n",
     "Note: Will do both for comparison's sake to see if there is any difference\n",
    "in results and to get a feel for the methodology.\n",
    "\n",
     "Note: per ref (a), to combat imbalanced training data, one technique would be to try and collect more data with a larger dataset. However, since we did not generate this data ourselves, this would not be an option.\n",
    "\n",
     "Another thing that we can do to measure performance is to select the right set of performance metrics beyond the arguably default one, which is accuracy.  \n",
    "\n",
     "With our particular dataset there are a couple of different viewpoints on which performance metrics a model should be tuned for:\n",
     "1) False negatives are probably worse than false positives. If a malicious packet gets through undetected, it could potentially do bad things without being noticed, whereas a false positive could be rescreened or reviewed to clear the packet. Note: however, if the system has protection in depth, perhaps downstream systems may catch the packet or some effect of the malicious abnormal packet (e.g. a host-based intrusion detection system), so it may not be that bad.\n",
     "Recall may be a good metric to gauge model performance based on highlighting false negatives. Recall is defined as the number of True Positives divided by the sum of the number of True Positives and the number of False Negatives, also known as Sensitivity or the True Positive Rate. \n",
    "\n",
     "2) However, if there are too many false positives, where the system over-classifies packets as malicious when they really aren't, then it could be interfering with normal operations and impact business operations, on the assumption that it takes some significant amount of time to clear a tagged (false) positive packet. If a tool identifies too many false positives, people would lose trust in the system. Based on this view, precision, which is defined as the number of True Positives divided by the sum of the number of True Positives and False Positives, would be a good metric to use to gauge performance of models used for predicting abnormal packets.\n",
    "\n",
     "3) Based on these viewpoints, a metric which combines both would be the F1 Score, which is 2*((precision*recall)/(precision+recall)), also known as the F Score or F Measure, which tries to convey the balance between precision and recall. "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "The Variance Threshold transformer would automatically remove any features that have no variance. Although there are some small variances, there are a lot of small variances. Since we do not have enough domain knowledge of any individual feature to know what would be a reasonable threshold value to use, we will continue to use all of them and perhaps apply a different technique to reduce the number of features. Let's try PCA to see how many principal components we would need to explain at least 90% of the variation, in an effort to reduce the number of features for downstream classification tasks and improve their performance. (Ref: Book: Learning Data Mining with Python)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[  2.21890799e+01   1.79342396e+04   1.33578532e+04   2.94621370e+10\n",
      "   2.29444304e+10   2.20884887e+10   1.03046496e+04   1.36106665e+04\n",
      "   3.23512553e+16   5.72664168e+12   4.17972493e+03   3.10349621e+03\n",
      "   3.82261060e+07   1.67030265e+06   3.21772939e+09   1.32158617e+07\n",
      "   1.62196787e+04   1.93451972e+18   1.90998275e+18   1.62538616e+04\n",
      "   1.34613899e-02   5.02036285e-03   3.03537401e-03   4.34618776e+04\n",
      "   5.98313643e+04   2.94774411e-01   1.44914706e+09   1.22996215e+02\n",
      "   1.13892530e+00   7.08670434e+01   7.03866176e+01   3.49929328e+01\n",
      "   1.30310762e+02   8.11974201e-03   8.36107223e-03   4.07929478e-01\n",
      "   7.30010471e+01   1.23687849e+02   1.10024339e-02   3.88538067e-04\n",
      "   3.88538067e-04   3.88538067e-04   1.16470772e-03   4.00675012e-04\n",
      "   3.88538067e-04   1.18449036e-02   3.88538067e-04   4.12811663e-04\n",
      "   3.88538067e-04   3.88538067e-04   4.00675012e-04   3.88538067e-04\n",
      "   4.00675012e-04   3.88538067e-04   3.88538067e-04   3.88538067e-04\n",
      "   3.88538067e-04   3.88538067e-04   4.00675012e-04   3.88538067e-04\n",
      "   3.88538067e-04   3.88538067e-04   4.00675012e-04   3.88538067e-04\n",
      "   4.00675012e-04   3.88538067e-04   3.88538067e-04   3.88538067e-04\n",
      "   3.88538067e-04   4.00675012e-04   3.88538067e-04   1.06775261e-03\n",
      "   4.00675012e-04   3.88538067e-04   3.88538067e-04   3.76400826e-04\n",
      "   3.88538067e-04   3.88538067e-04   3.88538067e-04   3.88538067e-04\n",
      "   3.64263290e-04   4.00675012e-04   3.88538067e-04   4.00675012e-04\n",
      "   3.88538067e-04   3.88538067e-04   3.88538067e-04   3.88538067e-04\n",
      "   4.00675012e-04   3.88538067e-04   7.40389694e-04   3.88538067e-04\n",
      "   3.88538067e-04   3.88538067e-04   3.88538067e-04   3.88538067e-04\n",
      "   4.00675012e-04   3.88538067e-04   3.88538067e-04   4.00675012e-04\n",
      "   3.88538067e-04   3.88538067e-04   3.88538067e-04   4.12811663e-04\n",
      "   4.00675012e-04   3.88538067e-04   3.88538067e-04   3.88538067e-04\n",
      "   3.88538067e-04   6.31220918e-04   3.88538067e-04   4.00675012e-04\n",
      "   3.88538067e-04   4.00675012e-04   3.88538067e-04   4.00675012e-04\n",
      "   8.14363673e-03   3.88538067e-04   6.31220918e-04   3.88538067e-04\n",
      "   3.88538067e-04   3.88538067e-04   4.12811663e-04   3.88538067e-04\n",
      "   4.00675012e-04   3.88538067e-04   3.88538067e-04   4.00675012e-04\n",
      "   7.76773975e-04   3.88538067e-04   3.88538067e-04   3.88538067e-04\n",
      "   3.88538067e-04   3.88538067e-04   3.91998962e-03   3.88538067e-04\n",
      "   3.88538067e-04   7.04002758e-04   3.88538067e-04   3.88538067e-04\n",
      "   3.88538067e-04   3.88538067e-04   3.88538067e-04   3.88538067e-04\n",
      "   3.88538067e-04   4.00675012e-04   3.88538067e-04   6.55482712e-04\n",
      "   6.31220918e-04   3.88538067e-04   2.49452142e-01   3.88538067e-04\n",
      "   3.88538067e-04   4.00675012e-04   4.00675012e-04   3.88538067e-04\n",
      "   2.29644355e-01   4.08722041e-02   3.88538067e-04   3.88538067e-04\n",
      "   3.88538067e-04   3.88538067e-04   3.88538067e-04   3.88538067e-04\n",
      "   3.88538067e-04   3.88538067e-04   4.00675012e-04   4.12811663e-04\n",
      "   3.88538067e-04   3.88538067e-04   2.44708077e-01   3.15710195e-04\n",
      "   1.92176598e-01   1.84492907e-02   1.66690389e-02   9.05262404e-02\n",
      "   6.07289906e-05   5.11158576e-03   1.09306872e-04   2.19777418e-02\n",
      "   3.52125459e-04   2.47175335e-03   3.64263290e-04   4.85837826e-05\n",
      "   1.21463883e-05   7.76148666e-02   2.49506449e-01   2.42768556e-01\n",
      "   2.18733267e-02   1.21463883e-05]\n",
      "We didn't find any features to have 0 variance, moving on.\n",
      "190\n"
     ]
    }
   ],
   "source": [
     "# Use the VarianceThreshold transformer in scikit-learn to remove any feature\n",
     "# that does not have a minimum level of variance, using 0 as the threshold\n",
     "# based on limited domain knowledge. \n",
    "from sklearn.feature_selection import VarianceThreshold\n",
    "vt = VarianceThreshold()\n",
    "Xt = vt.fit_transform(X)\n",
    "print vt.variances_\n",
    "if len(Xt[0]) < 190:\n",
    "    print \"we found some features to have 0 variance and hence will remove them\"\n",
    "else:\n",
    "    print \"We didn't find any features to have 0 variance, moving on.\"\n",
    "print len(Xt[0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[-0.21372809, -0.12445524, -0.15181591, ...,  1.18737468,\n",
       "        -0.15128109, -0.00348521],\n",
       "       [-0.21372873, -0.12445524, -0.15181591, ...,  1.18737468,\n",
       "        -0.15128109, -0.00348521],\n",
       "       [-0.21372936, -0.12445524, -0.15181591, ...,  1.18737468,\n",
       "        -0.15128109, -0.00348521],\n",
       "       ..., \n",
       "       [-0.21373043, -0.13192245, -0.15181591, ...,  1.18737468,\n",
       "        -0.15128109, -0.00348521],\n",
       "       [-0.21373043, -0.13192245, -0.15181591, ...,  1.18737468,\n",
       "        -0.15128109, -0.00348521],\n",
       "       [-0.21372851, -0.12445524, -0.15181591, ...,  1.18737468,\n",
       "        -0.15128109, -0.00348521]])"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Scale data to have zero mean and unit variance to ensure that\n",
     "# downstream classification algorithms that are sensitive to, and unfairly \n",
     "# weight, associations due to large-scaled features are desensitized to that\n",
     "# effect. Also, when performing PCA for feature reduction, since PCA yields a feature\n",
     "# subspace that maximizes variance along the axes, it makes sense to standardize\n",
     "# the data, especially since the features appear to be on different scales.\n",
    "\n",
    "scaler = preprocessing.StandardScaler().fit(X)\n",
    "scaler.mean_\n",
    "scaler.scale_\n",
    "scaler.transform(X)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[  3.12577565e+18   7.21829322e+17   2.92489964e+16   5.49388956e+12\n",
      "   2.94308575e+10   2.28671040e+10   1.31082682e+10   3.18421633e+09\n",
      "   1.22443421e+09   3.70924227e+07   8.48041068e+06   7.99521381e+05\n",
      "   3.59673237e+04   2.96460691e+04   1.05545522e+04]\n",
      "[  8.06264871e-01   1.86189186e-01   7.54450766e-03   1.41709791e-06\n",
      "   7.59141703e-09   5.89835762e-09   3.38115635e-09   8.21339103e-10\n",
      "   3.15831460e-10   9.56764680e-12   2.18744337e-12   2.06229133e-13\n",
      "   9.27743792e-15   7.64692887e-15   2.72244896e-15]\n"
     ]
    },
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAQEAAADOCAYAAAA+JbcoAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAGyBJREFUeJzt3Xl8nFW5wPHfTNIkbdZmKVBLpQg8VEGWVrYLtS0guyJ6\n5SMCWuUCggtyRSnegrhw4SIIV1ygxQplEZC1FISryNLCR2QTIuWh7BS6pUvSNkubJveP8046TZOZ\nM5l5Z8k838+nn2bed+Y9Z9LOM+c9y3Mivb29GGOKVzTXFTDG5JYFAWOKnAUBY4qcBQFjipwFAWOK\nnAUBY4pcaa4rMBQiciBwuapOS/K83YB7VPWTweOdgXnB6TXAKaraGWpljclzBdcSEJELgNlAeZLn\nnQrcDjTGHf4e8EdVnQq8CnwjpGoaUzAKsSXwBvB5gm90EdkbuDY4txr4uqqux33TTwHejHvtS8C4\n4Oca4L1sVNiYfFZwLQFVvRfojjt0A3COqk4HHgZ+GDzvIVXt6PfypcC3RKQZOBq4KwtVNiavFVwQ\nGMBE4Dci8hgwAxib4LlXAqer6l7AeWztHzCmaBXi7UB/r+E+2EtF5BBgx37nI3E/rwHagp+XAXVZ\nqJ8xeW04BIFzgHkiUgr0sH1nX/wKqe8A14lISfD43CzUz5i8FrFVhMYUt+HQJ2CMSYMFAWOKXEH1\nCZzwn/f3nnPiXkzec0zWyhw9ehRr17ZnrbxiKbMY3mOuymxqqo4kf9ZWBdcSWN2W3Vm+paUlyZ9k\nZeZ9ecVUZqpCbQmISBQ3xVdwPfdnq+qrcedPAGYBm4G5qjon2TVbWm2qvzGZFHZL4ASgV1UPxX3Y\nL4udCIb0rgaOAKYCZ4pIU7ILrrYgYExGhRoEVPV+4Mzg4S7A2rjTE4ElqtqmqpuBhbi5/glZS8CY\nzAq9Y1BVe0TkD8CJwBfjTtUArXGP1wO1ya63uq2D3t5eIpGU+j6MMYPIyuiAqn5NRMYAz4rIxGBh\nTxsuEMRUA+sSXWdkeQkdXVsYVVVB1aiyEGu8raam6qyVVUxlFsN7zFWZqQi7Y/BUYJyqXg50Altw\nHYQAi4HdRKQOaMfdClyZ6HpjRo/i3eXr0bdaGL9Ddn6xTU3VrFq1PitlFVOZxfAec1lmKsLuGLwH\n2E9EnsAt8z0POElEzlDVbuB84FFgETBHVZcluljT6FGA9QsYk0mhtgRUtR04OcH5BcAC3+vtUG9B\nwJhMK6jJQmP6WgL9c4UYY4aqsIJA/UjA5goYk0mFFQSCloAFAWMyp6CCgPUJGJN5BRUEairLKCuN\n0t7VTXtnd/IXGGOSKqggEIlEaKitALK/mtCY4aqgggBAY63rHLQRAmMyowCDQNASsH4BYzKi4IJA\n7HbAOgeNyYyCCwLWEjAmswouCFhLwJjMKrgg0FhjowPGZFLBBYGayjJGlEbZ0LGZji6bK2BMugou\nCEQiERqsNWBMxhRcEADrHDQmkwoyCFjnoDGZk3ZSERE5XlUfHOB4KfB7XJbhMuDnqjo/7vx5wBnA\nyuDQWaq6xKdMawkYkzmZyCz0E2C7IACcCrSo6ukiMhp4CZgfd34ScJqqvphqgX0tAesTMCZtmQgC\ng+X+vhO4K/g5ittlKN4kYKaI7AQsCJKRemmsiSUXsfUDxqQrE30CvQMdVNV2Vd0oItW4YPCjfk+5\nHTgbmAYcKiLH+hZofQLGZE7YKcd3xmUcvk5V7+h3+lpVbQuetwDYD3go2TWbmqppaKiitCTK+vbN\nVNeMpKI83O0TiiVXve07MHzKTEVonx4R2QF4BDhXVf/W71wN0CwiewIdwHTgRp/rxnK419eUs3Jt\nB/pWC2MbKzNa93jFlKve9h0YPmWmIsw+gZlAHTBLRC7G3TbMBipVdY6IzAQex21K8ldV/XMqhTbW\nVrBybQctrZ2hBgFjhjvvICAipwCfAH
4OfFFVbw5OHTzQ81X1PNxmIwNS1VuBW/2ruq2tw4TWOWhM\nOrw6BkXkcuBY4CRc4JghIlcBqGpOeucaYhmGbJjQmLT4jg4cBZwGdAadeUcCx4RWKw99qwlthMCY\ntPgGgdgmorHhwPK4Yzlhw4TGZIZvELgTuAOoD6b7PgXcFlqtPNjUYWMywysIqOoVuCG8u4DxwCxV\nvSzMiiVTV1VOSTRC68ZNbNq8JZdVMaag+XYMjgWmq+oFwK+Ak4N5ADkTjUaorykHLK+AMenwvR24\nFXgr+PlD3O3AvFBqlILYHgR2S2DM0PkGgXpVvR5AVbtUdTbQGF61/NhqQmPS5xsEOkSkb0hQRA4H\nNoZTJX82TGhM+nxnDJ4N3CIi83DThN/DzRvIKRsmNCZ9XkFAVV8C9hKRBmBzbPVfrtkwoTHp8woC\nIrIfcBFQD0REBABVnR5e1ZLb2hKw9QPGDJXv7cDNwPVAM4MkEcmF0dXlRCMR1m3YxObuHkaUFmTe\nVGNyyjcItKvqdaHWZAhKolHqa8ppae1kTVsnO9SPynWVjCk4vkHgERH5Ni5JSN8NuKq+F0qtUtBQ\nU0FLayctFgSMGRLfIBAbCTg/7lgvsGtmq5O6xtoK9H3rHDRmqHxHByakemGPfQdOAGbhshDPVdU5\nqZYB1jloTLp8RwcEOAeows0TKAEmqOqUBC8bdN+BIEBcjUs73gEsEpH7VXVVqm/Apg4bkx7f7vQ7\ngHW4jMAvAWNwIwWJ3In7po+VE7/vwERgiaq2qepmYCGQKKAMyiYMGZMe3z6BqKpeIiIjgBdww4VP\nJ3qBqrYDDLLvQA3QGvd4PVDrW+l4jRYEjEmL9xChiJQDrwOTVHWhiFQke1GCfQfacIEgphrX0kiq\nfzrl0fWVRCPQuqGLutGVocwVKJZc9bbvwPApMxW+QeAW3P38V4BnRORo4INEL0i07wCwGNhNROqA\ndtytwJU+FRkoh3tddTlr2rp4/e0WxtSN9LmMt2LKVW/7DgyfMlPhOzpwnYjcpKrrRWQq8CncBzyR\nZPsOnA88iutonKOqy1KqeZzGmgrWtHWxel1HxoOAMcNdwiAgImeq6g3Bh5jYmoHA3rgdiQfkse/A\nAmBBSrUdREPtSFjaav0CxgxBspZApN/fealvNaElFzEmZQmDQCybELCLqs7IQn2GxIYJjRk63670\nvUSkKtSapMGGCY0ZOt/RgR7gPRFR3Aw/IPf5BGIsuYgxQ+cbBH4Qai3SVF9TQQRYu76LLT09lEQt\nr4Axvnw3H3kCN8GnBzfUFwU+FmK9UlJaEqWuupye3l7WtnXlujrGFBTfBUQ3AYfg0ostBvYFFuFW\nCeaFhtoK1q7vYnVbJ402V8AYb77t5inAx3FrAM4EDsQtD84bsfTj1jloTGp8g8CHwWq/xcAnVfVf\nuPn+ecOGCY0ZGt+OwQ9EZCbwF+B/gpmDeTVk2GjJRYwZEt+WwDeAt1X1H7hVgV8GvhlarYbAkosY\nMzS+LYGf4lYSoqq/wu1MnFfsdsCYofENAkuAa0SkHrgNuEVV3wmtVkPQEGxTvnZ9Fz09vUSjeb3c\nwZi84TtP4NeqeihwNC7l+H0isjDUmqVoRGkJtVVlbOnpZd0GmytgjC/vqXUiUgscAXwG14JIlk8g\n62yY0JjU+U4Wmo9LMnoPMEtV/x5qrYaoobaCNz9so6W1gz12rst1dYwpCL59AjcAD6tqd/8TscQj\ng71QRA4ELlfVaf2OnwecAawMDp2lqks86zMgGyEwJnW+6cXmJzh9Ni5IbEdELsDtXrRhgNOTgNNU\n9UWfOviwJcXGpC4Ty+0SdcO/AXx+kHOTgJki8pSIXJiBetgwoTFDkIkgMOhW5ap6L7DdLUTgdlwr\nYhpwqIgcm25FLK+AManz7RMIw7Wq2gYgIgtwHY8PJXtRonTKNXVuV+I16ztpaKjK2FyBYslVb/sO\nDJ
8yU5GtILDNp1FEaoBmEdkTl6loOnCjz4WS5XCvGTWCtvbNvPHOakZXlw+xulsVU65623dg+JSZ\nikzcDvjsHNQLICJfFpEzghbATOBx4AmgWVX/nIG6uPTj2EIiY3wl23fg4kTnVfUnyfIMquq7uIQk\nqOrtccdvBW71r6qfxtoK3l7WxurWTnYfl+mrGzP8JGsJRII/BwJfwKUX2wQcB3wi3KoNjY0QGJOa\nZPsOXAogIouAg+N2Gr4G6L+/YF6wuQLGpMa3T6CJbYcCR+DyDeadrcOE1idgjA/f0YHZwHMi8hAu\ncBwPXBNardLQ1zFoWYeN8eK7lPhK4HRgOW5L8i+p6m/DrNhQxVYSrm7tpKd30HlMxphAKkOEgrsF\nuB7YJ5zqpK+8rISqkSPo3tJD28ZNua6OMXnPKwiIyOXAscBJQAkwQ0SuCrNi6bDpw8b4820JHIVb\nDdgZTPQ5EjgmtFqlyYYJjfHnGwR6gr9jN9nlccfyjqUfN8afbxC4E7gDqA+SgTyJSzial/qSi9gI\ngTFJ+SYVuUJEjgLeBcYDl6jqg6HWLA0N1hIwxlsqowNLgQeA+4A2EZkSTpXSFz9MaIxJzDfR6K+B\nE4A34w734pYA552GuNGB3t5eIhHbg8CYwfjOGPwMIKpaEO3rkeWlVFaUsrGzm/Xtm6mpzKsNlI3J\nK763A2+ROJdg3rFhQmP8+LYE1gCvisjTuB2IAFDVr4dSqwxorB3Jeys20NLawa5ja3JdHWPylm8Q\n+HPwp2D0zRpss5aAMYkkyyy0o6ouJ43cAQk2HzkBmAVsBuaq6pyhljGQBtuSzBgvyVoCc3DLhp/A\njQbE9wv0ArsmevFgm4+ISClwNW7vgQ5gkYjcr6qrUqp9ArZ+wBg/yTILHR/8PWGI149tPjKv3/GJ\nwJK4lOMLgSnA3UMsZzsNFgSM8eI7T0CAc4AqXGugBJigqgknDKnqvSLy0QFO1QCtcY/XA7U+dfFN\npzyyamufQGNjVVpzBYolV73tOzB8ykyFb8fgHcD9wGHAH3ArCJvTKLcNFwhiqvFLXZ5SDveR5aV0\ndHXz9ntrqB41tLkCxZSr3vYdGD5lpsJ3nkBUVS/BjRC8AJyIy0Dsq//X8GJgNxGpE5Ey3K3AMylc\nz4slHTUmOd8g0C4i5cDrwCRV7QIqUiin/+Yj3cD5wKPAImCOqi5L4XperHPQmOR8bwduAeYDXwGe\nEZGjcbkGk0qw+cgCYEFKtU2RDRMak5xvotHrgC8EQ3hTgRtwtwR5zVoCxiSX0jZkbpCgz97AT0Ko\nU8Y09CUXsSBgzGCS3Q4U1KKh/izNmDHJeW1DBiAiY4BDgW7gKVVdG3Ld0ha/ktDyChgzMN+U418B\nXgZOAWYAzSJybJgVy4TKilIqykro3LSF9q7uXFfHmLzkOzowCzc0+AFAMAtwPvBQWBXLhEgkQmNt\nBUtXbaRlXSeVO47IdZWMyTu+8wTagL5x/GDYryC297FhQmMS820JvAI8JCJzcX0CXwKWicjpAKp6\nc0j1S1tf+nHrHDRmQL5BIIprCRwdPG4P/kzDzQbM2yAQ6xxcvqY9xzUxJj/5BoEfqeqH8QdE5ABV\nfTaEOmWUjK8D4NnFKzl5+u6Ul5XkuEbG5BffPoG/i8i/A4jICBG5ArcrUd6bsFMNHxtbQ3tXN8+8\nujzX1TEm7/gGgWnAt0XkDuA5YCRuxmBBOHzyOAD++txSent7kzzbmOLiGwTeAx7HTRYaDTymqtld\nJJ2GyTKG2qoyPmjZyGvv5v0cJ2OyyjcINAM749KCHQn8QETuCa1WGVZaEmXafh8B4C/PL81xbYzJ\nL75B4PvAX4AfAu/jEpBmPAlImD6970coLYnw0pIWVq6z4UJjYnyDwCG4lGIn4UYUTgN2CqtSYait\nLOOAiTvQCzxmrQFj+vgOER4F7A+8oKptInIkbi3B+YleJCIR4DfA
Pridi85Q1bfizp8HnAGsDA6d\npapLUnsL/o6YPI6nm5fz1MvLOPGwCVSU+b59Y4Yv309BT/B3rGu9PO5YIicC5ap6SLAJydVsm4xk\nEnCaqr7oWY+07LJjDbuNq+WNpa0807ycafuPy0axxuQ139uBO3EZh+uDb+8ngds8XncowfZlqvp3\nYHK/85OAmSLylIhc6FmXtBwxyX3w//K8DRcaA/7pxa4AbgTuAsYDl6jqZR4v7b+/QLeIxJd5O3A2\nbh7CodlYnrz/Hk2Mri5n2ep2Xn3HhguN8b4pVtVHgEdSvH4bbk+BmKiqxt9GXBu3C9ECYD+SLE/O\nxEYOxx+6K/MeXsyTryxj6gED7Y2S+TJTVQxlFsN7zFWZqQi7Z2wRbi/DP4nIQbjViACISA0uOcme\nuP0Ip+NaGwllYiOHSbs3cPujUZ57dQXNr69gh9GjBn1uMW1YYZuPDJ8yU+HbJzBU9wJdIrIIuAr4\nXtzeA23ATNxMxCeAZlXNyvbnNaPKOOjjseFCr8zpxgxbobYEVLUX+Ga/w6/Hnb8VuDXMOgzm8Enj\nWPjKMha+8iEnHjaBkeU2XGiKU9gtgbz10R2r2WNcLR1dW3i62VYXmuJVtEEA4IjJOwPw1+eX0mPD\nhaZIFXUQ2G+PRkZXl7N8TTuvvr0m19UxJieKOgiURKNM399WF5riVtRBAGDKPmMZURrl5TdXs8Ly\nEJoiVPRBoDoYLgTXN2BMsSn6IABuuBBg4SvL6LCdikyRsSAAjN+hGtm5js5NW1j0yrLkLzBmGLEg\nEDgilozUhgtNkbEgENh390YaaspZsbaD5rdsuNAUDwsCATdcGMs18H6Oa2NM9lgQiHPYPmMpK43S\n/NYalq3emOvqGJMVFgTiVI0cwUGf2BGw1YWmeFgQ6CeWfmxh8zLaO2240Ax/FgT6GTemij3H19Fl\nw4WmSFgQGMCR8asLe2y40AxvoWbS8Nh34ARgFrAZmKuqc8Ksj699dmuksbaCles6eP61FezSVJnr\nKhkTmrBbAn37DuBSiV0dOyEipcHjI4CpwJki0hRyfbxEo5G+4cK5D/6Lu594k78+v5TnXlvJG0tb\nWbWug83dW3JcS2MyI+ycWtvsOyAi8fsOTASWxGUbXghMAe4OuU5eDttnJx5Y9Dbvr9jA+ys2DPic\nUeWl1FWXU1tZRl1VGbVV5dRVur9rRo0gGo0MqezlbV20rsvuisZsl1kM7zEXZUYikZQTjYYdBAbc\ndyBIO97/3HqgNuT6eKusGMF/nT6Z5a1dLF3eSuuGTazb0MW6DZto3dhF64ZNtHd1097VzYctNqfA\n5I/5+++c0vPDDgKJ9h1owwWCmGpgXZLrRbKZw72pqZp9slaaMbkRdp/AIuBYgP77DgCLgd1EpE5E\nynC3AgW13bkxw0EkzP344kYHPhkcmoHbf7BSVeeIyHHAJUAEuFFVfxdaZYwxAwo1CBhj8p9NFjKm\nyFkQMKbIWRAwpsgVxAZ8yaYfh1BeKfB7YBegDPi5qs4Pq7x+ZY8BngOOUNXXkz0/A+VdCHwWGAH8\nRlXnhlxeKXAT7nfbDfxHmO9TRA4ELlfVaSLyMeAPQA9uA9xzQy5vX+B/ce+zCzhdVVeFWWbcsVOA\nbwWzdRMqlJbAoNOPQ3Iq0KKqU4BjgOtCLg/o+4D8DsjKFDMR+TRwcPB7nQqkNstkaI4FSlT134Cf\nApeFVZCIXADMBsqDQ1cDF6nqp4GoiHwu5PKuAc5V1em4HbovzGR5g5SJiOwHfN33GoUSBLaZfgxM\nTvz0tN2JW9gE7ne0OeTyYn4B/Bb4MEvlHQU0i8h9wAPAg1ko83WgNGjd1QKbQizrDeDzcY8nqepT\nwc8P49athFneyaoamxtTCnRkuLztyhSRBuBnwHd9L1AoQWDA6cdhFaaq7aq6UUSqgbuAH4VVVoyI\nfA1Yqar/h5s3kQ2NuHkbX8Rt
IX9bFsrcAEwAXgOuxzWXQ6Gq9+Ka4jHxv9eMT1PvX56qrgAQkUOA\nc4FfZrK8/mUGn4k5wPnARjz/HxVKEEg0/TgUIrIz8Bhwk6reEWZZgRnAkSLyN2Bf4OagfyBMq4FH\nVLU7uC/vFJHGkMv8HvBnVRVcH8/NwYzRbIj/P+MzTT1tInIyrj/rWFVdHXJx+wO74VqTtwMTRSTp\nrXNBdAziph8fD/xpgOnHGSciOwCP4O7n/hZmWTHBfWqs/L8BZ6nqypCLXQh8B/iliIwFRuECQ5jW\nsPX2ah3u/2BJyGXGvCAiU1T1SVxfz2NhFiYipwJnAlNVNeyAE1HV54C9g7I/Ctyuqucne2GhBIF7\ncd+Si4LHM0IubyZQB8wSkYuBXuAYVe0KudyYrEzjVNUFInKYiDyLazqeo6phl30N8HsReRI3IjFT\nVcO4Vx7I94HZIjICt3blT2EVFDTNrwXeBe4VkV7gCVW9NKQih/zvZtOGjSlyhdInYIwJiQUBY4qc\nBQFjipwFAWOKnAUBY4qcBQFjipwFAVNQRORTInJ5rusxnFgQMIXm40DY06mLik0WKkDBEuCLcEuO\nJwIvA6eo6oDbKAdry3+Emzv/HHAGLk/CbNz8/S3AVao6T0S+ChwHfCT4cy0wHpgOtOCm2+4EzAfe\nBHYH3gFOVdV1InI8bolwBHgLN/15lYi8DczDrVwchVtb/2Kwxv+3QH3wfr6tqv8Ukbm4RWOTgnpc\nCtwXvNdK4CrcqscbcNOOO4EZqvrm0H+zxclaAoXrYNw03z2Bj+I+XNsJ1gRcjUtSsjfu3/w44Me4\nnAl7A4cDPxaRvYKXfQr4DC4N/FXAAlXdB/fBjpWzF3C1qu6FWxH442Abud8Bn1XVfYGn2TYXwypV\nPRC3evCi4NhNwAWqOhk4C4hfrDVOVQ/DJT25SlVbgYuBB1T1v3GLkX6hqgcAvwIO8v7tmT4WBApX\ns6rG9k5fjPsmHcjBwMLYc1X1q6r6AO6b/cbg2Grct+zU4DWLVHWjqr6Hm5MeW2jzLjA6+Fnj1ubf\nhAskBwB/V9X3g+M3BMdjHonVHagXkUpcwJkrIi/iljKPEpFYGY8GBTXHlRtvAfBrEZmDW5SUjaXQ\nw06hLCAy2+uM+7mXwdeOb44/F7dUuP/zo2z9/7BNoo9Blm3H78gaS7wS6Xfd+GvG1zlW3xKgQ1X3\nj6vfWFVdKyLxzx+Qqt4tIk/jVpieh8tadGai15jtWUtg+PsHcEBcboJf4prXj+H6BmKB4XPA4wO8\nfrDgIiISv6nMQ8CzwIEiMj44fiYJlusGm9EuEZGvBBc8EnhykKfH6tFNEFhE5I/Agao6G5cJar/B\nyjKDsyAwPAzauxvcBnwXeFREXsZ1vs3Fdd7VB8ceB36mqi8luXb8z2uAS0WkGWjCJWNdifvg3yci\nr+D6FL6ZpI6nAmeIyD+BnwNfGuT5scfPAgeJyGXB8y8SkeeBK3F9BCZFNjpgUhYkrHhcVSfkui4m\nfdYnMAyISAVuM9f4iB4JHl+sqmEkELVvj2HCWgLGFDnrEzCmyFkQMKbIWRAwpshZEDCmyFkQMKbI\nWRAwpsj9P4XNga0MrC+QAAAAAElFTkSuQmCC\n",
      "text/plain": [
       "<matplotlib.figure.Figure at 0x10b124a8>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "from sklearn import linear_model, decomposition, datasets\n",
    "\n",
    "pca = decomposition.PCA()\n",
    "pca.fit(X)\n",
    "plt.figure(1, figsize=(4, 3))\n",
    "plt.clf()\n",
    "plt.axes([.2, .2, .7, .7])\n",
    "plt.plot(pca.explained_variance_[:15], linewidth=2)\n",
    "plt.axis('tight')\n",
    "plt.xlabel('n_components')\n",
    "plt.ylabel('explained_variance_')\n",
    "print pca.explained_variance_[:15]\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[  8.06264871e-01   1.86189186e-01   7.54450766e-03   1.41709791e-06\n",
      "   7.59141703e-09   5.89835762e-09   3.38115635e-09   8.21339103e-10\n",
      "   3.15831460e-10   9.56764680e-12   2.18744337e-12   2.06229133e-13\n",
      "   9.27743792e-15   7.64692887e-15   2.72244896e-15]\n"
     ]
    },
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAQEAAADHCAYAAAAZKubgAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAHBRJREFUeJzt3Xl8XFXdx/HPZG+aNm2W0kIpFNAfaItACwgWZEcKLoiP\nvEBAq4iK8oi4gi/B/cGlCuJGKSCLCygW2QQfhFK2ByzIUoQfSLGldE3aJM2eNHn+OHfKNM3MPZOZ\nm8nM/N6vV1/tbPectM0v595zzvfGBgcHMcYUr5Jcd8AYk1tWBIwpclYEjClyVgSMKXJWBIwpclYE\njClyZVEeXERiwC+BdwDdwLmqujLh9Y8AFwH9wPWq+uso+2OM2VnUI4EPAJWqejhwMfCTIa//CDgG\nmAd8UURqI+6PMWaIqIvAPOBeAFV9Apg75PVngcnAuOCxrVwyZpRFejoATARaEx73i0iJqg4Ej18A\nngLagT+raluqg733i38Z/MTJ+/Gu2dOi6e0wJk+uZsuWzlFrr1jaLIavMVdtNjZOiKXz/qiLQBsw\nIeHx9gIgIrOBk4E9gA7gtyJymqreluqAXf2DNDZOSPWWrBvt9oqlzWL4GnPVZjqiLgKPAqcAfxKR\ndwLPJ7zWCnQCPao6KCIbcacGKa1e28qmTVsj6exwGhsnjGp7xdJmMXyNuWwzHVEXgSXA8SLyaPB4\ngYicAYxX1cUisgh4RER6gFeB34QdsKm1K7LOGlOMIi0CqjoIfGbI0y8nvH41cHU6x2xu685Cz4wx\ncXm3WGhzWw8DAzaJYEy25FURmDyhkm0Dg7S09+S6K8YUjLwqAlPqqgFoarVTAmOyJb+KwGRXBJqt\nCBiTNXlWBNzCQpshMCZ78qoI7GKnA8ZkXV4Vgcb46YBNExqTNXlVBGwkYEz2ZVwERGRqNjriozG4\nJrC5rZsBi0o3JiuyMRK4JwvH8FJVUcbE6nL6tw3S2t47Ws0aU9CyUQTS2raYqfraKsCmCY3Jlmzs\nHUg6Lk8VLyYiuwB/CD4fAw4Avqqqi1I1Vl87jtfWbaWptYt9plsQkTGZinoX4fZ4MRE5FBcv9gEA\nVd0AHA0QbDP+LnBN2AEb4iMBmyEwJityHS8WdxXw6WDXYUr1E10RsBkCY7IjGyOBVNcEwuLFEJH3\nAitU9d8+je09w+WOtHX2jVpiS7Gk0ViyUOG0mQ7vIiAis4Cjgs8sVdVngpcuSPGxpPFiCc4CrvDt\nR3lwCWJtU8eoJLYUUxqNJQsVTpvp8DodEJGzgb8Ae+EyAZeIyMcBVPWRFB99FJgfHGNovFjcXFV9\n3LfD8dmBzW3d2G3Vjcmc70jgi8AhqtoMICLfA5YC14V8LixerIEdTxdCVVWUUTOunPauPto6eqmt\nqUzn48aYIXyLQGm8AACoapOIDB3W78QjXqwJOMizD9vV11bR3tVHU2u3FQFjMuRbBJ4VkSuAa4PH\nn8DdOCQnGmqrWLV+K81t3ey9m60VMCYTvlOEnwR6ccP/3wB9wPkR9SmUTRMakz1eIwFV7QK+EnFf\nvMUXDFkRMCZzKYuAiDytqgcF5/+Jl+JjwKCqlkbauyQaat1uQts/YEzmUhYBVT0o+H2n0wYRydkV\nufrtIwGLGTMmU77rBB4f8rgEWB5JjzzErwk0t9paAWMyFXY68ABulSBDpgT7gTui61Zq1VVljK8q\no6O7n62dfUwcX5GrrhiT98JOB44BEJErVfXzo9MlP/UTq+jobqe5rduKgDEZ8F0n8FURORWowV0U\nLAVmquqlkfUsRH1tFas3ttPU2s3MaRNz1Q1j8p5vEbgNqAb2AR4GjgS81/tHIT5DYBcHjcmM72Ih\nAY7B7QX4IXAIsFtUnfLRYDFjxmSF70hgg6oOishLwP6qeqPPFGGqeLHg9YOBhcHD9cBZquqVIFpv\nC4aMyQrfkcALInIVbufgF0Tka0C5x+
e2x4sBF+PixRItAj6mqkfiEoj28OyPjQSMyRLfInA+cKuq\n/gu4DJgGnOnxuaTxYiLyVqAZuEhElgJ1qvqKb8e3Lx22XAFjMuJ7OvBkwurBO/BfI5AqXqwBOAxX\nYFYCd4nIclVdmuqAiakp1VVldHb3UzW+KtJpwmKJpLJ4scJpMx3e1wRE5AhcMehJ4/ip4sWagX+r\n6ssAInIvbqSwNNUBE6Oa6iZU0dndjq7cxJ5To5kmLKZIKosXK5w20+F7OjAXeAjoFJFtIjIgIts8\nPpcqXmwlUCMiewWPjwBe8OwPkHBK0GLXBYwZKd+txI3JXhORU1T1riQvh8WLfQL4vYgAPKaqf02j\n73YPAmOyIBuR498Ghi0CHvFiS4FDR9qwTRMak7m8uxdhIpsmNCZz2SgCOZufe3PpsBUBY0Yq6tuQ\nRWr7HYrbumytgDEjlNdFYHxVGZUVpXT1bKOzpz/X3TEmL+X1NYFYLGbThMZkyLsIiMiZIvI9EakW\nkXMSXjosgn552x41ZtOExoyIb8bg5bhFPx/ETSsuEJGFAKqa0+8+ix83JjO+I4ETgbOBblVtA44H\nToqsV2mwcBFjMuNbBOLr/eOX4CsTnsspWytgTGZ8i8CtwC1AnYhciIsY+11kvUpDvRUBYzLiu3fg\nByJyIrAKmAF8Q1XvDvucR7LQhcC5wMbgqU+lkykAtnTYmEz5XhjcFThGVb8MXAWcLiK7eHw0LFlo\nDnC2qh4T/EqrAABMGFdORXkJnT39dHbbWgFj0uV7OvBb3NZfgLW404GbPD6XNFkoMAe4WEQeDiLL\n0haLxWya0JgM+BaBOlW9GkBVe1T1GlwyUJhhk4USHv8e+DRwNDBPROZ79mcHNkNgzMj5biXuEpGT\n4vv9ReRYoMPjc6mShQCuDKYcEZG7gQOBe1IdcLjUlOlTJ/D8yma6tw1GEuVULJFUFi9WOG2mw7cI\nfBq4WURuwi0TXo1bNxDmUeAU4E9Dk4VEZCKwQkT2Bbpw9zW4NuyAw0U11VS4O6SveqM161FOxRRJ\nZfFihdNmOnxnB54BZolIPdAX/+ntISxZ6GJcpmA38HdVvTet3gdsmtCYkfMqAiJyIHAJUAfEgjiw\n7TcsTcYjWei3uIuOGbFpQmNGzvd04EbgamAFOQwRSSZ+YdBmB4xJn28R6FTVn0fakwxMrC6nvKyE\n9q4+unr6GVeZjehEY4qD73fLfSJyAXAf7vwdAFVdHUmv0hRfK7B+cyfNbd1Mb6zJdZeMyRu+RSA+\nE3BRwnODwF7DvDcnGmqDItBqRcCYdPjODsyMuiOZsouDxoyM7+yA4O4ZWINbJ1AKzAzuJjwm2JZi\nY0bGd9nwLUALbkXfM8AU3EzBmPHmSMCWDhuTDt8iUKKql+E2Az2N2x044jsHRcGmCY0ZGd8i0Cki\nlbiFPnOCOxNXRdet9MV3Eto1AWPS4zs7cDNwJ/AR4HEReQ/wRmS9GoHamgrKSmNs7eyjp3cblcF+\nAmNMal4jgWCh0Gmqugk4CliEOyUYM0osV8CYEUk5EhCR81R1kYhcGjxOfHk27o7EqT6fMl4s4X1X\nA82qekl63d9RfW0VG7Z00dTaza4N4zM5lDFFI2wkEEv4fbhfYcLixRCRTwGzfDucypvThDZDYIyv\nlCOBeJoQsKeqLhjB8XeIFxORHeLFROQw4GDc5qR9R3D8HdTHE4bsdMAYb76zA7NEZCRrcZPGi4nI\nVOAy4HNk6X6GtmDImPT5zg4MAKtFRHEpQEB4ngCp48X+C6jHxYlNA8aJyEuqemOqA6ZKTdl7Ri8A\nrR19WY10KpZIKosXK5w20+FbBL4ywuMnjRdT1atw8eWIyEcBCSsAMHy8WFzZoKsv65o7shbpVEyR\nVBYvVjhtpsN3ivAh3E/1AdzuwRJgb4+PLgF6gnixhcAXROQMETk3rV56mlRTSWlJjLaOXnr7tkXR\nhD
EFx3cD0Q3A4bh4sReBA3A/5a9L9bmweLGE993g048wJSUx6iZWsqmlm+a2bqbV2zShMWF8Lwwe\nCbwN+CNwHm7fQEVUncrE9j0EdnHQGC++RWCtqvbhRgH7q+oL7HjBb8zYvpvQpgmN8eJ7YfCNIB78\nfuCHwcrBMRnf0zDRpgmNSYfvSOATwGuq+g/gz8AZ7HyuPyZYwpAx6fEdCXwHt5Nwh6m9sajBwkWM\nSYtvEXgFuEJE6oDfATer6n8i61UG7MKgMenxXSfwC1WdB7wHtxvwdhF5JNKejdCkCRWUxGK0tPfS\n1z8Q/gFjipzvNQFEpBY4DjgBN4K4L6pOZaK0pIS6iZUAbLYZAmNC+S4WuhMXMvpn4Buq+kSkvcpQ\nQ20VTa3dNLV1s0tdda67Y8yY5ntNYBHwV1XtH/pCPHgku93KTL1NExrjzffmI3emePnTuCIxZlj8\nuDH+snHnzqRZAGHxYiJyGvBV3Mak36nqz7LQH5shMCYN3hcGU0h1q/Kk8WJBuMj3gWNwm5POD6Yg\nM9ZgC4aM8ZaNIpDKDvFiwPZ4sSBcZD9VbQcagr70ZqNRWzVojL9snA6kMmy8WDxdSFUHRORU4BfA\nXUBH2AF9AhMm142nJAYt7T1Mmjye8rLMal2xpNFYslDhtJmObBSBlhSvpYoXA0BVlwBLgsyCc4CU\n2QK+KS2TJ1TS3NbDy681MWXSOK/PDKeY0mgsWahw2kxH2H0HLk31uqp+OyRnMGm8mIhMwN3V6ARV\n7cWNArK2xK9+YhXNbT00t3RlVASMKXRhI4H4lf9DgOm4UJF+4FTgPx7HXwIcH8SLASwQkTOA8aq6\nWERuBpaJSC/wHMEmpWyorx0Ha1rtuoAxIcLuO/AtgOCb+DBV7QweXwE8GHbwsHgxVV0MLE6zz162\nx4/b0mFjUvK9YtbIjlOB5bi8wTHLZgiM8eN7YfAaYLmI3IMrHKcAV0TWqyywtQLG+PHdSvwj3JX7\n9bhbkn9YVX8VZccyZXcjMsZPOhPogjsFuBq3DHhMq5tYRQzYsrWHbQOWK2BMMl5FQEQuB+YDHwRK\ncVf5F0bZsUyVlZYwaUIlA4ODbGnryXV3jBmzfEcCJwJnA92q2gYcD5wUWa+yxC4OGhPOtwjEx9Px\nGYJKsriwJyo2TWhMON8icCtwC1AnIhcCy3CBo2NaPFzERgLGJOcbKvIDETkRWAXMAC5T1bsi7VkW\nWPy4MeHSmR1YA9wB3A60iciR0XQpeyxcxJhwvkGjvwDeC7ya8PQgLhBkzLIFQ8aE810xeAIgqprW\nuNojXuwM4PNAH/C8qp6fzvHDxKPHt2ztYWBgkJKSpEloxhQt39OBlaTIEkwhVbxYFfBt4N2qegQw\nSUROGUEbSZWXlVJbU8G2gUFa2m2tgDHD8R0JbAb+JSKP4X6iA6CqHw/53A7xYiIyN+G1HuBwVY1/\nd5YlHjtbGmqraG3vpam1m7pgtsAY8ybfInBv8CtdSePFgm3GmwBE5AJcxsD9YQdMNzVlt8YJvPpG\nGz0DI495KpZIKosXK5w20xGWLDRVVdfjkR2QRMp4seCawQ+Bt+CWJIdKN6qppsp9ia+t2cKmPSal\n9VkorkgqixcrnDbTETYSWIzbNvwQbjYg8brAILBXyOeTxosFFgFdqvoB7x6nKT5DsH5zZ1RNGJPX\nwpKFTgl+nznC4yeNFwOeAhYAD4vIg7iicqWq/mWEbQ3rrbu7n/5Pv7yJzu4+qqvKs3l4Y/Ke7zoB\nAc4HanCjgVJgpqqmXDAUFi/m234mdm0Yz9v2nMy//rOFh59bx4mHzIi6SWPyiu8U4S24aPEDgWeA\nKcCKqDqVbcfN2R2Avz+1hoGBVDdMMqb4+BaBElW9DDdD8DRu/v/QyHqVZfvvXU/jJHe78mdfbcp1\nd4wZU3yLQKeIVOKG8nOCuf28mXQvKYlx7EHTAbh/+Zoc98aYscW3
CNyMu1HI3cAFIvJXXNZg3pi3\n/zQqy0t5cdUW1mxqz3V3jBkzfINGfw6cpqqbgKNwU3uRTetFobqqnMNnTwXctQFjjJPWbcjcJMF2\ns3Fr//PGcXOm8+DTb/D4ivWc9u69qRln04XGhI0EYiG/8sq0+vHMmllHb/8ADz+3NtfdMWZM8LoN\nGYCITMFtCOoHHlbVLRH3LRLHzZ3Oitc288BTazjh4N0pLcnstuXG5DvfyPGP4G4YeiZuld8KEZkf\nZceiMmuveqZMHkdzWw/PvNKc6+4Yk3O+Pwa/gZsa/JCqngocDlweXbeiUxKLcewcN13496dez3Fv\njMk93yLQBqyLP1DVVUBv2IdEJCYivxKRx0TkARHZacORiFSLyCMi8lbvXmdo3uxpVFaU8tLqFl7f\naNOFprj5FoHngXtE5HQROU1EbgHWicg5InJOis8lTRYCEJE5uB2KYbsRs2pcZRnzZk8DbDRgjPey\nYdxI4D24rcGdQBNwNG7dQDI7JAsBc4e8XoErFC959zhL4qcEj7+wgfauvtFu3pgxw3cX39dVdYc5\nNRE5RFWfDPlc0mQhAFV9PDjWqE83Tq2rZvZe9Ty/spllz65l/jv3GO0uGDMm+BaBJ0TkIlX9o4iU\nA98FTgf2DPlcymShkchmVNNpx76F51c2s/SZtZw1/22Ulg4/MCqWSCqLFyucNtPhWwSOBq4TkQ8B\n++LO42d7fC4sWSht2Yxqml43jl3qqtmwuZO/PfYac/edstN7iimSyuLFCqfNdPheE1gNLMWd408G\nHlBVn69sCdATJAstBL4gImeIyLlD3peTTf4lsRjHBdcG7rf9BKZI+Y4EVuB+qu8HTAOuF5FzVDVl\nOKhHslD8fTm7k9Hhs6Zy20Ov8vLrLazesJUZu4ztoZsx2eY7EvgScD/wVeB1XADp41F1ajSNqyxj\n3v5uutBGA6YY+RaBw4GTcLHgZcDZuBFBQTh2znRiwP+9sIGtnaFroIwpKL5F4ETcN363qrYBx+PW\nDBSEXSZXM3vvevq3DbDsWdtdaIqLbxGIT+vFL+BVJjxXEI6b6y4QPvD0G/RvK6gvzZiUfIvArbjE\n4ToRuRBYBvwusl7lwNv3rGNafTVbtvbwz1csjNQUD994sR8A1wJ/BGYAl6nq96Ps2GiLJewuvH+5\n7ScwxcP75h+qeh9wX4R9ybn4dOEra1pZtX4re0y16UJT+CxWJ0FVRRlH7L8rAPfb7kJTJKwIDHHM\nQbsRA5741wbaOmy60BQ+KwJDTJlczTv2aaB/2yAP2XShKQJWBIYRny588Ok1Nl1oCl6kdwUOcgJ+\nCbwD6AbOVdWVCa+/F5df2Adcr6qLo+yPr/32mMyuDeNZ29TB48+tY9/pE3PdJWMiE/VIIGm8mIiU\nBY+Pw6UTnScijRH3x0ssYXfhTfe+yJ+Wvsr9y19n+UsbeWVNCxtbuujt25bjXhqTHZGOBBgSLyYi\nifFi+wGvBMuQEZFHgCOB2yLuk5fD3j6VPy9bybqmDtY1dQz7nnGVZUyqqWBSTSW1NRVMGu9+r62p\nYGJ1BaUlIwtMWt/WQ2tLZybdH/NtFsPXmKs2080TiLoIpIoXG/raVqA24v54q6wo5etnz2FtSzdr\n1rXS0tFLa3svLe09tLb30NLeS1dPP109/axrHt1/ZGNSmTdnRlrvj7oIpIoXa8MVgrgJQEvI8WKj\nGdXU2DiBWaPWmjG5EfU1gUeB+QDDxIu9COwjIpNEpAJ3KlAQGQXG5JPY4GB0yV4JswP7B08tAOYA\n41V1sYicDFyGu7nptar668g6Y4wZVqRFwBgz9tliIWOKnBUBY4qcFQFjipwVAWOKXNTrBLIibA9C\nRG2WAdfhbrVWAXxPVe+Mss2g3SnAcuA4Vd3pHg0RtPc14H1AOfBLVb0+4vbKgBtwf6/9wCej/DpF\n5FDgclU9WkT2Bn6Dy8dcoaqf
HYU2DwB+hvtae4BzVHVTVO0lPHcm8LlgyX5K+TISSHmL84icBTSp\n6pG4uPWfR91g8A3ya9xdnyMnIu8GDgv+Xo8Cdh+FZucDpar6LuA7QGQxdSLyZeAaXDAuuP83l6jq\nu4ESEXn/KLR5BfDZ4AY7S4CvRdweInIg8HHfY+RLEQi7xXkUbsXtcAT39zQa9y//MfArYLSCDE4E\nVojI7cAdwF2j0ObLQFkwuqsFokxu+TdwasLjOar6cPDnv+I2r0Xd5umqGl8kVwZ0RdmeiNTjbhj8\ned8D5EsRGHYPQpQNqmqnqnaIyARcwOrXo2xPRD4GbFTV/8UtnhoNDbjFWx/C3S5uNBKk24GZwEvA\n1bihciRUdQluGB6X+PcayV6VoW2q6gYAETkc+Czw06jaC74nFgMXAR14/j/KlyKQ9Vuc+xCR3YEH\ngBtU9ZaIm1sAHC8iDwIHADcG1wei1Azcp6r9wXl5t4g0RNzmF4B7VVVw13huDJaNj4bE/zM+e1Wy\nQkROx13Tmq+qzRE2dRCwD240+XtgPxEJPXXOiwuDRHCL8zAisgsuXfmzqvpg1O0F56nxth8EPqWq\nGyNu9hHgv4GfisiuQDWuMERpM2+eWrXg/g+WRtxm3NMicqSqLsNd53kg6gZF5CzgPOAoVY2y6MRU\ndTkwO2h3D+D3qnpR2AfzpQgswf2UfDR4vGAU2rwYmAR8Q0Quxd196SRV7RmFtkdlLbeq3i0iR4jI\nk7ih4/nBnaSjdAVwnYgsw81IXKyq2T5PTuZLwDUiUo7bwPanKBsLhudXAquAJSIyCDykqt+KoLkR\n/7vZ3gFjily+XBMwxkTEioAxRc6KgDFFzoqAMUXOioAxRc6KgDFFzoqAySsicrCIXJ7rfhQSKwIm\n37wNiHo5dVGxxUJ5KNgCfAluy/F+wHPAmaran+T9Z+I2QA3gsgrOxWUkXINbv78NWKiqN4nIR4GT\ngd2CX1cCM4BjgCbccttpwJ3Aq8BbgP8AZ6lqi4icgtsiHANW4pY/bxKR14CbcDsXq3H76v8Z7PH/\nFVAXfD0XqOqzInI9btPYnKAf3wJuD77W8cBC3K7HRbhlx93AAlV9deR/s8XJRgL56zDcMt99gT1w\n31w7CfYE/AQXUjIb929+MvBNXF7CbOBY4JsiEr/XysHACbh7QSwE7lbVd+C+sePtzAJ+oqqzcDsC\nvxncS/LXwPtU9QDgMXbMYdikqofidg9eEjx3A/BlVZ0LfApI3Kg1XVWPwIWeLFTVVuBS4A5V/R/c\nZqQfq+ohwFXAO73/9sx2VgTy1wpVXRf8+UXcT9LhHAY8En+vqn5UVe/A/WS/NniuGfdT9qjgM4+q\naoeqrsatSY9vtFkFTA7+rAl782/AFZJDgCdU9fXg+UXB83H3xfsO1InIeFzBuV5E/onbylwtIvE2\n/hY0tCKh3UR3A78QkcW4TUmjsRW64OTLBiKzs+6EPw+SfO94X+JrCVuFh76/hDf/P+wQ9JFk23bi\nbZnjoSuxIcdNPGZin+P9LQW6VPWghP7tqqpbRCTx/cNS1dtE5DHcDtMLcalF56X6jNmZjQQK3z+A\nQxKyCX6KG14/gLs2EC8M7weWDvP5ZMVFRCTxzlL3AE8Ch4pI/I6Y55Fiu25wR+pXROQjwQGPB5Yl\neXu8H/0EhUVE/gAcqqrX4FKgDkzWlknOikBhSHp1NzgN+DzwNxF5Dnfx7Xrcxbu64LmlwHdV9ZmQ\nYyf+eTPwLRFZATTiglg34r7xbxeR53HXFD4T0sezgHNF5Fnge8CHk7w//vhJ4J0i8v3g/ZeIyFPA\nj3DXCEyabHbApC0IrFiqqjNz3ReTObsmUABEpAp3R+fEih4LHl+qqlEEiNpPjwJhIwFjipxdEzCm\nyFkRMKbIWREwpshZETCmyFkRMKbI/T97D48NgwhOsgAAAABJRU5ErkJggg==\n",
      "text/plain": [
       "<matplotlib.figure.Figure at 0xfd3de80>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# Scree plot: explained variance ratio of the first 15 principal components,\n",
    "# used to decide how many components to keep in the pipelines below.\n",
    "# NOTE(review): assumes `pca` was fit on the data in an earlier cell -- confirm\n",
    "# it is defined before running this cell on a fresh kernel.\n",
    "plt.figure(1, figsize=(4, 3))\n",
    "plt.clf()\n",
    "# Leave margins around the axes so the labels are not clipped.\n",
    "plt.axes([.2, .2, .7, .7])\n",
    "plt.plot(pca.explained_variance_ratio_[:15], linewidth=2)\n",
    "plt.axis('tight')\n",
    "plt.xlabel('n_components')\n",
    "plt.ylabel('explained_variance_ratio_')\n",
    "# Python 2 print statement (notebook targets a Python 2 / old-sklearn stack).\n",
    "print pca.explained_variance_ratio_[:15]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.99245405722335422"
      ]
     },
     "execution_count": 43,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Cumulative variance captured by the first two components (assumes `pca` was fit above).\n",
    "sum(pca.explained_variance_ratio_[:2]) "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "It appears the first two components account for 99% of the variation.\n",
    "Let's create a pipeline of PCA and Logistic Regression.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Wall time: 6min 43s\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "GridSearchCV(cv=None, error_score='raise',\n",
       "       estimator=Pipeline(steps=[('pca', PCA(copy=True, iterated_power='auto', n_components=None, random_state=None,\n",
       "  svd_solver='auto', tol=0.0, whiten=False)), ('logistic', LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,\n",
       "          intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1,\n",
       "          penalty='l2', random_state=None, solver='liblinear', tol=0.0001,\n",
       "          verbose=0, warm_start=False))]),\n",
       "       fit_params={}, iid=True, n_jobs=-1,\n",
       "       param_grid={'logistic__C': [0.0001, 1.0, 10000.0], 'pca__n_components': [1, 2, 3], 'logistic__penalty': ['l1', 'l2']},\n",
       "       pre_dispatch='2*n_jobs', refit=True, scoring=None, verbose=0)"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "\n",
    "# Pipeline: PCA for dimensionality reduction, then logistic regression.\n",
    "# NOTE(review): `linear_model` and `decomposition` are only imported in a later\n",
    "# cell -- on a fresh kernel this cell fails until that import cell has run.\n",
    "logistic = linear_model.LogisticRegression()\n",
    "pca = decomposition.PCA()\n",
    "pipe = Pipeline(steps=[('pca', pca),\n",
    "                       ('logistic', logistic)])\n",
    "#Parameters of pipelines can be set using ‘__’ separated parameter names:\n",
    "grid = dict(pca__n_components=[1, 2, 3],\n",
    "            logistic__C=[1e-4, 1.0, 1e4],\n",
    "            logistic__penalty=['l1', 'l2'])\n",
    "# Exhaustive search over the 3x3x2 grid, parallelized across all cores.\n",
    "estimator = GridSearchCV(pipe, grid, n_jobs=-1)\n",
    "%time estimator.fit(X, y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 52,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "best score = 0.670440190458\n",
      "{'logistic__C': 0.0001, 'pca__n_components': 3, 'logistic__penalty': 'l1'}\n"
     ]
    }
   ],
   "source": [
    "# Report the best cross-validated score and parameter combination found above.\n",
    "print \"best score = \" + str(estimator.best_score_)\n",
    "print estimator.best_params_"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "It is interesting that grid search discovered pca__n_components = 3 to be part of the best parameters, as the 3rd component only contributed less than 1% to the variance. For curiosity's sake, run again, but leave 3 out of the candidate components to see what the best score looks like."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 53,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Wall time: 3min 11s\n",
      "best score = 0.665690894957\n",
      "{'logistic__C': 0.0001, 'pca__n_components': 1, 'logistic__penalty': 'l1'}\n"
     ]
    }
   ],
   "source": [
    "# Re-run the search without the 3rd component to see how much the score drops.\n",
    "grid = dict(pca__n_components=[1, 2],\n",
    "            logistic__C=[1e-4, 1.0, 1e4],\n",
    "            logistic__penalty=['l1', 'l2'])\n",
    "estimator = GridSearchCV(pipe, grid, n_jobs=-1)\n",
    "%time estimator.fit(X, y)\n",
    "# Python 2 print statements (notebook targets a Python 2 stack).\n",
    "print \"best score = \" + str(estimator.best_score_)\n",
    "print estimator.best_params_"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "ref: \n",
    "(a) http://scikit-learn.org/stable/auto_examples/plot_digits_pipe.html\n",
    "(b) http://matthewrocklin.com/blog/work/2016/07/12/dask-learn-part-1\n",
    "Note: I was expecting pca__n_components to be 2 since there seemed to be a significant jump from 90-99% variance explanation, but the grid search decided that only 1 was necessary to create the best score. The other parameters remained the same and the score only dropped by roughly .01. It also cut processing time roughly in half. Based on ref (b), scikit-learn's grid search also does cross validation (across 3 folds by default). Each estimator was fit on its corresponding training data and then scored on the corresponding set of testing data. \n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Fitting 3 folds for each of 12 candidates, totalling 36 fits\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[Parallel(n_jobs=-1)]: Done   2 tasks      | elapsed:   24.0s\n",
      "[Parallel(n_jobs=-1)]: Done   9 tasks      | elapsed:   56.2s\n",
      "[Parallel(n_jobs=-1)]: Done  16 tasks      | elapsed:  1.5min\n",
      "[Parallel(n_jobs=-1)]: Done  25 out of  36 | elapsed:  2.8min remaining:  1.2min\n",
      "[Parallel(n_jobs=-1)]: Done  29 out of  36 | elapsed:  3.0min remaining:   43.2s\n",
      "[Parallel(n_jobs=-1)]: Done  33 out of  36 | elapsed:  3.1min remaining:   17.0s\n",
      "[Parallel(n_jobs=-1)]: Done  36 out of  36 | elapsed:  3.3min finished\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Wall time: 3min 31s\n"
     ]
    },
    {
     "ename": "NameError",
     "evalue": "name 'grid_search' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-21-227505d24a11>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m()\u001b[0m\n\u001b[0;32m     18\u001b[0m \u001b[0mestimator\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mGridSearchCV\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mpipe\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mgrid\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mn_jobs\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;33m-\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mverbose\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;36m10\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     19\u001b[0m \u001b[0mget_ipython\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmagic\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34mu'time estimator.fit(X, y)'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 20\u001b[1;33m \u001b[1;32mprint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mgrid_search\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mbest_estimator_\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     21\u001b[0m \u001b[1;32mprint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"best score = \"\u001b[0m \u001b[1;33m+\u001b[0m \u001b[0mstr\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mestimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mbest_score_\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     22\u001b[0m \u001b[1;32mprint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mestimator\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mbest_params_\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mNameError\u001b[0m: name 'grid_search' is not defined"
     ]
    }
   ],
   "source": [
    "from sklearn import linear_model, decomposition, datasets\n",
    "from sklearn.pipeline import Pipeline, FeatureUnion\n",
    "from sklearn.feature_selection import SelectKBest\n",
    "\n",
    "# This dataset is way too high-dimensional. Better do PCA:\n",
    "pca = decomposition.PCA(n_components=2)\n",
    "\n",
    "# Maybe some original features were good, too? Keep the single best one.\n",
    "selection = SelectKBest(k=1)\n",
    "\n",
    "# Build an estimator that runs PCA and univariate selection side by side:\n",
    "combined_features = FeatureUnion([(\"pca\", pca), (\"univ_select\", selection)])\n",
    "pipeline = Pipeline(steps=[(\"features\", combined_features), ('logistic', logistic)])\n",
    "\n",
    "# Search the FeatureUnion pipeline. Fixed: the previous version searched the\n",
    "# old `pipe` object, so the combined features were never actually used, and a\n",
    "# stray `X_features = combined_features.fit(X, y).transform(X)` did unused\n",
    "# work. Nested step parameters use '__'-separated names (features__pca__...).\n",
    "grid = dict(features__pca__n_components=[1, 2],\n",
    "            logistic__C=[1e-4, 1.0, 1e4],\n",
    "            logistic__penalty=['l1', 'l2'])\n",
    "estimator = GridSearchCV(pipeline, grid, n_jobs=-1, verbose=10)\n",
    "%time estimator.fit(X, y)\n",
    "# Fixed: previously referenced the undefined name `grid_search` (NameError in\n",
    "# the recorded traceback); the fitted search object is `estimator`.\n",
    "print(estimator.best_estimator_)\n",
    "print(\"best score = \" + str(estimator.best_score_))\n",
    "print(estimator.best_params_)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "82328\n",
      "sklearn.cross_validation.KFold(n=82328, n_folds=10, shuffle=False, random_state=None)\n",
      "Average Accuracy across 10 fold cross validation iterations = 0.606487290653 training time = 10.2802627327\n",
      "Average Accuracies across statified 10 fold cross validation iterations = 75%  training time = 10.3222235803\n",
      "Average Precision across statified 10 fold cross validation iterations = 85% training time = 10.3222235803\n",
      "Average Recall(Sensitivity) across statified 10 fold cross validation iterations = 66% training time = 10.3222235803\n",
      "Average Accuracy across statified 10 fold cross validation iterations = 75% training time = 10.3222235803\n",
      "Average F measure(F1) across statified 10 fold cross validation iterations = 73% training time = 10.3222235803\n",
      "(0.67579947052149436, 0.86077042667127313, 0.75715162668194602)\n"
     ]
    }
   ],
   "source": [
    "from sklearn.linear_model import LogisticRegression\n",
    "from sklearn.cross_validation import cross_val_score\n",
    "# Compare plain 10-fold CV against stratified 10-fold CV for logistic regression,\n",
    "# reporting accuracy/precision/recall/F1 and the wall-clock time of each loop.\n",
    "# NOTE(review): assumes `dfcopy` and the feature matrix `X` were built in earlier\n",
    "# cells -- confirm they exist before running this cell on a fresh kernel.\n",
    "#ref : http://stackoverflow.com/questions/2866380/how-can-i-time-a-code-segment-for-testing-performance-with-pythons-timeit\n",
    "import time\n",
    "#ref: http://scikit-learn.org/0.17/modules/generated/sklearn.cross_validation.KFold.html\n",
    "numObservations = len(dfcopy)\n",
    "print numObservations\n",
    "from sklearn.cross_validation import KFold\n",
    "from sklearn.cross_validation import StratifiedKFold\n",
    "\n",
    "y = dfcopy['label'].values\n",
    "numObservations = len(dfcopy)\n",
    "num_folds = 10\n",
    "# Old (pre-0.18) sklearn CV API: KFold takes the sample count, StratifiedKFold\n",
    "# takes the label vector so it can preserve class proportions per fold.\n",
    "kf = KFold(numObservations, n_folds=num_folds)\n",
    "skf = StratifiedKFold(y, num_folds)\n",
    "\n",
    "# first we create a reusable logistic regression object\n",
    "# here we can setup the object with different learning parameters and constants\n",
    "lr_clf = LogisticRegression(penalty='l2', C=1.0, class_weight=None) # get object\n",
    "print(kf)\n",
    "# Time the plain (unstratified) K-fold fit/predict loop.\n",
    "t0 = time.clock()\n",
    "\n",
    "for train_index, test_index in kf:\n",
    "    #print(\"TRAIN:\", train_index, \"TEST:\", test_index)\n",
    "    X_train, X_test = X[train_index], X[test_index]\n",
    "    y_train, y_test = y[train_index], y[test_index]\n",
    "    \n",
    "    # train the reusable logistic regression model on the training data\n",
    "    lr_clf.fit(X_train,y_train)  # train object\n",
    "    y_hat = lr_clf.predict(X_test)\n",
    "t1 = time.clock()\n",
    "total = t1 - t0\n",
    "\n",
    "accuracies = cross_val_score(lr_clf, X, y=y, cv=kf) # this also can help with parallelism\n",
    "#print(accuracies)\n",
    "print \"Average Accuracy across \" + str(num_folds) + \" fold cross validation iterations = \" + str(np.average(accuracies)) + \" training time = \" + str(total)    \n",
    "\n",
    "# Repeat the timing with stratified folds for comparison.\n",
    "t0 = time.clock()\n",
    "for train_index, test_index in skf:\n",
    "    #print(\"TRAIN:\", train_index, \"TEST:\", test_index)\n",
    "    X_train, X_test = X[train_index], X[test_index]\n",
    "    y_train, y_test = y[train_index], y[test_index]\n",
    "    \n",
    "    # train the reusable logistic regression model on the training data\n",
    "    lr_clf.fit(X_train,y_train)  # train object\n",
    "    y_hat = lr_clf.predict(X_test)\n",
    "t1 = time.clock()\n",
    "total = t1 - t0\n",
    "# http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.cross_val_score.html\n",
    "# http://scikit-learn.org/stable/modules/model_evaluation.html\n",
    "# One cross_val_score pass per metric on the stratified folds.\n",
    "accuracies = cross_val_score(lr_clf, X, y=y, cv=skf) # this also can help with parallelism\n",
    "precision = cross_val_score(lr_clf, X, y=y, cv=skf, scoring='precision')\n",
    "recall = cross_val_score(lr_clf, X, y=y, cv=skf, scoring='recall')\n",
    "accuracy = cross_val_score(lr_clf, X, y=y, cv=skf, scoring='accuracy')\n",
    "f1 = cross_val_score(lr_clf, X, y=y, cv=skf, scoring='f1')# just testing to see if it is same as default\n",
    "#print(accuracies)\n",
    "#print \"Percent normal(0) is {0:.0%}\".format(float(classCounts[0])/totalObservations)\n",
    "print \"Average Accuracies across stratified \" + str(num_folds) + \" fold cross validation iterations = {0:.0%} \".format(np.average(accuracies)) + \" training time = \" + str(total)    \n",
    "print \"Average Precision across stratified \" + str(num_folds) + \" fold cross validation iterations = {0:.0%}\".format(np.average(precision)) + \" training time = \" + str(total)    \n",
    "print \"Average Recall(Sensitivity) across stratified \" + str(num_folds) + \" fold cross validation iterations = {0:.0%}\".format(np.average(recall)) + \" training time = \" + str(total)    \n",
    "print \"Average Accuracy across stratified \" + str(num_folds) + \" fold cross validation iterations = {0:.0%}\".format(np.average(accuracy)) + \" training time = \" + str(total)    \n",
    "print \"Average F measure(F1) across stratified \" + str(num_folds) + \" fold cross validation iterations = {0:.0%}\".format(np.average(f1)) + \" training time = \" + str(total)    \n",
    "\n",
    "# http://stackoverflow.com/questions/23339523/sklearn-cross-validation-with-multiple-scores\n",
    "from sklearn import metrics\n",
    "def mean_scores(X, y, clf, skf):\n",
    "    \"\"\"Average clf's confusion matrix over the folds of skf and return\n",
    "    (sensitivity, specificity, F-measure) for binary labels {0, 1}.\"\"\"\n",
    "    cm = np.zeros(len(np.unique(y)) ** 2)\n",
    "    for i, (train, test) in enumerate(skf):\n",
    "        clf.fit(X[train], y[train])\n",
    "        y_pred = clf.predict(X[test])\n",
    "        cm += metrics.confusion_matrix(y[test], y_pred).flatten()\n",
    "\n",
    "    # Fixed: sklearn's binary confusion matrix is [[tn, fp], [fn, tp]], so the\n",
    "    # flattened average unpacks as (tn, fp, fn, tp). The previous version\n",
    "    # splatted it straight into compute_measures(tp, fp, fn, tn), swapping\n",
    "    # tp/tn and reporting sensitivity and specificity from the wrong counts.\n",
    "    tn, fp, fn, tp = cm / skf.n_folds\n",
    "    return compute_measures(tp, fp, fn, tn)\n",
    "\n",
    "def compute_measures(tp, fp, fn, tn):\n",
    "    \"\"\"Computes effectiveness measures given confusion-matrix counts.\"\"\"\n",
    "    specificity = tn / (tn + fp)\n",
    "    sensitivity = tp / (tp + fn)\n",
    "    fmeasure = 2 * (specificity * sensitivity) / (specificity + sensitivity)\n",
    "    return sensitivity, specificity, fmeasure\n",
    "\n",
    "print mean_scores(X, y, lr_clf, skf)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The stratified 10 fold cross validation appears to have a higher accuracy score of 75% vs 61%. Not much difference in computational expense between the two cross validation methods.\n",
    "    # https://uberpython.wordpress.com/2012/01/01/precision-recall-sensitivity-and-specificity/\n",
    "   \n",
    "   Sensitivity/recall measures how good the model is at detecting the positives, which is somewhat important.\n",
    "However, "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "* Describe the final dataset that is used for classification/regression (include a description of any newly formed variables you created)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 56,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "#ref: http://matthewrocklin.com/blog/work/2016/07/12/dask-learn-part-1\n",
    "# Synthetic binary dataset: 10k samples, 500 features (250 redundant), fixed seed.\n",
    "# NOTE(review): this rebinds X and y, replacing the UNSW-NB15 data used above.\n",
    "\n",
    "from sklearn.datasets import make_classification\n",
    "\n",
    "X, y = make_classification(n_samples=10000,\n",
    "                           n_features=500,\n",
    "                           n_classes=2,\n",
    "                           n_redundant=250,\n",
    "                           random_state=42)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 57,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "from sklearn import linear_model, decomposition\n",
    "from sklearn.pipeline import Pipeline\n",
    "\n",
    "# PCA followed by logistic regression; n_components, C and penalty are tuned below.\n",
    "logistic = linear_model.LogisticRegression()\n",
    "pca = decomposition.PCA()\n",
    "pipe = Pipeline(steps=[('pca', pca),\n",
    "                       ('logistic', logistic)])\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "#Parameters of pipelines can be set using ‘__’ separated parameter names:\n",
    "grid = dict(pca__n_components=[50, 100, 250],\n",
    "            logistic__C=[1e-4, 1.0, 1e4],\n",
    "            logistic__penalty=['l1', 'l2'])\n",
    "\n",
    "# Metrics to optimize in separate grid-search runs in the next cell.\n",
    "scores = ['precision', 'recall', 'f1', 'accuracy']\n",
    "\n",
    "# Smaller single-point grid kept (commented out) for quick smoke tests:\n",
    "#grid = dict(pca__n_components=[50],\n",
    "#            logistic__C=[1e-4],\n",
    "#            logistic__penalty=['l2'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 70,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Wall time: 32.8 s\n",
      "precision 0.89184751014\n",
      "Wall time: 35.7 s\n",
      "recall 0.8948\n",
      "Wall time: 34.8 s\n",
      "f1 0.893126414374\n",
      "Wall time: 35.2 s\n",
      "accuracy 0.8928\n"
     ]
    }
   ],
   "source": [
    "from __future__ import print_function\n",
    "\n",
    "from sklearn.grid_search import GridSearchCV\n",
    "\n",
    "# One grid search per metric; report the best cross-validated score for each.\n",
    "# Fixed: the __future__ import must precede any other statement in the cell,\n",
    "# and `scoring='%s' % score` was an unnecessary reformatting of the string.\n",
    "for score in scores:\n",
    "    estimator = GridSearchCV(pipe, grid, n_jobs=-1, scoring=score)\n",
    "    %time estimator.fit(X, y)\n",
    "    print(score + \" \" + str(estimator.best_score_))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.8928"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Best CV score from the last grid search (shown via rich cell output).\n",
    "estimator.best_score_"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'logistic__C': 0.0001, 'logistic__penalty': 'l2', 'pca__n_components': 50}"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Winning parameter combination from the last grid search.\n",
    "estimator.best_params_"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Automatically created module for IPython interactive environment\n",
      "# Tuning hyper-parameters for precision\n",
      "\n",
      "Best parameters set found on development set:\n",
      "\n",
      "{'kernel': 'rbf', 'C': 10, 'gamma': 0.001}\n",
      "\n",
      "Grid scores on development set:\n",
      "\n",
      "0.986 (+/-0.016) for {'kernel': 'rbf', 'C': 1, 'gamma': 0.001}\n",
      "0.959 (+/-0.029) for {'kernel': 'rbf', 'C': 1, 'gamma': 0.0001}\n",
      "0.988 (+/-0.017) for {'kernel': 'rbf', 'C': 10, 'gamma': 0.001}\n",
      "0.982 (+/-0.026) for {'kernel': 'rbf', 'C': 10, 'gamma': 0.0001}\n",
      "0.988 (+/-0.017) for {'kernel': 'rbf', 'C': 100, 'gamma': 0.001}\n",
      "0.982 (+/-0.025) for {'kernel': 'rbf', 'C': 100, 'gamma': 0.0001}\n",
      "0.988 (+/-0.017) for {'kernel': 'rbf', 'C': 1000, 'gamma': 0.001}\n",
      "0.982 (+/-0.025) for {'kernel': 'rbf', 'C': 1000, 'gamma': 0.0001}\n",
      "0.975 (+/-0.014) for {'kernel': 'linear', 'C': 1}\n",
      "0.975 (+/-0.014) for {'kernel': 'linear', 'C': 10}\n",
      "0.975 (+/-0.014) for {'kernel': 'linear', 'C': 100}\n",
      "0.975 (+/-0.014) for {'kernel': 'linear', 'C': 1000}\n",
      "\n",
      "Detailed classification report:\n",
      "\n",
      "The model is trained on the full development set.\n",
      "The scores are computed on the full evaluation set.\n",
      "\n",
      "             precision    recall  f1-score   support\n",
      "\n",
      "          0       1.00      1.00      1.00        89\n",
      "          1       0.97      1.00      0.98        90\n",
      "          2       0.99      0.98      0.98        92\n",
      "          3       1.00      0.99      0.99        93\n",
      "          4       1.00      1.00      1.00        76\n",
      "          5       0.99      0.98      0.99       108\n",
      "          6       0.99      1.00      0.99        89\n",
      "          7       0.99      1.00      0.99        78\n",
      "          8       1.00      0.98      0.99        92\n",
      "          9       0.99      0.99      0.99        92\n",
      "\n",
      "avg / total       0.99      0.99      0.99       899\n",
      "\n",
      "\n",
      "# Tuning hyper-parameters for recall\n",
      "\n",
      "Best parameters set found on development set:\n",
      "\n",
      "{'kernel': 'rbf', 'C': 10, 'gamma': 0.001}\n",
      "\n",
      "Grid scores on development set:\n",
      "\n",
      "0.986 (+/-0.019) for {'kernel': 'rbf', 'C': 1, 'gamma': 0.001}\n",
      "0.957 (+/-0.029) for {'kernel': 'rbf', 'C': 1, 'gamma': 0.0001}\n",
      "0.987 (+/-0.019) for {'kernel': 'rbf', 'C': 10, 'gamma': 0.001}\n",
      "0.981 (+/-0.028) for {'kernel': 'rbf', 'C': 10, 'gamma': 0.0001}\n",
      "0.987 (+/-0.019) for {'kernel': 'rbf', 'C': 100, 'gamma': 0.001}\n",
      "0.981 (+/-0.026) for {'kernel': 'rbf', 'C': 100, 'gamma': 0.0001}\n",
      "0.987 (+/-0.019) for {'kernel': 'rbf', 'C': 1000, 'gamma': 0.001}\n",
      "0.981 (+/-0.026) for {'kernel': 'rbf', 'C': 1000, 'gamma': 0.0001}\n",
      "0.972 (+/-0.012) for {'kernel': 'linear', 'C': 1}\n",
      "0.972 (+/-0.012) for {'kernel': 'linear', 'C': 10}\n",
      "0.972 (+/-0.012) for {'kernel': 'linear', 'C': 100}\n",
      "0.972 (+/-0.012) for {'kernel': 'linear', 'C': 1000}\n",
      "\n",
      "Detailed classification report:\n",
      "\n",
      "The model is trained on the full development set.\n",
      "The scores are computed on the full evaluation set.\n",
      "\n",
      "             precision    recall  f1-score   support\n",
      "\n",
      "          0       1.00      1.00      1.00        89\n",
      "          1       0.97      1.00      0.98        90\n",
      "          2       0.99      0.98      0.98        92\n",
      "          3       1.00      0.99      0.99        93\n",
      "          4       1.00      1.00      1.00        76\n",
      "          5       0.99      0.98      0.99       108\n",
      "          6       0.99      1.00      0.99        89\n",
      "          7       0.99      1.00      0.99        78\n",
      "          8       1.00      0.98      0.99        92\n",
      "          9       0.99      0.99      0.99        92\n",
      "\n",
      "avg / total       0.99      0.99      0.99       899\n",
      "\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# ref: http://scikit-learn.org/stable/auto_examples/model_selection/grid_search_digits.html\n",
    "\n",
    "from __future__ import print_function\n",
    "\n",
    "from sklearn import datasets\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.model_selection import GridSearchCV\n",
    "from sklearn.metrics import classification_report\n",
    "from sklearn.svm import SVC\n",
    "\n",
    "print(__doc__)\n",
    "\n",
    "# Loading the Digits dataset\n",
    "digits = datasets.load_digits()\n",
    "\n",
    "# To apply an classifier on this data, we need to flatten the image, to\n",
    "# turn the data in a (samples, feature) matrix:\n",
    "n_samples = len(digits.images)\n",
    "X = digits.images.reshape((n_samples, -1))\n",
    "y = digits.target\n",
    "\n",
    "# Split the dataset in two equal parts\n",
    "X_train, X_test, y_train, y_test = train_test_split(\n",
    "    X, y, test_size=0.5, random_state=0)\n",
    "\n",
    "# Set the parameters by cross-validation\n",
    "tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],\n",
    "                     'C': [1, 10, 100, 1000]},\n",
    "                    {'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]\n",
    "\n",
    "scores = ['precision', 'recall']\n",
    "\n",
    "for score in scores:\n",
    "    print(\"# Tuning hyper-parameters for %s\" % score)\n",
    "    print()\n",
    "\n",
    "    clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,\n",
    "                       scoring='%s_macro' % score)\n",
    "    clf.fit(X_train, y_train)\n",
    "\n",
    "    print(\"Best parameters set found on development set:\")\n",
    "    print()\n",
    "    print(clf.best_params_)\n",
    "    print()\n",
    "    print(\"Grid scores on development set:\")\n",
    "    print()\n",
    "    means = clf.cv_results_['mean_test_score']\n",
    "    stds = clf.cv_results_['std_test_score']\n",
    "    for mean, std, params in zip(means, stds, clf.cv_results_['params']):\n",
    "        print(\"%0.3f (+/-%0.03f) for %r\"\n",
    "              % (mean, std * 2, params))\n",
    "    print()\n",
    "\n",
    "    print(\"Detailed classification report:\")\n",
    "    print()\n",
    "    print(\"The model is trained on the full development set.\")\n",
    "    print(\"The scores are computed on the full evaluation set.\")\n",
    "    print()\n",
    "    y_true, y_pred = y_test, clf.predict(X_test)\n",
    "    # http://stackoverflow.com/questions/1614236/in-python-how-to-i-convert-all-items-in-a-list-to-floats\n",
    "    print(classification_report(y_true, y_pred, target_names=[str(i) for i in digits.target_names.tolist()]))\n",
    "    print()\n",
    "\n",
    "# Note the problem is too easy: the hyperparameter plateau is too flat and the\n",
    "# output model is the same for precision and recall with ties in quality."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Modeling and Evaluation:\n",
    "* Choose and explain your evaluation metrics that you will use (i.e., accuracy, precision, recall, F-measure, or any metric we have discussed). Why are the measure(s) appropriate for analyzing the results of your modeling? Give a detailed explanation backing up any assertions."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "*  Choose the method you will use for dividing your data into training and testing splits (i.e., are you using Stratified 10-fold cross validation? Why?). Explain why your chosen method is appropriate or use more than one method as appropriate. For example, if you are using time series data then you should be using continuous training and testing sets across time."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "*  Create three different classification/regression models for each task (e.g., random forest, KNN, and SVM for task one and the same or different algorithms for task two). Two modeling techniques must be new (but the third could be SVM or logistic regression). Adjust parameters as appropriate to increase generalization performance using your chosen metric. You must investigate different parameters of the algorithms! "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "*  Analyze the results using your chosen method of evaluation. Use visualizations of the results to bolster the analysis. Explain any visuals and analyze why they are interesting to someone that might use this model."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "* Discuss the advantages of each model for each classification task, if any. If there are not advantages, explain why. Is any model better than another? Is the difference significant with 95% confidence? Use proper statistical comparison methods. You must use statistical comparison techniques—be sure they are appropriate for your chosen method of validation. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "* Which attributes from your analysis are most important? Use proper methods discussed in class to evaluate the importance of different attributes. Discuss the results and hypothesize about why certain attributes are more important than others for a given classification task."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Deployment:\n",
    "* How useful is your model for interested parties (i.e., the companies or organizations that might want to use it for prediction)? How would you measure the model's value if it was used by these parties? How would your deploy your model for interested parties? What other data should be collected? How often would the model need to be updated, etc.? "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Exceptional Work:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "anaconda-cloud": {},
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
