{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "%%sh\n",
    "pip -q install --upgrade pip\n",
    "pip -q install sagemaker awscli boto3 --upgrade"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<script>Jupyter.notebook.kernel.restart()</script>"
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from IPython.core.display import HTML\n",
    "HTML(\"<script>Jupyter.notebook.kernel.restart()</script>\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Direct Marketing with Keras and Hyperparameter Tuning\n",
    "\n",
    "Last update: December 2nd, 2019"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "In this lab, we're going to use a simple neural network implemented with [Keras](https://keras.io), a popular, beginner-friendly deep learning library.\n",
    "\n",
    "Here's a high-level overview of the Keras code below:\n",
    "* Read hyperparameters, architecture parameters (number and width of dense layers), and environment variables passed by SageMaker (as per [script mode](https://sagemaker.readthedocs.io/en/stable/using_tf.html))\n",
    "* Read the full data set from the training channel,\n",
    "* One-hot encode categorical variables,\n",
    "* Separate samples (X) and labels (Y),\n",
    "* Apply [min/max](https://en.wikipedia.org/wiki/Feature_scaling) scaling on numerical features,\n",
    "* Split data set for training and validation,\n",
     "* Build the neural network, with 1 to `layers` dense layers, each one with `dense_layer` neurons,\n",
     "* Train the model, displaying precision, recall and F1 score,\n",
    "* Score the model,\n",
    "* Save the model.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36margparse\u001b[39;49;00m, \u001b[04m\u001b[36mos\u001b[39;49;00m\n",
      "\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mnumpy\u001b[39;49;00m \u001b[34mas\u001b[39;49;00m \u001b[04m\u001b[36mnp\u001b[39;49;00m\n",
      "\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mpandas\u001b[39;49;00m \u001b[34mas\u001b[39;49;00m \u001b[04m\u001b[36mpd\u001b[39;49;00m\n",
      "\n",
      "\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mtensorflow\u001b[39;49;00m \u001b[34mas\u001b[39;49;00m \u001b[04m\u001b[36mtf\u001b[39;49;00m\n",
      "\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mkeras\u001b[39;49;00m\n",
      "\n",
      "\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36msubprocess\u001b[39;49;00m\n",
      "\u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36msys\u001b[39;49;00m\n",
      "\n",
      "\u001b[34mdef\u001b[39;49;00m \u001b[32minstall\u001b[39;49;00m(package):\n",
      "    subprocess.call([sys.executable, \u001b[33m\"\u001b[39;49;00m\u001b[33m-m\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m, \u001b[33m\"\u001b[39;49;00m\u001b[33mpip\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m, \u001b[33m\"\u001b[39;49;00m\u001b[33minstall\u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m, package])\n",
      "    \n",
      "\u001b[34mif\u001b[39;49;00m \u001b[31m__name__\u001b[39;49;00m == \u001b[33m'\u001b[39;49;00m\u001b[33m__main__\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m:      \n",
      "    \n",
      "    \u001b[37m# Keras-metrics brings additional metrics: precision, recall, f1\u001b[39;49;00m\n",
      "    install(\u001b[33m'\u001b[39;49;00m\u001b[33mkeras-metrics\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m)\n",
      "    \n",
      "    parser = argparse.ArgumentParser()\n",
      "\n",
      "    parser.add_argument(\u001b[33m'\u001b[39;49;00m\u001b[33m--epochs\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m, \u001b[36mtype\u001b[39;49;00m=\u001b[36mint\u001b[39;49;00m, default=\u001b[34m10\u001b[39;49;00m)\n",
      "    parser.add_argument(\u001b[33m'\u001b[39;49;00m\u001b[33m--learning-rate\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m, \u001b[36mtype\u001b[39;49;00m=\u001b[36mfloat\u001b[39;49;00m, default=\u001b[34m0.01\u001b[39;49;00m)\n",
      "    parser.add_argument(\u001b[33m'\u001b[39;49;00m\u001b[33m--batch-size\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m, \u001b[36mtype\u001b[39;49;00m=\u001b[36mint\u001b[39;49;00m, default=\u001b[34m128\u001b[39;49;00m)\n",
      "    parser.add_argument(\u001b[33m'\u001b[39;49;00m\u001b[33m--dense-layer\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m, \u001b[36mtype\u001b[39;49;00m=\u001b[36mint\u001b[39;49;00m, default=\u001b[34m32\u001b[39;49;00m)\n",
      "    parser.add_argument(\u001b[33m'\u001b[39;49;00m\u001b[33m--layers\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m, \u001b[36mtype\u001b[39;49;00m=\u001b[36mfloat\u001b[39;49;00m, default=\u001b[34m2\u001b[39;49;00m)\n",
      "\n",
      "    parser.add_argument(\u001b[33m'\u001b[39;49;00m\u001b[33m--gpu-count\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m, \u001b[36mtype\u001b[39;49;00m=\u001b[36mint\u001b[39;49;00m, default=os.environ[\u001b[33m'\u001b[39;49;00m\u001b[33mSM_NUM_GPUS\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m])\n",
      "    parser.add_argument(\u001b[33m'\u001b[39;49;00m\u001b[33m--model-dir\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m, \u001b[36mtype\u001b[39;49;00m=\u001b[36mstr\u001b[39;49;00m, default=os.environ[\u001b[33m'\u001b[39;49;00m\u001b[33mSM_MODEL_DIR\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m])\n",
      "    parser.add_argument(\u001b[33m'\u001b[39;49;00m\u001b[33m--training\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m, \u001b[36mtype\u001b[39;49;00m=\u001b[36mstr\u001b[39;49;00m, default=os.environ[\u001b[33m'\u001b[39;49;00m\u001b[33mSM_CHANNEL_TRAINING\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m])\n",
      "    \n",
      "    args, _ = parser.parse_known_args()\n",
      "    epochs        = args.epochs\n",
      "    learning_rate = args.learning_rate\n",
      "    batch_size    = args.batch_size\n",
      "    dense_layer   = args.dense_layer\n",
      "    layers        = args.layers\n",
      "    gpu_count     = args.gpu_count\n",
      "    model_dir     = args.model_dir\n",
      "    training_dir  = args.training\n",
      "    \n",
      "    \u001b[37m# Read data set\u001b[39;49;00m\n",
      "    data = pd.read_csv(training_dir+\u001b[33m'\u001b[39;49;00m\u001b[33m/bank-additional-full.csv\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m, sep=\u001b[33m'\u001b[39;49;00m\u001b[33m;\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m)\n",
      "    \u001b[34mprint\u001b[39;49;00m(\u001b[33m\"\u001b[39;49;00m\u001b[33mData shape: \u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m, data.shape)\n",
      "    \n",
      "    \u001b[37m# One-hot encode categorical variables\u001b[39;49;00m\n",
      "    data = pd.get_dummies(data)\n",
      "    \u001b[34mprint\u001b[39;49;00m(\u001b[33m\"\u001b[39;49;00m\u001b[33mOne-hot encoded data shape: \u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m, data.shape)\n",
      "\n",
      "    \u001b[37m# Separate features and labels\u001b[39;49;00m\n",
      "    X = data.drop([\u001b[33m'\u001b[39;49;00m\u001b[33my_yes\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m, \u001b[33m'\u001b[39;49;00m\u001b[33my_no\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m], axis=\u001b[34m1\u001b[39;49;00m)\n",
      "    Y = data[\u001b[33m'\u001b[39;49;00m\u001b[33my_yes\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m]\n",
      "    \u001b[34mprint\u001b[39;49;00m(\u001b[33m\"\u001b[39;49;00m\u001b[33mX shape: \u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m, data.shape)\n",
      "    \u001b[34mprint\u001b[39;49;00m(\u001b[33m\"\u001b[39;49;00m\u001b[33mY shape: \u001b[39;49;00m\u001b[33m\"\u001b[39;49;00m, data.shape)\n",
      "\n",
      "    \u001b[37m# Scale numerical features\u001b[39;49;00m\n",
      "    \u001b[34mfrom\u001b[39;49;00m \u001b[04m\u001b[36msklearn\u001b[39;49;00m \u001b[34mimport\u001b[39;49;00m preprocessing\n",
      "    min_max_scaler = preprocessing.MinMaxScaler()\n",
      "    X = min_max_scaler.fit_transform(X)\n",
      "    X = pd.DataFrame(X)\n",
      "    \n",
      "    \u001b[37m# Split data set for training and validation\u001b[39;49;00m\n",
      "    \u001b[34mfrom\u001b[39;49;00m \u001b[04m\u001b[36msklearn\u001b[39;49;00m \u001b[34mimport\u001b[39;49;00m model_selection\n",
      "    x_train, x_validation, y_train, y_validation = model_selection.train_test_split(X, Y, test_size=\u001b[34m0.1\u001b[39;49;00m, random_state=\u001b[34m123\u001b[39;49;00m)\n",
      "\n",
      "    \u001b[37m# Number of features in the training set (we need to pass this value to the input layer)\u001b[39;49;00m\n",
      "    input_dim = x_train.shape[\u001b[34m1\u001b[39;49;00m]\n",
      "\n",
      "    \u001b[37m# Build simple neural network\u001b[39;49;00m\n",
      "    \u001b[34mfrom\u001b[39;49;00m \u001b[04m\u001b[36mkeras.models\u001b[39;49;00m \u001b[34mimport\u001b[39;49;00m Sequential\n",
      "    \u001b[34mfrom\u001b[39;49;00m \u001b[04m\u001b[36mkeras.layers\u001b[39;49;00m \u001b[34mimport\u001b[39;49;00m Dense\n",
      "    model = Sequential()\n",
      "    model.add(Dense(dense_layer, input_dim=input_dim, activation=\u001b[33m'\u001b[39;49;00m\u001b[33mrelu\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m))\n",
      "    \u001b[34mfor\u001b[39;49;00m i \u001b[35min\u001b[39;49;00m \u001b[36mrange\u001b[39;49;00m(\u001b[36mint\u001b[39;49;00m(layers)-\u001b[34m1\u001b[39;49;00m):\n",
      "        model.add(Dense(dense_layer, activation=\u001b[33m'\u001b[39;49;00m\u001b[33mrelu\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m))\n",
      "    model.add(Dense(\u001b[34m1\u001b[39;49;00m, activation=\u001b[33m'\u001b[39;49;00m\u001b[33msigmoid\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m)) \u001b[37m# because we want a probability between 0 and 1\u001b[39;49;00m\n",
      "                                              \u001b[37m# https://en.wikipedia.org/wiki/Sigmoid_function\u001b[39;49;00m\n",
      "\n",
      "    \u001b[34mfrom\u001b[39;49;00m \u001b[04m\u001b[36mkeras.optimizers\u001b[39;49;00m \u001b[34mimport\u001b[39;49;00m SGD\n",
      "    sgd = SGD(lr=learning_rate)\n",
      "    \n",
      "    \u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mkeras_metrics\u001b[39;49;00m\n",
      "    model.compile(loss=\u001b[33m'\u001b[39;49;00m\u001b[33mbinary_crossentropy\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m, optimizer=sgd, \n",
      "              metrics=[\u001b[33m'\u001b[39;49;00m\u001b[33mbinary_accuracy\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m, \n",
      "                   keras_metrics.precision(), \n",
      "                   keras_metrics.recall(),\n",
      "                   keras_metrics.f1_score()])\n",
      "\n",
      "    \u001b[34mprint\u001b[39;49;00m(model.summary())\n",
      "\n",
      "    \u001b[37m# Train\u001b[39;49;00m\n",
      "    model.fit(x_train, y_train, validation_data=(x_validation, y_validation), \n",
      "          epochs=epochs, batch_size=batch_size)\n",
      "    \n",
      "    \u001b[37m# Evaluate\u001b[39;49;00m\n",
      "    score = model.evaluate(x_validation, y_validation, verbose=\u001b[34m0\u001b[39;49;00m)\n",
      "    \u001b[34mprint\u001b[39;49;00m(\u001b[33m'\u001b[39;49;00m\u001b[33mValidation loss    :\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m, score[\u001b[34m0\u001b[39;49;00m])\n",
      "    \u001b[34mprint\u001b[39;49;00m(\u001b[33m'\u001b[39;49;00m\u001b[33mValidation accuracy:\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m, score[\u001b[34m1\u001b[39;49;00m])\n",
      "    \n",
      "    \u001b[37m# save Keras model for Tensorflow Serving\u001b[39;49;00m\n",
      "    \u001b[34mimport\u001b[39;49;00m \u001b[04m\u001b[36mkeras.backend\u001b[39;49;00m \u001b[34mas\u001b[39;49;00m \u001b[04m\u001b[36mK\u001b[39;49;00m\n",
      "    sess = K.get_session()\n",
      "    tf.saved_model.simple_save(\n",
      "        sess,\n",
      "        os.path.join(model_dir, \u001b[33m'\u001b[39;49;00m\u001b[33mmodel/1\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m),\n",
      "        inputs={\u001b[33m'\u001b[39;49;00m\u001b[33minputs\u001b[39;49;00m\u001b[33m'\u001b[39;49;00m: model.input},\n",
      "        outputs={t.name: t \u001b[34mfor\u001b[39;49;00m t \u001b[35min\u001b[39;49;00m model.outputs})\n"
     ]
    }
   ],
   "source": [
    "!pygmentize dm_keras_tf.py"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "1.50.9\n"
     ]
    }
   ],
   "source": [
    "import sagemaker\n",
    "import boto3\n",
    "\n",
     "print(sagemaker.__version__)\n",
    "\n",
    "sess   = sagemaker.Session()\n",
    "bucket = sess.default_bucket()                     \n",
    "prefix = 'sagemaker-autopilot/DEMO-hpo-keras-dm'\n",
    "region = boto3.Session().region_name\n",
    "\n",
    "# Role when working on a notebook instance\n",
    "role = sagemaker.get_execution_role()\n",
    "# Role when working locally\n",
    "# role = ROLE_ARN"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "We upload the raw dataset to S3, as the Keras script itself will perform basic preprocessing."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "s3://sagemaker-us-east-1-806570384721/sagemaker-autopilot/DEMO-hpo-keras-dm/training/bank-additional-full.csv\n"
     ]
    }
   ],
   "source": [
    "training_input_path = sess.upload_data('bank-additional/bank-additional-full.csv', key_prefix=prefix+'/training')\n",
    "\n",
    "print(training_input_path)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Configure Automatic Model Tuning"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sagemaker.tensorflow import TensorFlow\n",
    "\n",
    "tf_estimator = TensorFlow(entry_point='dm_keras_tf.py', \n",
    "                          role=role,\n",
    "                          train_instance_count=1, \n",
    "                          train_instance_type='ml.c5.2xlarge',\n",
    "                          framework_version='1.14', \n",
    "                          py_version='py3',\n",
    "                          script_mode=True,\n",
    "                          train_use_spot_instances=True,        # Use spot instance\n",
    "                          train_max_run=600,                    # Max training time\n",
    "                          train_max_wait=3600                   # Max training time + spot waiting time\n",
    "                         )"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Let's try to tune our Keras model on two architecture parameters: number of dense layers, and dense layer width.\n",
    "\n",
    "We're using the F1 metric again. It's not natively supported in Keras, and requires the addition of the keras-metrics package. Installation is done in the script itself. We also need to pass a regular expression so that SageMaker can locate and extract the metric from the training log."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sagemaker.tuner import IntegerParameter, ContinuousParameter, HyperparameterTuner\n",
    "\n",
    "hyperparameter_ranges = {\n",
    "    'epochs':        IntegerParameter(1, 5),\n",
    "    'learning-rate': ContinuousParameter(0.001, 0.1, scaling_type='ReverseLogarithmic'), # useful for values<1\n",
    "    'batch-size':    IntegerParameter(16, 1024, scaling_type='Logarithmic'),\n",
    "    'layers':        IntegerParameter(1, 4),\n",
    "    'dense-layer':   IntegerParameter(4, 64)\n",
    "}\n",
    "\n",
    "objective_metric_name = 'f1_score'\n",
    "objective_type = 'Maximize'\n",
    "metric_definitions = [{'Name': 'f1_score', 'Regex': 'val_f1_score: ([0-9\\\\.]+)'}]\n",
    "\n",
    "tuner = HyperparameterTuner(tf_estimator,\n",
    "                            objective_metric_name,\n",
    "                            hyperparameter_ranges,\n",
    "                            metric_definitions,\n",
    "                            max_jobs=20,\n",
    "                            max_parallel_jobs=2,\n",
    "                            objective_type=objective_type)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "tuner.fit({'training': training_input_path})"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "You can repeatedly run the cells below while the job is running."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Reminder: the tuning job has not been completed.\n",
      "10 training jobs have completed\n"
     ]
    }
   ],
   "source": [
     "sm_client = boto3.Session().client(service_name='sagemaker')\n",
     "\n",
     "job_name = tuner.latest_tuning_job.job_name\n",
     "\n",
     "# run this cell to check current status of hyperparameter tuning job\n",
     "tuning_job_result = sm_client.describe_hyper_parameter_tuning_job(HyperParameterTuningJobName=job_name)\n",
    "\n",
    "status = tuning_job_result['HyperParameterTuningJobStatus']\n",
    "if status != 'Completed':\n",
    "    print('Reminder: the tuning job has not been completed.')\n",
    "    \n",
    "job_count = tuning_job_result['TrainingJobStatusCounters']['Completed']\n",
    "print(\"%d training jobs have completed\" % job_count)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Inspect jobs with Amazon SageMaker Experiments"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "from sagemaker.analytics import HyperparameterTuningJobAnalytics\n",
    "\n",
    "exp = HyperparameterTuningJobAnalytics(\n",
    "    sagemaker_session=sess, \n",
     "    hyperparameter_tuning_job_name=tuner.latest_tuning_job.job_name\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "df = exp.dataframe()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>FinalObjectiveValue</th>\n",
       "      <th>TrainingElapsedTimeSeconds</th>\n",
       "      <th>TrainingEndTime</th>\n",
       "      <th>TrainingJobName</th>\n",
       "      <th>TrainingJobStatus</th>\n",
       "      <th>TrainingStartTime</th>\n",
       "      <th>batch-size</th>\n",
       "      <th>dense-layer</th>\n",
       "      <th>epochs</th>\n",
       "      <th>layers</th>\n",
       "      <th>learning-rate</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>0.3007</td>\n",
       "      <td>89.0</td>\n",
       "      <td>2020-02-04 18:33:13+00:00</td>\n",
       "      <td>tensorflow-training-200204-1757-020-79334ef9</td>\n",
       "      <td>Completed</td>\n",
       "      <td>2020-02-04 18:31:44+00:00</td>\n",
       "      <td>115.0</td>\n",
       "      <td>59.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.097707</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>0.1775</td>\n",
       "      <td>61.0</td>\n",
       "      <td>2020-02-04 18:31:51+00:00</td>\n",
       "      <td>tensorflow-training-200204-1757-019-c2d7394a</td>\n",
       "      <td>Completed</td>\n",
       "      <td>2020-02-04 18:30:50+00:00</td>\n",
       "      <td>42.0</td>\n",
       "      <td>62.0</td>\n",
       "      <td>4.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.005088</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>0.3861</td>\n",
       "      <td>42.0</td>\n",
       "      <td>2020-02-04 18:29:37+00:00</td>\n",
       "      <td>tensorflow-training-200204-1757-018-6035fda7</td>\n",
       "      <td>Completed</td>\n",
       "      <td>2020-02-04 18:28:55+00:00</td>\n",
       "      <td>62.0</td>\n",
       "      <td>49.0</td>\n",
       "      <td>3.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.075698</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>0.3189</td>\n",
       "      <td>90.0</td>\n",
       "      <td>2020-02-04 18:28:50+00:00</td>\n",
       "      <td>tensorflow-training-200204-1757-017-8133cb0d</td>\n",
       "      <td>Completed</td>\n",
       "      <td>2020-02-04 18:27:20+00:00</td>\n",
       "      <td>21.0</td>\n",
       "      <td>43.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>3.0</td>\n",
       "      <td>0.020413</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>0.0000</td>\n",
       "      <td>44.0</td>\n",
       "      <td>2020-02-04 18:25:07+00:00</td>\n",
       "      <td>tensorflow-training-200204-1757-016-2d479e6a</td>\n",
       "      <td>Completed</td>\n",
       "      <td>2020-02-04 18:24:23+00:00</td>\n",
       "      <td>438.0</td>\n",
       "      <td>57.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>4.0</td>\n",
       "      <td>0.079491</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>5</th>\n",
       "      <td>0.3574</td>\n",
       "      <td>122.0</td>\n",
       "      <td>2020-02-04 18:26:29+00:00</td>\n",
       "      <td>tensorflow-training-200204-1757-015-1df02159</td>\n",
       "      <td>Completed</td>\n",
       "      <td>2020-02-04 18:24:27+00:00</td>\n",
       "      <td>19.0</td>\n",
       "      <td>57.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.047803</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>6</th>\n",
       "      <td>0.3701</td>\n",
       "      <td>42.0</td>\n",
       "      <td>2020-02-04 18:22:28+00:00</td>\n",
       "      <td>tensorflow-training-200204-1757-014-71492293</td>\n",
       "      <td>Completed</td>\n",
       "      <td>2020-02-04 18:21:46+00:00</td>\n",
       "      <td>30.0</td>\n",
       "      <td>63.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.052032</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>7</th>\n",
       "      <td>0.4615</td>\n",
       "      <td>64.0</td>\n",
       "      <td>2020-02-04 18:22:13+00:00</td>\n",
       "      <td>tensorflow-training-200204-1757-013-917f8f1d</td>\n",
       "      <td>Completed</td>\n",
       "      <td>2020-02-04 18:21:09+00:00</td>\n",
       "      <td>16.0</td>\n",
       "      <td>61.0</td>\n",
       "      <td>4.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.076594</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>8</th>\n",
       "      <td>0.4563</td>\n",
       "      <td>63.0</td>\n",
       "      <td>2020-02-04 18:19:29+00:00</td>\n",
       "      <td>tensorflow-training-200204-1757-012-ac34996f</td>\n",
       "      <td>Completed</td>\n",
       "      <td>2020-02-04 18:18:26+00:00</td>\n",
       "      <td>17.0</td>\n",
       "      <td>60.0</td>\n",
       "      <td>4.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.076594</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>9</th>\n",
       "      <td>0.4573</td>\n",
       "      <td>44.0</td>\n",
       "      <td>2020-02-04 18:19:04+00:00</td>\n",
       "      <td>tensorflow-training-200204-1757-011-401e68a9</td>\n",
       "      <td>Completed</td>\n",
       "      <td>2020-02-04 18:18:20+00:00</td>\n",
       "      <td>46.0</td>\n",
       "      <td>39.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>0.091455</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>10</th>\n",
       "      <td>0.3701</td>\n",
       "      <td>45.0</td>\n",
       "      <td>2020-02-04 18:16:27+00:00</td>\n",
       "      <td>tensorflow-training-200204-1757-010-16079d2a</td>\n",
       "      <td>Completed</td>\n",
       "      <td>2020-02-04 18:15:42+00:00</td>\n",
       "      <td>46.0</td>\n",
       "      <td>37.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>0.089557</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>11</th>\n",
       "      <td>0.0043</td>\n",
       "      <td>115.0</td>\n",
       "      <td>2020-02-04 18:16:03+00:00</td>\n",
       "      <td>tensorflow-training-200204-1757-009-fc16c206</td>\n",
       "      <td>Completed</td>\n",
       "      <td>2020-02-04 18:14:08+00:00</td>\n",
       "      <td>443.0</td>\n",
       "      <td>29.0</td>\n",
       "      <td>4.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>0.052442</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>12</th>\n",
       "      <td>0.0000</td>\n",
       "      <td>66.0</td>\n",
       "      <td>2020-02-04 18:13:46+00:00</td>\n",
       "      <td>tensorflow-training-200204-1757-008-2e1c365d</td>\n",
       "      <td>Completed</td>\n",
       "      <td>2020-02-04 18:12:40+00:00</td>\n",
       "      <td>16.0</td>\n",
       "      <td>44.0</td>\n",
       "      <td>3.0</td>\n",
       "      <td>4.0</td>\n",
       "      <td>0.059202</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>13</th>\n",
       "      <td>0.3987</td>\n",
       "      <td>64.0</td>\n",
       "      <td>2020-02-04 18:12:05+00:00</td>\n",
       "      <td>tensorflow-training-200204-1757-007-9d4728c2</td>\n",
       "      <td>Completed</td>\n",
       "      <td>2020-02-04 18:11:01+00:00</td>\n",
       "      <td>32.0</td>\n",
       "      <td>50.0</td>\n",
       "      <td>4.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>0.051651</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>14</th>\n",
       "      <td>0.4340</td>\n",
       "      <td>208.0</td>\n",
       "      <td>2020-02-04 18:10:28+00:00</td>\n",
       "      <td>tensorflow-training-200204-1757-006-c5e1729e</td>\n",
       "      <td>Completed</td>\n",
       "      <td>2020-02-04 18:07:00+00:00</td>\n",
       "      <td>16.0</td>\n",
       "      <td>62.0</td>\n",
       "      <td>4.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.078520</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>15</th>\n",
       "      <td>0.0000</td>\n",
       "      <td>66.0</td>\n",
       "      <td>2020-02-04 18:08:39+00:00</td>\n",
       "      <td>tensorflow-training-200204-1757-005-6db98420</td>\n",
       "      <td>Completed</td>\n",
       "      <td>2020-02-04 18:07:33+00:00</td>\n",
       "      <td>22.0</td>\n",
       "      <td>51.0</td>\n",
       "      <td>5.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>0.065931</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>16</th>\n",
       "      <td>0.3805</td>\n",
       "      <td>72.0</td>\n",
       "      <td>2020-02-04 18:04:58+00:00</td>\n",
       "      <td>tensorflow-training-200204-1757-004-2fa5580f</td>\n",
       "      <td>Completed</td>\n",
       "      <td>2020-02-04 18:03:46+00:00</td>\n",
       "      <td>34.0</td>\n",
       "      <td>50.0</td>\n",
       "      <td>4.0</td>\n",
       "      <td>2.0</td>\n",
       "      <td>0.053629</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>17</th>\n",
       "      <td>0.3645</td>\n",
       "      <td>81.0</td>\n",
       "      <td>2020-02-04 18:05:04+00:00</td>\n",
       "      <td>tensorflow-training-200204-1757-003-73fdd176</td>\n",
       "      <td>Completed</td>\n",
       "      <td>2020-02-04 18:03:43+00:00</td>\n",
       "      <td>19.0</td>\n",
       "      <td>62.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>4.0</td>\n",
       "      <td>0.031993</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>18</th>\n",
       "      <td>0.0000</td>\n",
       "      <td>120.0</td>\n",
       "      <td>2020-02-04 18:01:37+00:00</td>\n",
       "      <td>tensorflow-training-200204-1757-002-bb2bb9ea</td>\n",
       "      <td>Completed</td>\n",
       "      <td>2020-02-04 17:59:37+00:00</td>\n",
       "      <td>46.0</td>\n",
       "      <td>26.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>4.0</td>\n",
       "      <td>0.006721</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>19</th>\n",
       "      <td>0.1008</td>\n",
       "      <td>77.0</td>\n",
       "      <td>2020-02-04 18:01:13+00:00</td>\n",
       "      <td>tensorflow-training-200204-1757-001-764cb40e</td>\n",
       "      <td>Completed</td>\n",
       "      <td>2020-02-04 17:59:56+00:00</td>\n",
       "      <td>95.0</td>\n",
       "      <td>48.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>4.0</td>\n",
       "      <td>0.028867</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "    FinalObjectiveValue  TrainingElapsedTimeSeconds           TrainingEndTime  \\\n",
       "0                0.3007                        89.0 2020-02-04 18:33:13+00:00   \n",
       "1                0.1775                        61.0 2020-02-04 18:31:51+00:00   \n",
       "2                0.3861                        42.0 2020-02-04 18:29:37+00:00   \n",
       "3                0.3189                        90.0 2020-02-04 18:28:50+00:00   \n",
       "4                0.0000                        44.0 2020-02-04 18:25:07+00:00   \n",
       "5                0.3574                       122.0 2020-02-04 18:26:29+00:00   \n",
       "6                0.3701                        42.0 2020-02-04 18:22:28+00:00   \n",
       "7                0.4615                        64.0 2020-02-04 18:22:13+00:00   \n",
       "8                0.4563                        63.0 2020-02-04 18:19:29+00:00   \n",
       "9                0.4573                        44.0 2020-02-04 18:19:04+00:00   \n",
       "10               0.3701                        45.0 2020-02-04 18:16:27+00:00   \n",
       "11               0.0043                       115.0 2020-02-04 18:16:03+00:00   \n",
       "12               0.0000                        66.0 2020-02-04 18:13:46+00:00   \n",
       "13               0.3987                        64.0 2020-02-04 18:12:05+00:00   \n",
       "14               0.4340                       208.0 2020-02-04 18:10:28+00:00   \n",
       "15               0.0000                        66.0 2020-02-04 18:08:39+00:00   \n",
       "16               0.3805                        72.0 2020-02-04 18:04:58+00:00   \n",
       "17               0.3645                        81.0 2020-02-04 18:05:04+00:00   \n",
       "18               0.0000                       120.0 2020-02-04 18:01:37+00:00   \n",
       "19               0.1008                        77.0 2020-02-04 18:01:13+00:00   \n",
       "\n",
       "                                 TrainingJobName TrainingJobStatus  \\\n",
       "0   tensorflow-training-200204-1757-020-79334ef9         Completed   \n",
       "1   tensorflow-training-200204-1757-019-c2d7394a         Completed   \n",
       "2   tensorflow-training-200204-1757-018-6035fda7         Completed   \n",
       "3   tensorflow-training-200204-1757-017-8133cb0d         Completed   \n",
       "4   tensorflow-training-200204-1757-016-2d479e6a         Completed   \n",
       "5   tensorflow-training-200204-1757-015-1df02159         Completed   \n",
       "6   tensorflow-training-200204-1757-014-71492293         Completed   \n",
       "7   tensorflow-training-200204-1757-013-917f8f1d         Completed   \n",
       "8   tensorflow-training-200204-1757-012-ac34996f         Completed   \n",
       "9   tensorflow-training-200204-1757-011-401e68a9         Completed   \n",
       "10  tensorflow-training-200204-1757-010-16079d2a         Completed   \n",
       "11  tensorflow-training-200204-1757-009-fc16c206         Completed   \n",
       "12  tensorflow-training-200204-1757-008-2e1c365d         Completed   \n",
       "13  tensorflow-training-200204-1757-007-9d4728c2         Completed   \n",
       "14  tensorflow-training-200204-1757-006-c5e1729e         Completed   \n",
       "15  tensorflow-training-200204-1757-005-6db98420         Completed   \n",
       "16  tensorflow-training-200204-1757-004-2fa5580f         Completed   \n",
       "17  tensorflow-training-200204-1757-003-73fdd176         Completed   \n",
       "18  tensorflow-training-200204-1757-002-bb2bb9ea         Completed   \n",
       "19  tensorflow-training-200204-1757-001-764cb40e         Completed   \n",
       "\n",
       "           TrainingStartTime  batch-size  dense-layer  epochs  layers  \\\n",
       "0  2020-02-04 18:31:44+00:00       115.0         59.0     1.0     1.0   \n",
       "1  2020-02-04 18:30:50+00:00        42.0         62.0     4.0     1.0   \n",
       "2  2020-02-04 18:28:55+00:00        62.0         49.0     3.0     1.0   \n",
       "3  2020-02-04 18:27:20+00:00        21.0         43.0     2.0     3.0   \n",
       "4  2020-02-04 18:24:23+00:00       438.0         57.0     2.0     4.0   \n",
       "5  2020-02-04 18:24:27+00:00        19.0         57.0     2.0     1.0   \n",
       "6  2020-02-04 18:21:46+00:00        30.0         63.0     2.0     1.0   \n",
       "7  2020-02-04 18:21:09+00:00        16.0         61.0     4.0     1.0   \n",
       "8  2020-02-04 18:18:26+00:00        17.0         60.0     4.0     1.0   \n",
       "9  2020-02-04 18:18:20+00:00        46.0         39.0     2.0     2.0   \n",
       "10 2020-02-04 18:15:42+00:00        46.0         37.0     2.0     2.0   \n",
       "11 2020-02-04 18:14:08+00:00       443.0         29.0     4.0     2.0   \n",
       "12 2020-02-04 18:12:40+00:00        16.0         44.0     3.0     4.0   \n",
       "13 2020-02-04 18:11:01+00:00        32.0         50.0     4.0     2.0   \n",
       "14 2020-02-04 18:07:00+00:00        16.0         62.0     4.0     1.0   \n",
       "15 2020-02-04 18:07:33+00:00        22.0         51.0     5.0     2.0   \n",
       "16 2020-02-04 18:03:46+00:00        34.0         50.0     4.0     2.0   \n",
       "17 2020-02-04 18:03:43+00:00        19.0         62.0     1.0     4.0   \n",
       "18 2020-02-04 17:59:37+00:00        46.0         26.0     1.0     4.0   \n",
       "19 2020-02-04 17:59:56+00:00        95.0         48.0     1.0     4.0   \n",
       "\n",
       "    learning-rate  \n",
       "0        0.097707  \n",
       "1        0.005088  \n",
       "2        0.075698  \n",
       "3        0.020413  \n",
       "4        0.079491  \n",
       "5        0.047803  \n",
       "6        0.052032  \n",
       "7        0.076594  \n",
       "8        0.076594  \n",
       "9        0.091455  \n",
       "10       0.089557  \n",
       "11       0.052442  \n",
       "12       0.059202  \n",
       "13       0.051651  \n",
       "14       0.078520  \n",
       "15       0.065931  \n",
       "16       0.053629  \n",
       "17       0.031993  \n",
       "18       0.006721  \n",
       "19       0.028867  "
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "'FinalObjectiveValue' is the F1 score — the objective metric optimized by the hyperparameter tuning job (higher is better)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>FinalObjectiveValue</th>\n",
       "      <th>TrainingElapsedTimeSeconds</th>\n",
       "      <th>TrainingEndTime</th>\n",
       "      <th>TrainingJobName</th>\n",
       "      <th>TrainingJobStatus</th>\n",
       "      <th>TrainingStartTime</th>\n",
       "      <th>batch-size</th>\n",
       "      <th>dense-layer</th>\n",
       "      <th>epochs</th>\n",
       "      <th>layers</th>\n",
       "      <th>learning-rate</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>7</th>\n",
       "      <td>0.4615</td>\n",
       "      <td>64.0</td>\n",
       "      <td>2020-02-04 18:22:13+00:00</td>\n",
       "      <td>tensorflow-training-200204-1757-013-917f8f1d</td>\n",
       "      <td>Completed</td>\n",
       "      <td>2020-02-04 18:21:09+00:00</td>\n",
       "      <td>16.0</td>\n",
       "      <td>61.0</td>\n",
       "      <td>4.0</td>\n",
       "      <td>1.0</td>\n",
       "      <td>0.076594</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   FinalObjectiveValue  TrainingElapsedTimeSeconds           TrainingEndTime  \\\n",
       "7               0.4615                        64.0 2020-02-04 18:22:13+00:00   \n",
       "\n",
       "                                TrainingJobName TrainingJobStatus  \\\n",
       "7  tensorflow-training-200204-1757-013-917f8f1d         Completed   \n",
       "\n",
       "          TrainingStartTime  batch-size  dense-layer  epochs  layers  \\\n",
       "7 2020-02-04 18:21:09+00:00        16.0         61.0     4.0     1.0   \n",
       "\n",
       "   learning-rate  \n",
       "7       0.076594  "
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "df.sort_values('FinalObjectiveValue', ascending=False)[:1]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "How does this compare to what you achieved in the first two labs?"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "conda_python3",
   "language": "python",
   "name": "conda_python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
