{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "kR-4eNdK6lYS"
   },
   "source": [
    "Deep Learning\n",
    "=============\n",
    "\n",
    "Assignment 3\n",
    "------------\n",
    "\n",
    "Previously in `2_fullyconnected.ipynb`, you trained a logistic regression and a neural network model.\n",
    "\n",
    "The goal of this assignment is to explore regularization techniques."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "cellView": "both",
    "colab": {
     "autoexec": {
      "startup": false,
      "wait_interval": 0
     }
    },
    "colab_type": "code",
    "id": "JLpLa8Jt7Vu4"
   },
   "outputs": [],
   "source": [
    "# These are all the modules we'll be using later. Make sure you can import them\n",
    "# before proceeding further.\n",
    "from __future__ import print_function\n",
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "from six.moves import cPickle as pickle"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Some personal imports\n",
    "import matplotlib.pyplot as plt\n",
    "%matplotlib inline"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "1HrCK6e17WzV"
   },
   "source": [
    "First reload the data we generated in `1_notmnist.ipynb`."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "cellView": "both",
    "colab": {
     "autoexec": {
      "startup": false,
      "wait_interval": 0
     },
     "output_extras": [
      {
       "item_id": 1
      }
     ]
    },
    "colab_type": "code",
    "executionInfo": {
     "elapsed": 11777,
     "status": "ok",
     "timestamp": 1449849322348,
     "user": {
      "color": "",
      "displayName": "",
      "isAnonymous": false,
      "isMe": true,
      "permissionId": "",
      "photoUrl": "",
      "sessionId": "0",
      "userId": ""
     },
     "user_tz": 480
    },
    "id": "y3-cj1bpmuxc",
    "outputId": "e03576f1-ebbe-4838-c388-f1777bcc9873"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training set (200000, 28, 28) (200000,)\n",
      "Validation set (10000, 28, 28) (10000,)\n",
      "Test set (10000, 28, 28) (10000,)\n"
     ]
    }
   ],
   "source": [
    "pickle_file = 'notMNIST.pickle'\n",
    "\n",
    "with open(pickle_file, 'rb') as f:\n",
    "  save = pickle.load(f)\n",
    "  train_dataset = save['train_dataset']\n",
    "  train_labels = save['train_labels']\n",
    "  valid_dataset = save['valid_dataset']\n",
    "  valid_labels = save['valid_labels']\n",
    "  test_dataset = save['test_dataset']\n",
    "  test_labels = save['test_labels']\n",
    "  del save  # hint to help gc free up memory\n",
    "  print('Training set', train_dataset.shape, train_labels.shape)\n",
    "  print('Validation set', valid_dataset.shape, valid_labels.shape)\n",
    "  print('Test set', test_dataset.shape, test_labels.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "L7aHrm6nGDMB"
   },
   "source": [
    "Reformat into a shape that's more adapted to the models we're going to train:\n",
    "- data as a flat matrix,\n",
    "- labels as float 1-hot encodings."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "cellView": "both",
    "colab": {
     "autoexec": {
      "startup": false,
      "wait_interval": 0
     },
     "output_extras": [
      {
       "item_id": 1
      }
     ]
    },
    "colab_type": "code",
    "executionInfo": {
     "elapsed": 11728,
     "status": "ok",
     "timestamp": 1449849322356,
     "user": {
      "color": "",
      "displayName": "",
      "isAnonymous": false,
      "isMe": true,
      "permissionId": "",
      "photoUrl": "",
      "sessionId": "0",
      "userId": ""
     },
     "user_tz": 480
    },
    "id": "IRSyYiIIGIzS",
    "outputId": "3f8996ee-3574-4f44-c953-5c8a04636582"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training set (200000, 784) (200000, 10)\n",
      "Validation set (10000, 784) (10000, 10)\n",
      "Test set (10000, 784) (10000, 10)\n"
     ]
    }
   ],
   "source": [
    "image_size = 28\n",
    "num_labels = 10\n",
    "\n",
    "def reformat(dataset, labels):\n",
    "  dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32)\n",
    "  # Map 1 to [0.0, 1.0, 0.0 ...], 2 to [0.0, 0.0, 1.0 ...]\n",
    "  labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)\n",
    "  return dataset, labels\n",
    "train_dataset, train_labels = reformat(train_dataset, train_labels)\n",
    "valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)\n",
    "test_dataset, test_labels = reformat(test_dataset, test_labels)\n",
    "print('Training set', train_dataset.shape, train_labels.shape)\n",
    "print('Validation set', valid_dataset.shape, valid_labels.shape)\n",
    "print('Test set', test_dataset.shape, test_labels.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "cellView": "both",
    "colab": {
     "autoexec": {
      "startup": false,
      "wait_interval": 0
     }
    },
    "colab_type": "code",
    "id": "RajPLaL_ZW6w"
   },
   "outputs": [],
   "source": [
    "def accuracy(predictions, labels):\n",
    "  return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))\n",
    "          / predictions.shape[0])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "sgLbUAQ1CW-1"
   },
   "source": [
    "---\n",
    "Problem 1\n",
    "---------\n",
    "\n",
    "Introduce and tune L2 regularization for both logistic and neural network models. Remember that L2 amounts to adding a penalty on the norm of the weights to the loss. In TensorFlow, you can compute the L2 loss for a tensor `t` using `nn.l2_loss(t)`. The right amount of regularization should improve your validation / test accuracy.\n",
    "\n",
    "---"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Let's start with the logistic model:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "batch_size = 128\n",
    "\n",
    "graph = tf.Graph()\n",
    "with graph.as_default():\n",
    "\n",
    "  # Input data. For the training data, we use a placeholder that will be fed\n",
    "  # at run time with a training minibatch.\n",
    "  tf_train_dataset = tf.placeholder(tf.float32,\n",
    "                                    shape=(batch_size, image_size * image_size))\n",
    "  tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n",
    "  tf_valid_dataset = tf.constant(valid_dataset)\n",
    "  tf_test_dataset = tf.constant(test_dataset)\n",
    "  beta_regul = tf.placeholder(tf.float32)\n",
    "  \n",
    "  # Variables.\n",
    "  weights = tf.Variable(\n",
    "    tf.truncated_normal([image_size * image_size, num_labels]))\n",
    "  biases = tf.Variable(tf.zeros([num_labels]))\n",
    "  \n",
    "  # Training computation.\n",
    "  logits = tf.matmul(tf_train_dataset, weights) + biases\n",
    "  loss = tf.reduce_mean(\n",
    "    tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels)) + beta_regul * tf.nn.l2_loss(weights)\n",
    "  \n",
    "  # Optimizer.\n",
    "  optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)\n",
    "  \n",
    "  # Predictions for the training, validation, and test data.\n",
    "  train_prediction = tf.nn.softmax(logits)\n",
    "  valid_prediction = tf.nn.softmax(\n",
    "    tf.matmul(tf_valid_dataset, weights) + biases)\n",
    "  test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Initialized\n",
      "Minibatch loss at step 0: 21.668259\n",
      "Minibatch accuracy: 9.4%\n",
      "Validation accuracy: 7.9%\n",
      "Minibatch loss at step 500: 2.643234\n",
      "Minibatch accuracy: 78.1%\n",
      "Validation accuracy: 76.3%\n",
      "Minibatch loss at step 1000: 1.774686\n",
      "Minibatch accuracy: 78.9%\n",
      "Validation accuracy: 78.2%\n",
      "Minibatch loss at step 1500: 1.071145\n",
      "Minibatch accuracy: 82.0%\n",
      "Validation accuracy: 79.4%\n",
      "Minibatch loss at step 2000: 0.842100\n",
      "Minibatch accuracy: 85.9%\n",
      "Validation accuracy: 80.3%\n",
      "Minibatch loss at step 2500: 0.831049\n",
      "Minibatch accuracy: 79.7%\n",
      "Validation accuracy: 81.2%\n",
      "Minibatch loss at step 3000: 0.764314\n",
      "Minibatch accuracy: 84.4%\n",
      "Validation accuracy: 81.6%\n",
      "Test accuracy: 88.8%\n"
     ]
    }
   ],
   "source": [
    "num_steps = 3001\n",
    "\n",
    "with tf.Session(graph=graph) as session:\n",
    "  tf.initialize_all_variables().run()\n",
    "  print(\"Initialized\")\n",
    "  for step in range(num_steps):\n",
    "    # Pick an offset within the training data, which has been randomized.\n",
    "    # Note: we could use better randomization across epochs.\n",
    "    offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n",
    "    # Generate a minibatch.\n",
    "    batch_data = train_dataset[offset:(offset + batch_size), :]\n",
    "    batch_labels = train_labels[offset:(offset + batch_size), :]\n",
    "    # Prepare a dictionary telling the session where to feed the minibatch.\n",
    "    # The key of the dictionary is the placeholder node of the graph to be fed,\n",
    "    # and the value is the numpy array to feed to it.\n",
    "    feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels, beta_regul : 1e-3}\n",
    "    _, l, predictions = session.run(\n",
    "      [optimizer, loss, train_prediction], feed_dict=feed_dict)\n",
    "    if (step % 500 == 0):\n",
    "      print(\"Minibatch loss at step %d: %f\" % (step, l))\n",
    "      print(\"Minibatch accuracy: %.1f%%\" % accuracy(predictions, batch_labels))\n",
    "      print(\"Validation accuracy: %.1f%%\" % accuracy(\n",
    "        valid_prediction.eval(), valid_labels))\n",
    "  print(\"Test accuracy: %.1f%%\" % accuracy(test_prediction.eval(), test_labels))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "The L2 regularization introduces a new hyperparameter that must be tuned. Since I do not have a good prior for its value, I will plot the test accuracy against the hyperparameter value (on a logarithmic scale)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "num_steps = 3001\n",
    "regul_val = [pow(10, i) for i in np.arange(-4, -2, 0.1)]\n",
    "accuracy_val = []\n",
    "\n",
    "for regul in regul_val:\n",
    "  with tf.Session(graph=graph) as session:\n",
    "    tf.initialize_all_variables().run()\n",
    "    for step in range(num_steps):\n",
     "      # Pick an offset within the training data, which has been randomized.\n",
     "      # Note: we could use better randomization across epochs.\n",
    "      offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n",
     "      # Generate a minibatch.\n",
    "      batch_data = train_dataset[offset:(offset + batch_size), :]\n",
    "      batch_labels = train_labels[offset:(offset + batch_size), :]\n",
     "      # Prepare a dictionary telling the session where to feed the minibatch.\n",
     "      # The key of the dictionary is the placeholder node of the graph to be fed,\n",
     "      # and the value is the numpy array to feed to it.\n",
    "      feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels, beta_regul : regul}\n",
    "      _, l, predictions = session.run(\n",
    "        [optimizer, loss, train_prediction], feed_dict=feed_dict)\n",
    "    accuracy_val.append(accuracy(test_prediction.eval(), test_labels))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAX0AAAEMCAYAAAAoB2Y1AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4wLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvpW3flQAAIABJREFUeJzt3Xd8VFX6x/HPkwKBAKEHCB2kSSco\ngiir2Au2XXHtLou6uq6667quupZdXde1rP5si70XbKBYsEVFBaWH3gkkRHpJCKnn98e9cceQBplk\nZjLf9+s1r8zce869z505eebOmTP3mHMOERGJDjGhDkBEROqOkr6ISBRR0hcRiSJK+iIiUURJX0Qk\niijpi4hEESV9CXtmlmBmzsw6hjqWA2VmM83sghrUX21mRwQ5poZmlmNmHYK53YDtP2hmV/j3TzSz\nVUHY5kHHbGZ3mNkj1Sj3qJldenARRg4l/SDwG2PprcTM8gIen1+D7dYoYUjkc871cM59V5NtlG1H\nzrl851wT51xWzSPcb18pwDnAM8HcbnVjLu9Nxjl3m3Pu6mrs5t/AbWYWW5NYw52SfhD4jbGJc64J\nkAGcFrDs5VDHV1vMLC7UMdRUuB5DuMZVDZcB7zrnCkIdyIFyzq0DNgAnhTiUWqWkXwfMLNbMbjWz\nNWa21cxeNrPm/rpEM3vNzLab2U4zm2VmLczsfmA48JT/ieH+crYbZ2ZvmdmPft0vzKx3wPpEM3vY\nzDaY2S4z+7I0mZjZGP8McJeZZZjZr/3lPzsrNLMrzOxT/35pN8uVZrYaWOQvf9zMNprZbjP73sxG\nlInxNv/Yd5vZD2bWzsyeNrO7yhzPdDO7spKn8gwzW2dmW8zsLvM09rd7SMB2OprZ3tLnuMw+rjCz\nz/2P8juAv/jLLzez5f7rMM0/Yy2tc4qZrfSf4/8EPkdmdo+ZPRVQto+ZFZUXvL8uzd/HFjN73sya\nBqzPNrM/mdliYHfAsiP9NhT4iTLXfy3amVkbM/vQ3+Z2M5tiZu39+vu1IyvTXWZmLc3sFb/+WjP7\ns5lZwPP1md+OdprX3TS2ktfoJODLilaa2QAz+9rf1kIzOylgXVv/OHb7z/E95bS90pjHmdkyM9vj\nt+9rzKwV8A7QPeB5alXOa1Ru2/elAadUcnyRzzmnWxBvwDpgbJllNwJfAx2ABOA54Fl/3R+AN4FG\nQBzeP2iiv24mcEEl+4oDLgKa+Nt9HJgZsP5pYDrQDogFRvt/ewI5wNn+NtoAg8rbJ3AF8Kl/PwFw\nwDSgOdDIX34R0AKIB27GO1uK99fdCszz9xkDDPHrHgWsBcwv1wHYC7Qs5zhL9/uxX7cbsKY0Tryu\nhDvKPN+TK3jOrgCKgN/6z0Uj4FxgKdDLP4Z/AF/45dv7z9Wp/ro/A4UB+74HeCpg+32AooDHMwPK\n9gGOARr4r8lM4J6AstnAD/5z0Shg2ZHlHMcDwKf+MSQD4/xjSQKmAK+VF0OZ57Oj//gNYLLfjnr6\nr8v5Ac9Xof8axwLXAesqaZN7gAEBj08EVgXsNwP4o/9cnuA/t9389e8CL/jHMRDYxP5trzTmbcBh\n/v1WwJCy+wuI4afXiEravr/+18C3oc4jtXkLeQD17Ub5SX8tMCrgcTe8BGfA7/DOjPqXs61Kk345\n5dsBJf4/SLz/z9q7nHJ3AK9WsI3qJP2RlcRg/rH19h+vB06ooNwaYLT/+E/A2xVss3S/YwKWXQ9M\n8+8fHfiPDqQDp1ewrSuAFWWWfVGa5PzHpc9dMjAR/w3AXxcDbOYgkn45sYwHvgt4nA38ukyZ/ZI+\nXgJeRTlvkP76EcCmSl7TnxIo0BAoBroHrP8D8FHA87UoYF1Lv27zcvYb66/rGrAsMOkf57cHC1j/\nDt6nrQS/7XYJWHdfOW2vNOlvBi4Fmp
aJoaqkX2Hb99efBiyp7v9cJN7UvVPL/I/JnYAP/I+0O/HO\nfGPwzlCexkv6b/pdJHdbNb9I8rtO7i/tOgGW4SXTVnhnqHHA6nKqdqpgeXVtKBPHTX7XyC5gB94/\naGv/2FPK25fz/sNeAEq7ki4AXjyA/a7HOyMG+AqINbMjzGww3rF/WN34gS7AEwGvzxa8TwMd/X38\nVN45VwJkVhFnucysg5lNNrNM//V6CmhdRWxlt3E4cD8wzjm33V/W1Mye8bsqduN9uiu73Yq0w2uL\nGQHL1uO9bqWyA+7v9f82Kbsh51wx3pl+07LrfB2ADP+1L7uvdnhtd2PAusqei3F4Z+sZfnfd8ErK\nBqqq7TcFdlZzWxFJSb+W+Q08EzjGOdc84JbgnNvqvFEJf3PO9cHr8vgl3hkgeGc2lbkU7+zpF3gf\n6/v4yw3vo3ER0KOcehsqWA6QCzQOeNyuvMMqvWNmxwG/B87E63ppCeThnc2VHntF+3oBOMfMhuH9\nM06roFypTgH3OwNZsN8byIV4XRuFlWyn7PO6AbikzOvTyDk3B+95/GmoqJnF8POEWJ3nq9S//fL9\nnXPNgAl4r1Vlsf3EvOGKbwETnHOLA1b9xY9xuL/d48tst7J2lI13ht05YFlnDvKNDViI101Wnqwy\n+wncVzZenIHPbScq4Jz7zjl3Kt6nsenAK6WrqoivsrYP0BdYUMU2IpqSft14ArjHzDrBT19Ynebf\nH2tm/fxkshsvUZf49X4Euley3abAPrz+zUS8vmgA/KT3AvCQmSX7XwQe6X+KeBE41czO9D8ttDGz\ngX7V+XiJOMHM+gCXVHFsTfG6Qrbg9VXfiXemX+op4G4z626eIeZ/weqcWwMsAZ4FXndVj/i40cyS\nzKwrcDXwesC6F4BfAef59w/EE8At5n8Jbt4X6Wf766YCh5vZyeZ9CX493vcXpeYDvzCzFDNrgfd9\nQkWa4vUn7zazzv62qsXMGgBvA/91zk0pZ7t7gZ1m1hq4pcz6CtuRcy4fr4vlbvO++O+B173zUnVj\nK+MDvO628nwNxJjZtX67Ow7vDeoN59w+4D3gDr/t9cfrX9+PH+d4M2uG1/b28PP/mbZmtt8nEV9l\nbR8/9so+JUY8Jf26cS/el26fm9ke4FtgqL8uBe+Ltz14o2E+4H/J7EHgIjPbYWb3lrPdp/GSbTZe\nP/aMMuuvwfsoOw/vjeHveGfgq/A+Hv8V2A7MBg4NiDXO3+4kqv7nfw+ve2U1Xh/9Vr9uqXvwzuA/\nx3tTewKvH7nU88AAqu7awd/OAj/eyYGxOedWA8uBPc6576uxrZ84514FHgHe9rtH5uN9gsI5twnv\njeRh/9g64j3X+QExvY/35jUT78vIivwNOBLYhZdo3zqAMLsDh+O98QWO4mmL1/fdGu81noHXhgJV\n1Y4u9/+ux3udngIOdqjxc3ijrBqUXeEn9lPxxvFvw/sy+lz/zb80jg547ecp4FX+9zyXdZkf7y68\n7zgu8pcvwHujXu9317UsE0OFbd/MuuB19VX1iTOilY6cEAkJMzseeMw51zMI23oF70u4f1RZ+OD3\nEYf3Jnuaq+GPpuorM3sA78vyJ2q4nYeABOfc5VUWDgIzexSY45wL6g/Lwo2SvoRMQJfFV8658s5A\nD2RbPYG5QF/n3MH2R1e07ZPwPp3l4w1JvRjoWY3uKDkAfpeOw/vUdATeGfd5zrmPQhpYPaPuHQkJ\nf5TNDrz+6EdruK178bqw7gx2wveV/qZgM3AscKYSfq1IwusuzMXruvuHEn7w6UxfRCSK6ExfRCSK\nKOmLiESRsLuSX+vWrV3Xrl0Pun5ubi6JiYnBC0jkAKj9SajMmTNnq3OuTVXlwi7pd+3aldmzZx90\n/bS0NMaMGRO8gEQOgNqfhIqZra9OOXXviIhEESV9EZEooqQvIhJFlPRFRKKIkr6ISBRR0hcRiSJh\nN2
RTROo35xz5RSXk5BeRm1/k/y0mt6CIHq2b0LlV46o3IgdNSV8kwu0tKGJJ1m7SM3exK6+yCcNq\nl3OQX1TC3oKinxJ6bn5xwH1/eUExxSXlX/PLDI7vl8zEo3owrEuLcstIzSjpi0SQfYXFLNm0m/SN\nu1i4cRfpmTtZtTmHCnJonYuPNRIbxpHYII4mDeNIbBhL04Q42iclkNjQW9a4QexP972/3uNG8bGk\nLd/CizPX8/HiHxnWpQUTj+rO2L7JxMaUnVVSDpaSvkiY2ldYzLLsPaRn7iJ9404WbtzFys05P50l\nt27SgIEdm3Ni//YMTEliQMck2jZtWMVWa5dZzZJzateWXDmmB5Nnb+CpGWu5/MU5dGudyITR3Th7\naEcS4mODFGn0UtIXCRMZ2/YyY9VW0jO9BL88ew9FfoJvmdiAASlJHNcvmf4pSQzsmES7Zgk1TrLh\nKLFhHJeM6sYFI7rw0eJsJn21hpvfWcQD01dw0RFdufCILrRM3G82RqmmaiV9M7sOmIA3q006cCkw\nEm9uzgbAHOA3zrmicupezP8mav6Hc+75IMQtUm8453jl+wzueG8JBUUlJDWKZ2DHJCYe1Z0B/hl8\nSvNG9TLBVyYuNoZTB3bglAHtmbV2O5O+WsODn67g8S9X8cthnfjNkd3o2loXtztQVSZ9M0vBm2C7\nn3Muz8zewJul/g7gWOfcCjO7E28KuafL1G0J3Aak4r1hzDGzqc65HUE+DpGIlJtfxF/fSWfK/CyO\n7tWG208/lK6tGkddgq+MmTGieytGdG/Fyh/38OTXa3j9hw28NGs9J/Rrx8SjuzO0s770ra7qdu/E\nAY3MrBBojDedWYFzboW//hPgJsokfeAE4BPn3HYAM/sEOBFvlnuRqLY8ew+/e3kOa7fmcsMJvbny\n6B7E6AvLSh2S3JR7zxnEn47vzXPfruOlmev5aHE2w7u24LejvS999RxWrsqk75zLNLP7gAwgD5gO\nvAHca2apzrnZwDlAp3KqpwAbAh5v9JeJRLU352zklnfTadIwnpcmHM7IHq1DHVJEadssgT+f2Ier\nftGT13/YwNMz1jLxxTmYQWIDb9TQTyOEGvx8lFDpqKHAZaXLe7VtSlLj+FAfXq2qTvdOC2Ac0A3Y\nCUwGzgfGAw+aWUO8N4Ligw3CzCYCEwGSk5NJS0s72E2Rk5NTo/oiNVFV+ysodry0tICvNhbRp2UM\nVwyKoWDDItI2VFhFqtAduPMwY87mhmzYU8K+Ise+omL2FRexbx9szXFsLMZf7sgrhqKS8reVGA+/\n7tOAkR3i6m0XW3W6d8YCa51zWwDM7G1gpHPuJWC0v+x4oFc5dTOBMQGPOwJpZQs55yYBkwBSU1Nd\nTSah0CQWEkqVtb81W3L43ctzWZZdxO+P6cm1Y3tp/HkQHXsAZQuLS372a+Cc/CJ25RXw6BereTJ9\nB6sKkrj7rAG0T2pUa/GGSnWSfgYwwswa43XvHAvMNrO2zrnN/pn+jcBd5dT9GLjb/7QAcDxe379I\nVHl/YRY3vrmQBnExPHfpcMb0bhvqkKJafGwMzRs3oHnjnw/9PLpXW57/dh33fryM4x/4iptP6cu5\nwzvVq7P+Ki+45pybBbwJzMUbrhmDd1Z+g5ktBRYC7znnPgcws1Qze8qvux34O/CDf7uz9EtdkWiQ\nX1TMbVMWcfUr8+jTvhnTrhmthB/GYmOMy47sxsfXHsWhKc34y9vpXPD0LDZs3xvq0ILGnAuT32/7\nUlNTnebIlUgV2P42bN/LVa/MZeHGXfx2dDf+fGIf4mN1YdtIUVLi/X7inx8sxQE3ntiHC0d0CdvR\nQWY2xzmXWlU5tUCRWvDJkh855eGvWbs1l0kXDuPmU/op4UeYmBjjghFdmH790Qzr0oLbpi5m/KSZ\nrN2aG+rQakStUCSIikocd3+wlN++MJsurRKZ9vvRHH9ou1CHJTWQ
0rwRL1x2GPeeM5Cl2bs58T9f\n8eRXayq8Umi407V3RIJk06487vl+H6t2ruHCEV245dS+NIzTBcLqAzPjV6mdOLpXG25+J527PljK\ntPRN/PucgRyS3DTU4R0QnemL1NCuvYXcP305xz3wFRv3lPDweUP4+xn9lfDroeRmCTx5USoPjR/M\n+m25nPLwDB79YhWFxRUM/A9DOtMXOUi78gp5ZsZanpmxlj35RZw8oB1HNd/F6YM6hDo0qUVmxrjB\nKYzs0Zrbpy7m3x8v58NFm7j37EH069As1OFVSWf6Igdoz75CHv5sJaP/9TkPfbaSkT1b8eEfRvPY\n+cNol6h/qWjRpmlDHj1/KI+fP5TsXfs4/ZEZPPL5SkrCvK9fZ/oi1ZSTX8Tz367jya/XsHNvIWP7\nJnPt2EPon5IU6tAkhE4a0J4R3Vvxt6mLuW/6ChZu3MUD5w6mScPwTK/hGZVIGMnNL+KF79Yz6avV\n7NhbyDF92nLt2EMY2LF5qEOTMNEisQEPjx/M4E7NufuDpZz12DdMujA1LK/3r6QvUoG8gmJenLmO\n/365hm25BYzp3YZrx/ZicCcle9mfmfGbI7vRp11Trnplrtfd8+uhHNWrTahD+xklfZEy9hUW89LM\n9Tzx5Wq25hQw+pDWXDu2F8O6aKIOqdqonq2ZetWRTHxxNpc8+z1/OakPvx3dPWyu36OkL+LbV1jM\nq99n8FjaarbsyWdUz1Y8MbYXqV1bhjo0iTCdWzXmrStH8qfJC7j7g2UsydrNPWcPDIuJ3ZX0RfAS\n/umPzGDFjzmM6N6SR84bwuHdW4U6LIlgiQ3jeOz8oTz6xSru/2QFq7fk8t8Lh9GheWgv16zxZSLA\nY1+sYsWPOTx2/lBem3iEEr4EhZlx9TGH8OSFqazdmsvpj8zg+7WhvdCwkr5EvVWbc3j8y9WcOSSF\nkwe0D3U4Ug+N7ZfMu1eNpGlCPL9+ciYvzVwfsliU9CWqOee45d10GsXH8teT+4Y6HKnHerZtyrtX\njeLIQ1pzy7uL+Os76RRUNG9jLVLSl6j2zrxMZq7Zzo0n9aFN04ahDkfquaRG8Tx98XCuHNODV2Zl\ncP5TM9myJ79OY1DSl6i1c28Bd01bypDOzTlveOdQhyNRIjbGuPHEPvzfeUNIz9zF6Y/MYOHGnXW2\nfyV9iVr/+mg5O/MKueuMAWE7G5LUX6cN6sBbV44kxoxfPvEd78zbWCf7VdKXqDRn/XZe/T6Dy0Z1\njYgrI0r9dGiHJKZePYrBnZpz3esL+Mf7S2r9gm1K+hJ1CotLuPmdRbRPSuDasb1CHY5EuVZNGvLS\nhMO5+Igu7MwrpLZ/uKsfZ0nUefabtSzL3sN/LxxGYpheCVGiS3xsDHeM609xiav1yzXoTF+iSubO\nPB78ZCVj+7bl+H7JoQ5H5Gdi6+C7JSV9iSq3T13s/T390LC5AJZIXVLSl6gxfXE2nyz5kWvHHkLH\nFo1DHY5ISCjpS1TIzS/i9qmL6Z3clMuO7BbqcERCRt9iSVR46LOVZO3ax5vnDSE+Vuc6Er3U+qXe\nW7ppN0/PWMv44Z10bXyJekr6Uq+VlDhufiedpEbx/OWkPqEORyTklPSlXnt99gbmZuzk5pP70rxx\ng1CHIxJySvpSb23NyeeeD5dxeLeWnDU0JdThiIQFJX2pt+6etpS9BUXcdWZ/jckX8SnpS7307eqt\nvD0vk8uP6kHPtk1DHY5I2KhW0jez68xssZktMrNXzSzBzI41s7lmNt/MZphZz3LqdTWzPL/MfDN7\nIviHIPJz+UXF3PLuIjq3bMzVx+zXLEWiWpXj9M0sBbgG6OecyzOzN4DxwF+Bcc65pWb2O+AW4JJy\nNrHaOTc4iDGLVGrSl2tYsyWX5y4dTkJ8bKjDEQkr1e3eiQMamVkc0BjIAhxQeiHyJH+ZSEit25rL\n/32xilMGtmdM77ahDkck7FR5
pu+cyzSz+4AMIA+Y7pybbmYTgA/MLA/YDYyoYBPdzGyeX+YW59zX\nZQuY2URgIkBycjJpaWkHdTAAOTk5Naovkcs5x/2z84lxJYxtuTMk7UDtT8Jddbp3WgDjgG7ATmCy\nmV0AnAWc7JybZWY3AA8AE8pU3wR0ds5tM7NhwLtmdqhzbndgIefcJGASQGpqqhszZsxBH1BaWho1\nqS+R670FWSzaNo/bT+vHmaNCc30dtT8Jd9Xp3hkLrHXObXHOFQJvA6OAQc65WX6Z14GRZSs65/Kd\nc9v8+3OA1YCmKpKg25FbwB3vLWFAShIXHtE11OGIhK3qJP0MYISZNTZvsPOxwBIgycxKE/hxwNKy\nFc2sjZnF+ve7A4cAa4ISuUiAW6csYldeAfecPaBOJqIQiVTV6dOfZWZvAnOBImAeXlfMRuAtMysB\ndgCXAZjZ6UCqc+5vwFHAnWZWCJQAVzjnttfKkUjUem9BFu8v3MSfju/FoR2SQh2OSFir1qWVnXO3\nAbeVWfyOfytbdiow1b//FvBWDWMUqdDm3fu4dcoiBnVqzhVH9wh1OCJhT7/IlYjlnOOmt9PJKyjm\n/l8OIk7XyRepkv5LJGJNnr2Rz5Zt5s8n9qFn2yahDkckIijpS0TasH0vd76/hMO7teTSkV1DHY5I\nxFDSl4hTUuL485sLcc5x3y8HEaPROiLVpqQvEef579bx3Zpt3HpqPzq1bBzqcEQiipK+RJTVW3K4\n58NljOndhnOHdwp1OCIRR0lfIkZRcQl/fGMBCfGx/OvsgZoYReQgVGucvkg4+O9Xa5i/YScPnzeE\n5GYJoQ5HJCLpTF8iwpKs3fzn0xWcMqA9pw1sH+pwRCKWkr6EvYKiEq5/Yz5JjRrw9zM0361ITah7\nR8LeQ5+tYFn2Hp66KJWWiQ1CHY5IRNOZvoS1eRk7eDxtNb8c1pGx/ZJDHY5IxFPSl7CVV1DMH99Y\nQPukRtx6Wr9QhyNSL6h7R8LWvR8vY83WXF6ecDjNEuJDHY5IvaAzfQlL367eyrPfrOPiI7owqmfr\nUIcjUm8o6UvY2bOvkBsmL6Rb60T+clLfUIcjUq+oe0fCzj/eX8qmXXlMvmIkjRrEhjockXpFZ/oS\nVj5f9iOvz97A5Uf3YFiXFqEOR6TeUdKXsLEjt4Ab30qnT7umXDv2kFCHI1IvqXtHwsLegiL+/NZC\ndu4t4LlLh9MwTt06IrVBSV9CyjnHx4uzufO9JWTt2sctp/Tl0A5JoQ5LpN5S0peQWb8tl9umLiZt\n+Rb6tGvKQ+cNYXjXlqEOS6ReU9KXOrevsJjH01bz+JeriY8xbjmlL5eM7EpcrL5iEqltSvpSp75Y\nvpnbpy5m/ba9nDaoA7ec0lfXxhepQ0r6UieyduZx53tL+GhxNt3bJPLyhMP1S1uREFDSl1pVUFTC\n0zPW8vBnK3E4bjihNxNGd9PoHJEQUdKXWvPd6m3cOmURqzbncFy/ZP52aj86tWwc6rBEopqSvgTd\n5t37uOuDpUyZn0Wnlo145pJUjumja+GLhAMlfQmaouISXpy5ngemryC/qIRrjunJ737Rk4R4deWI\nhAslfQmK3Pwizn9qFvM37OSoXm244/RD6dY6MdRhiUgZSvpSY845/vzWQhZu3Ml/zh3MuMEdNHm5\nSJiq1q9hzOw6M1tsZovM7FUzSzCzY81srpnNN7MZZtazgro3mdkqM1tuZicEN3wJB09+vYZpCzdx\nwwl9OGNIihK+SBirMumbWQpwDZDqnOsPxALjgceB851zg4FXgFvKqdvPL3socCLwmJmpg7ce+XbV\nVu75cBkn9W/HFUd3D3U4IlKF6v7uPQ5oZGZxQGMgC3BAM399kr+srHHAa865fOfcWmAVcFjNQpZw\nkbkzj6tfnUePNk349y8H6QxfJAJU2afvnMs0s/uADCAPmO6cm25mE4APzCwP2A2MKKd6CjAz4P
FG\nf5lEuH2FxVz50hwKi0p44sJhNGmor4dEIkGV/6lm1gLvjL0bsBOYbGYXAGcBJzvnZpnZDcADwISD\nCcLMJgITAZKTk0lLSzuYzQCQk5NTo/pSNecczywqYGFmEX8Y2pANi2ezIdRBhQm1Pwl31Tk9Gwus\ndc5tATCzt4FRwCDn3Cy/zOvAR+XUzQQ6BTzu6C/7GefcJGASQGpqqhszZkx1499PWloaNakvVXtp\n5nq+zlzENcf05Lrje4c6nLCi9ifhrjp9+hnACDNrbF6n7bHAEiDJzHr5ZY4DlpZTdyow3swamlk3\n4BDg+yDELSEyZ/0O7nhvMWN6t+EPY3tVXUFEwkp1+vRnmdmbwFygCJiHd1a+EXjLzEqAHcBlAGZ2\nOt5In7855xab2Rt4bxJFwFXOueLaORSpbZv37ON3L8+hfVIjHjp3CLEx+uJWJNJU69s359xtwG1l\nFr/j38qWnYp3hl/6+C7grhrEKGGgsLiEq16ey+68It7+3WEkNY4PdUgichA05EKq5a5pS/lh3Q4e\nGj+Yvu2bVV1BRMKS5qeTKr09dyPPfbuO3xzZjXGDNeJWJJIp6UulFmXu4qa30xnRvSU3ndQn1OGI\nSA0p6UuFduQWcMVLc2iZ2IBHfj1UE5eL1APq05dyFZc4rnltHpt35/PGFUfQuknDUIckIkGgpC/l\nun/6cr5euZV7zhrA4E7NQx2OiASJPq/Lfj5atInH0lZz3mGdGX9Y51CHIyJBpKQvP7Nq8x7++MYC\nBndqzu2n9wt1OCISZEr68pM9+wqZ+OIcGjWI5fELhtIwTlMfiNQ36tOXn9wweSHrt+3l5QmH0z6p\nUajDEZFaoDN9AWBexg4+WpzN9cf1YkT3VqEOR0RqiZK+APD8t+to0jCOi0d2DXUoIlKLlPSFzbv3\nMS19E79M7agZsETqOSV94eVZGRSVOC4+omuoQxGRWqakH+Xyi4p5eVYGv+jdlq6tE0MdjojUMiX9\nKPdB+ia25uRzifryRaKCkn4Uc87x7Dfr6NEmkdGHtA51OCJSB5T0o9i8DTtZuHEXl4zsijf9sYjU\nd0r6Uey5b9bRtGEcZw3tGOpQRKSOKOlHqR937+OD9E38angnEjVMUyRqKOlHqZdnrqfYOS46okuo\nQxGROqSkH4VKh2ke26ctXVppmKZINFHSj0LvL9jEttwCLhnZLdShiEgdU9IPI865OtnHc9+uo2fb\nJozqqQuriUQbJf0wsbegiFH3fM4TX66u1f3MzdhBeqaGaYpEKyX9MPHJkh/J2rWPf3+8nHkZO2pt\nP89+s46mCXGcNTSl1vYhIuFLST9MTJ2fRXKzhrRrlsB1r88nN78o6PvYtCuPDxdlM354Jxo30DBN\nkWikpB8GduQW8OWKLZwxOIUOt/YfAAAOc0lEQVT7fzWI9dv38vf3lwR9Py/PzKDEOS7S1TRFopaS\nfhiYlr6JohLHuMEpjOjeiiuO7sFrP2zgo0XZQdvHvsJiXvk+g7F9k+nUsnHQtisikUVJPwxMnZ/F\nIW2b0Ld9UwCuG9uL/inNuOnthfy4e19Q9vHegiy25xZwqa6mKRLVlPRDbOOOvXy/bjvjBnf4aTRN\ng7gY/nPuEPIKi/nT5AWUlNRsKGfpMM1eyU04ooeGaYpEMyX9EHtvwSYAxg3++Wianm2bcPMp/fh6\n5Vae/25djfYxe/0OFmft5pKR3TRMUyTKVSvpm9l1ZrbYzBaZ2atmlmBmX5vZfP+WZWbvVlC3OKDc\n1OCGH/mmzM9kaOfm5fazX3B4Z47t05Z/friM5dl7Dnofz32zjqRG8ZwxpENNQhWReqDKpG9mKcA1\nQKpzrj8QC4x3zo12zg12zg0GvgPermATeaXlnHOnBy3yemB59h6WZe/Z7yy/lJnxr3MG0iwhjj+8\nNo/8ouID3kfWzjw+WqxhmiLiqW73ThzQyMzigMZAVukKM2
sGHAOUe6YvFZsyP5PYGOOUge0rLNO6\nSUP+dfZAlmXv4d8fLT/gfbw0cz3OOS7U1TRFBC+ZV8o5l2lm9wEZQB4w3Tk3PaDIGcBnzrndFWwi\nwcxmA0XAPc65/d4czGwiMBEgOTmZtLS0AzuKADk5OTWqX1dKnOP1mXn0axnDotnfVVo2FjimUxxP\nzVhLi32bOLR1bLX2UVDseOGbvQxpG8uqBd+zKghxS+Uipf1JFHPOVXoDWgCfA22AeLwz+gsC1n8I\nnF1J/RT/b3dgHdCjsv0NGzbM1cQXX3xRo/p15Ye121yXG993b8/dUK3ye/OL3DH3feEOu+sTtyM3\nv1p1Xvt+vety4/vuu9VbaxKqHIBIaX9S/wCzXRX53DlXre6dscBa59wW51whXt/9SAAzaw0cBkyr\n5E0l0/+7BkgDhhzQu1I9NWV+FgnxMRzXr121yjdqEMtD44ewPbeAv76TXuUVOZ0/6Xmfdk05vFvL\nYIQsIvVAdZJ+BjDCzBqbN97vWGCpv+4c4H3nXLm/IDKzFmbW0L/fGhgFBP/6AhGmsLiEaembGNs3\nmSYHMFVh/5Qkrj+uNx+kZ/PmnI2Vlp21djvLsvdw6ShdTVNE/qfKpO+cmwW8CcwF0v06k/zV44FX\nA8ubWaqZPeU/7AvMNrMFwBd4ffpRn/RnrNzK9twCzqhg1E5lJh7VncO6teT2qYtZvy23wnLPfbOO\n5o3jKxwZJCLRqVqjd5xztznn+jjn+jvnLnTO5fvLxzjnPipTdrZzboJ//1vn3ADn3CD/79PBP4TI\n8+78TJo3jueoXm0OuG5sjPHguYOJiTGue30+RcUl+5XZuGMv05dkc95hnUmIr96XviISHfSL3Dq2\nt6CI6Yt/5OQB7WkQd3BPf0rzRvzjjP7MzdjJo1/sP+nKizPXY2ZcMELDNEXk55T069gnS34kr7CY\ncYNq9uvYcYNTOGNwBx7+fCVzAyZdySso5rXvN3DCocmkNG9U03BFpJ5R0q9jU+Zn0SEpgeFdaz6i\n5s4z+v806UqOP+nKu/Mz2ZVXqEnPRaRcSvp1aHtuAV+t2MJpgzsQE1PzETXNEuJ54FeDyNi+lzvf\nW+xdTfObdfRr34zhXVsEIWIRqW90MZY69NNkKYOCN6Lm8O6tuPLoHjyWtprGDeJY/uMe7j1noIZp\niki5dKZfh6bOz6RX8v8mSwmWa8f2YkBKEs99u46WiQ04vYbfF4hI/aWkX0c27tjLD+t2MG5wStDP\nwhvExfCf8YNp2jCOy0Z11TBNEamQunfqyNQF3oVJa+ssvEebJsy6+VgaKeGLSCWU9OvI1PlZDOvS\nolYnJdf18kWkKureqQPLsnf7k6Wor11EQktJvw5MmZ9FbIxx8oCKJ0sREakLSvq1rKTEMXV+FqMP\naU3rJg1DHY6IRDkl/Vo2J2MHmTvz1LUjImFBSb+WTZmfSUJ8DMdXc7IUEZHapKRfiwqLS5i2cBPH\n9WtH4gFMliIiUluU9GvR1yu3sGNvYY2vqCkiEixK+rVoyvysg54sRUSkNijp15Lc/JpPliIiEmzK\nRrXk06XeZCkHMw+uiEhtUdKvJe/Oy6RDUgKpXXRdexEJH0r6tWBbTj5frdwatMlSRESCRUm/Fnyw\nKJviEqeuHREJO0r6tWDKPG+ylD7tgjtZiohITSnpB9mG7XuZvb52JksREakpJf0gq+3JUkREakJJ\nP8jqYrIUEZGDpaQfRMuyd7P8xz2coStqikiYUtIPonfnabIUEQlvSvpBUlBUwnsLvMlSWmmyFBEJ\nU0r6QeCc49Z3F5G5M4+Lj+ga6nBERCqkpB8Ez327jtdnb+DqX/TkF33ahjocEZEKVSvpm9l1ZrbY\nzBaZ2atmlmBmX5vZfP+WZWbvVlD3YjNb6d8uDm74off1yi38/f0lHN8vmeuP6xXqcEREKlXldE5m\nlgJcA/RzzuWZ2RvAeO
fc6IAybwFTyqnbErgNSAUcMMfMpjrndgTrAEJpzZYcrnp5Lr2Sm/LguYN1\nnR0RCXvV7d6JAxqZWRzQGMgqXWFmzYBjgPLO9E8APnHObfcT/SfAiTULOTzsyitkwguziYuN4cmL\nUjUdoohEhCqTvnMuE7gPyAA2Abucc9MDipwBfOac211O9RRgQ8Djjf6yiFZc4rjm1XlkbNvL4+cP\n1Q+xRCRiVKd7pwUwDugG7AQmm9kFzrmX/CLnAU/VJAgzmwhMBEhOTiYtLe2gt5WTk1Oj+tXx6rJ8\nvlxXxCWHNiAvI520jFrdnUSQumh/IjVRnT6JscBa59wWADN7GxgJvGRmrYHDgDMrqJsJjAl43BFI\nK1vIOTcJmASQmprqxowZU7ZItaWlpVGT+lWZPHsDH69byMVHdOH2cf1rbT8SmWq7/YnUVHX69DOA\nEWbW2LzLRh4LLPXXnQO875zbV0Hdj4HjzayF/4nheH9ZRJqzfjs3v7OIUT1bceup/UIdjojIAatO\nn/4s4E1gLpDu15nkrx4PvBpY3sxSzewpv+524O/AD/7tTn9ZxMnamcflL86lQ/MEHv31UOJi9RMH\nEYk81Rpy4py7DW/oZdnlY8pZNhuYEPD4GeCZgw8x9PYWFPHbF2aTX1jMaxMPp3njBqEOSUTkoGic\nYRWcc9wweSFLNu3mmYuH07OtZsMSkcilPooqPPzZKqalb+Kmk/roEgsiEvGU9CvxYfomHvx0BWcN\nTeG3o7uHOhwRkRpT0q/A4qxdXP/GAoZ0bs7dZw7QfLciUi8o6Zdja04+E1+YQ/PG8fz3wmEkxMeG\nOiQRkaDQF7llFBSVcMWLc9iWm8/ky0fStmlCqEMSEQkaJf0ApZOhzF6/g/87bwgDOiaFOiQRkaBS\n906AZ7/xJkP5/TE9OW2QJjcXkfpHSd/38qz1/H3aEk44NJnrxmoyFBGpn6K+e8c5x8OfreLBT1dw\nTJ+2/OfcIZoMRUTqrahO+sUljtunLubFmes5e2hH7jl7APG6po6I1GNRm/Tzi4q5/vUFTEvfxOVH\nd+cvJ/bRWHwRqfeiMunn5Bdx+Yuz+WbVNm4+uS+/PUq/thWR6BB1SX9rTj6XPPs9yzbt4YFfDeKs\noR1DHZKISJ2JqqS/YfteLnx6Ftm79/Hkxan8orcuoCYi0SVqkv6SrN1c/Oz3FBSV8PKEEQzr0iLU\nIYmI1LmoSPqz1mxjwvOzaZIQxytXHMEhybomvohEp3qf9D9enM3vX51H55aNeeGyw+jQvFGoQxIR\nCZl6nfRf+z6Dv76TzqBOzXnm4uG0SNQ0hyIS3epl0nfO8egXq7hv+grG9G7DY+cPpXGDenmoIiIH\npN5lwhLnuOO9JTz37TrOHJLCvecM1K9sRUR89SrpFxSV8N8F+czKXseEI7vx15P76jo6IiIB6k3S\nz8kv4sqX5jAru5ibTurD5Uf3CHVIIiJhp970e+zZV8jarbn8pn8DJXwRkQrUm6TfPqkRn15/NKM7\nxoc6FBGRsFVvkj6gCcxFRKpQr5K+iIhUTklfRCSKKOmLiEQRJX0RkSiipC8iEkWU9EVEooiSvohI\nFDHnXKhj+Bkz2wWsrKRIErCrkvWtga1BDapuVXV84b6/mm7vQOsfSPnqlK1pGbW/0O6vrtvfgdQJ\nVrmK1ndxzrWpcuvOubC6AZNquH52qI+hNo8/3PdX0+0daP0DKV+dsjUto/YX2v3Vdfs7kDrBKlfT\nYwzH7p33arg+0tX18QV7fzXd3oHWP5Dy1SkbrDKRSu2v9uoEq1yNjjHsundqysxmO+dSQx2HRCe1\nPwl34XimX1OTQh2ARDW1Pwlr9e5MX0REKlYfz/RFRKQCSvoiIlFESV9EJIpEVdI3s0Qzm21mp4Y6\nFok+ZtbXzJ4wszfN7MpQxyPRKSKSvpk9Y2abzWxRmeUnmtlyM1tlZn+pxqZuBN6onSil
PgtGG3TO\nLXXOXQH8ChhVm/GKVCQiRu+Y2VFADvCCc66/vywWWAEcB2wEfgDOA2KBf5bZxGXAIKAVkABsdc69\nXzfRS30QjDbonNtsZqcDVwIvOudeqav4RUrFhTqA6nDOfWVmXcssPgxY5ZxbA2BmrwHjnHP/BPbr\nvjGzMUAi0A/IM7MPnHMltRm31B/BaIP+dqYCU81sGqCkL3UuIpJ+BVKADQGPNwKHV1TYOXczgJld\ngnemr4QvNXVAbdA/8TgLaAh8UKuRiVQgkpP+QXHOPRfqGCQ6OefSgLQQhyFRLiK+yK1AJtAp4HFH\nf5lIXVEblIgTyUn/B+AQM+tmZg2A8cDUEMck0UVtUCJORCR9M3sV+A7obWYbzew3zrki4GrgY2Ap\n8IZzbnEo45T6S21Q6ouIGLIpIiLBERFn+iIiEhxK+iIiUURJX0Qkiijpi4hEESV9EZEooqQvIhJF\nlPRFRKKIkr6ISBRR0hcRiSL/D/YtRRVbcXBSAAAAAElFTkSuQmCC\n",
      "text/plain": [
       "<matplotlib.figure.Figure at 0x116396750>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "plt.semilogx(regul_val, accuracy_val)\n",
    "plt.grid(True)\n",
    "plt.title('Test accuracy by regularization (logistic)')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "Let's see if the same technique will improve the predictions of the one-hidden-layer neural network:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "batch_size = 128\n",
    "num_hidden_nodes = 1024\n",
    "\n",
    "graph = tf.Graph()\n",
    "with graph.as_default():\n",
    "\n",
    "  # Input data. For the training data, we use a placeholder that will be fed\n",
    "  # at run time with a training minibatch.\n",
    "  tf_train_dataset = tf.placeholder(tf.float32,\n",
    "                                    shape=(batch_size, image_size * image_size))\n",
    "  tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n",
    "  tf_valid_dataset = tf.constant(valid_dataset)\n",
    "  tf_test_dataset = tf.constant(test_dataset)\n",
    "  beta_regul = tf.placeholder(tf.float32)\n",
    "  \n",
    "  # Variables.\n",
    "  weights1 = tf.Variable(\n",
    "    tf.truncated_normal([image_size * image_size, num_hidden_nodes]))\n",
    "  biases1 = tf.Variable(tf.zeros([num_hidden_nodes]))\n",
    "  weights2 = tf.Variable(\n",
    "    tf.truncated_normal([num_hidden_nodes, num_labels]))\n",
    "  biases2 = tf.Variable(tf.zeros([num_labels]))\n",
    "  \n",
    "  # Training computation.\n",
    "  lay1_train = tf.nn.relu(tf.matmul(tf_train_dataset, weights1) + biases1)\n",
    "  logits = tf.matmul(lay1_train, weights2) + biases2\n",
    "  loss = tf.reduce_mean(\n",
    "    tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels)) + \\\n",
    "      beta_regul * (tf.nn.l2_loss(weights1) + tf.nn.l2_loss(weights2))\n",
    "  \n",
    "  # Optimizer.\n",
    "  optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)\n",
    "  \n",
    "  # Predictions for the training, validation, and test data.\n",
    "  train_prediction = tf.nn.softmax(logits)\n",
    "  lay1_valid = tf.nn.relu(tf.matmul(tf_valid_dataset, weights1) + biases1)\n",
    "  valid_prediction = tf.nn.softmax(tf.matmul(lay1_valid, weights2) + biases2)\n",
    "  lay1_test = tf.nn.relu(tf.matmul(tf_test_dataset, weights1) + biases1)\n",
    "  test_prediction = tf.nn.softmax(tf.matmul(lay1_test, weights2) + biases2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Initialized\n",
      "Minibatch loss at step 0: 667.111572\n",
      "Minibatch accuracy: 8.6%\n",
      "Validation accuracy: 32.0%\n",
      "Minibatch loss at step 500: 197.072830\n",
      "Minibatch accuracy: 81.2%\n",
      "Validation accuracy: 80.0%\n",
      "Minibatch loss at step 1000: 115.229164\n",
      "Minibatch accuracy: 78.9%\n",
      "Validation accuracy: 81.2%\n",
      "Minibatch loss at step 1500: 68.336792\n",
      "Minibatch accuracy: 89.1%\n",
      "Validation accuracy: 83.0%\n",
      "Minibatch loss at step 2000: 41.158184\n",
      "Minibatch accuracy: 89.1%\n",
      "Validation accuracy: 84.7%\n",
      "Minibatch loss at step 2500: 25.163036\n",
      "Minibatch accuracy: 85.2%\n",
      "Validation accuracy: 85.3%\n",
      "Minibatch loss at step 3000: 15.478530\n",
      "Minibatch accuracy: 85.2%\n",
      "Validation accuracy: 86.5%\n",
      "Test accuracy: 93.0%\n"
     ]
    }
   ],
   "source": [
    "num_steps = 3001\n",
    "\n",
    "with tf.Session(graph=graph) as session:\n",
    "  tf.initialize_all_variables().run()\n",
    "  print(\"Initialized\")\n",
    "  for step in range(num_steps):\n",
    "    # Pick an offset within the training data, which has been randomized.\n",
    "    # Note: we could use better randomization across epochs.\n",
    "    offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n",
    "    # Generate a minibatch.\n",
    "    batch_data = train_dataset[offset:(offset + batch_size), :]\n",
    "    batch_labels = train_labels[offset:(offset + batch_size), :]\n",
    "    # Prepare a dictionary telling the session where to feed the minibatch.\n",
    "    # The key of the dictionary is the placeholder node of the graph to be fed,\n",
    "    # and the value is the numpy array to feed to it.\n",
    "    feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels, beta_regul : 1e-3}\n",
    "    _, l, predictions = session.run(\n",
    "      [optimizer, loss, train_prediction], feed_dict=feed_dict)\n",
    "    if (step % 500 == 0):\n",
    "      print(\"Minibatch loss at step %d: %f\" % (step, l))\n",
    "      print(\"Minibatch accuracy: %.1f%%\" % accuracy(predictions, batch_labels))\n",
    "      print(\"Validation accuracy: %.1f%%\" % accuracy(\n",
    "        valid_prediction.eval(), valid_labels))\n",
    "  print(\"Test accuracy: %.1f%%\" % accuracy(test_prediction.eval(), test_labels))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Finally something above 90%! I will also plot the final accuracy by the L2 parameter to find the best value."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "num_steps = 3001\n",
    "regul_val = [pow(10, i) for i in np.arange(-4, -2, 0.1)]\n",
    "accuracy_val = []\n",
    "\n",
    "for regul in regul_val:    \n",
    "  with tf.Session(graph=graph) as session:\n",
    "    tf.initialize_all_variables().run()\n",
    "    for step in range(num_steps):\n",
    "      # Pick an offset within the training data, which has been randomized.\n",
    "      # Note: we could use better randomization across epochs.\n",
    "      offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n",
    "      # Generate a minibatch.\n",
    "      batch_data = train_dataset[offset:(offset + batch_size), :]\n",
    "      batch_labels = train_labels[offset:(offset + batch_size), :]\n",
    "      # Prepare a dictionary telling the session where to feed the minibatch.\n",
    "      # The key of the dictionary is the placeholder node of the graph to be fed,\n",
    "      # and the value is the numpy array to feed to it.\n",
    "      feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels, beta_regul : regul}\n",
    "      _, l, predictions = session.run(\n",
    "        [optimizer, loss, train_prediction], feed_dict=feed_dict)\n",
    "    accuracy_val.append(accuracy(test_prediction.eval(), test_labels))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXQAAAEMCAYAAADUEk3/AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4wLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvpW3flQAAIABJREFUeJzt3Xd8FHX+x/HXJ52Q0ENC7y0giEQp\nggYVLCCoKBbsBVHPcqd3+rs7eznPfuopNlQURLGcKBwCQvCUIkGKQOiQ0HsISUj//v6YCa4hZQm7\nO1s+z8cjjySzM7OfmZ197+x3vjMjxhiUUkoFvjCnC1BKKeUZGuhKKRUkNNCVUipIaKArpVSQ0EBX\nSqkgoYGulFJBQgNdeZSIxIiIEZGWTtdyokRkkYhcexLTbxKR/h6uKVpEckWkuSfn6zL/l0VkXC2n\nvUBENnq6JqeJyOkikuZ0HbURcoFuvznKf8pE5KjL/2NOYr4nFQYq8BljOhhjFp7MPCpuR8aYQmNM\nnDFm58lXeNxztQAuBybY/9cVkS9FJNP+UO7n6ef0N5XtgBhjlgBlIjLEwdJqJeQC3X5zxBlj4oAs\n4GKXYZOcrs9bRCTC6RpOlr8ug7/W5Yabgf8YY4rs/w2QBlwNHHKqqOr4cF1PAm730XN5jjEmZH+A\nrcB5FYaFAw8Dm4H9WC9sA/uxusAU4CCQDSwGGgIvAqVAAZALvFjJc0UAXwB77GnnAV1cHq8LvAps\nAw4D84EI+7FUYJE9PAu4xh6+CLjWZR7jgDn23zFYb9A7gE3AWnv4m8B2IAf4GehXocZH7WXPAZYA\nScB7wNMVlmcWcEcly1n+vH+w1+8+4GlAgFh7vp1cxm8J5Jev4wrzGgfMBf6NFTB/t4ffDqyzX4fp\nQAuXaYYBG+x1/IrrOgKeBd51GbcrUOLyv+u4XbHC7aC9DB8C8S7j7gYeAFYD+S7DBmJtQ7kuP3n2\nOkkCEoD/2vM8CHwNNLOnP247clmfLe1xGgGT7em3AH8BxGV9fY+1HWXbr/t5FderyzIsAC6v4rH9\nrttGFeNcAGx0+f8Ru6YjwCpgmD28xtcduBRYadf9PyC5unVdxTY31l7mQ8DLFcapdJvBeg8Y+zXK\nBS6xh3ewlyPc6Zw6oUxzugBHF77yQH/Q3qCa2xvKB8D79mP3Ap8DdbDC73Sgrv3Y78K1kueKAK4H\n4uz5vgkscnn8PayQTLIDYZD9u6O9oY2y55EA9KrsOak80KcDDYA69vDrsT6EIoG/YX2ARNqPPQws\ns58zDOhtT3uW/UYtD47m9puxUSXLWf6839nTtsP6gCgPygnA4xXW99Qq1tk4oAS4zV4XdYArgQyg\ns70MTwHz7PGb2etquP3YX4Biah/o5wBR9muyCHjWZdzdWB94zV3W7W5gYCXL8RIwx16GRGCkvSz1\nsQJ9SmU1VFif5YH+GTDV3o462q/LGJf1VWy/xuHAH4Gt1WyTR4BTqnisNoF+pf0ahAHX2fNvUtPr\nDvQDdgF97LrHAuv5bYfmuHVdxTb3JVDP3uaygVSXuqraZn63fivMtwjo7HROnciP4wU4uvCVB/oW\n4EyX/9thhZcAd2LtOfeoZF7VBnol4ycBZfYGFWm/EbtUMt7jwCdVzMOdQB9QTQ1iL1sX+/9M4Pwq\nxtsMDLL/fwD4sop5lj9vqsuwPwHT7b/PrhACvwIjqpjXOGB9hWHzsAPM/r983SXaQTDP5bEwYC+1\nCPRKarkKWOjy/27sb0oVhg2sMOx6YCOVfPjZj/cDdlXzmh4LHCAaaw++vcvj9wIzXdbXKpfHGtnT\nVvbtJ9x+rG0VdZ1woFfy+Nry7am61x14H/hbhWkzgb5VresqtrkUl2HTgPvc2GaqC/QDwBnVrQN/\n+wm5NvTqiIgArYAZIpItItlYe6xhQG
Osvej5wOcisl1EnhGRcDfnHSEiL4rIZhHJwdrYxZ5vM6y9\n702VTNqqiuHu2lahjv8TkXUichjrq2kM0MRe9haVPZextu6JQPnBumuBj07geTOx9q4AfgDCRaS/\niJyKtez/dbd+oA0w3uX12Ye1F9/Sfo5j4xtjyoAdNdRZKRFpLiJTRWSH/Xq9CzSpobaK8+iL1Wwy\n0hhz0B4WLyITRCTLnu+sSuZblSSsbTHLZVgm1utWbrfL3/n277iKMzLGlGLtQce788Qi0tml88D+\nKsa5RURWurw2Hflt2ap73dsAfy2fzp42ocJyVbuubRWXvXy5q9tmqhOPtacfMDTQXdjBtQM4xxjT\nwOUnxhiz31g9Dh4xxnTFaoa4AmvPDaxP+ercBAwBBmN91e5qDxesr5slWO12FW2rYjhY7X6xLv8n\nVbZY5X/YR+3vxmqvbIC1B3cUqymlfNmreq6JwOUi0gfrQ2Z6FeOVa+Xyd2tgJxz34XAdVnNDcTXz\nqbhetwE3Vnh96hhjlmKtx2NvUhEJ4/eh4M76Kve8PX4PY0w94Fas16q62o6xuxl+AdxqjFnt8tBD\ndo2n2/MdWmG+1W1Hu7G+1bV2GdaaWn5oYbVZd3ZnRGPMevNb54HjPoBEpDPwGta3pEbGmAZY30zE\nnr66130b8EiF1zTWGPOlawm1XMby+Ve1zVQ6XxHpABRycjtTPqeBfrzxwLMi0gpARJqKyMX23+eJ\nSLIdFDlYIVxmT7cHaF/NfOOxDnYdwDoA+lT5A/aGPRH4l4gkiki4iAy09/4/AoaLyKX2Xn6CiPS0\nJ12OFbIxItIVuLGGZYvH+qq5D6tt+AmsPfRy7wLPiEh7sfQWkQZ2jZuBNVhfjz81v/WMqMqDIlJf\nRNpiHSD91OWxicBorN4UE2uYT0Xjgb+LSBcAEWkoIqPsx6YBfUXkIrs3xJ+wjheUWw4MFpEWItIQ\nqx23KvFY7fE5ItLanpdbRCQKqz33LWPM15XMNx/IFpEmwN8rPF7ldmSMKQS+wnqN6tqhcy/wsbu1\nVTADqynEtfZoESnfJqJc/q5JHNZ7YR8QZvdt71hhnKpe97eBu0Ukxd7u4kRkhIjE4hlVbjP2Oj3M\n8ev8bGC2/U0mYGigH+85rANYc0XkCFZPgNPsx1pgHcQqP4o/g9+C6mXgehE5JCLPVTLf97A29t1Y\n7Yc/Vnj8Hqy9gWVYof8k1p7zRqyDaH/FOkKfDnR3qTXCnu/b1PzG/gbrq+8mfuvFs8/l8Wex9rzn\nYn1gjcdqty33IXAKNTe3YM9nhV3vVNfajDGbsHocHDHG/OzGvI4xxnwCvA58aTdZLMf65oMxZhdW\nWLxqL1tLrHVd6FLTt1gfTIuA/1TzVI9g9Vg5jBWiX5xAme2Bvlgfaq7nPTQFXsBqhjiAtQ3MqDBt\nTdtReVe6TKzX6V2snli18QFwif0BVC4T61tbY6zmxaMiUt03GQCMMb9gbS/pWN+U2tl/u45T6etu\njPkJa/t/C6uJYz1wDSe3V+76vFVuM7ZHgKl2k8wIe9gYe3kCSnmvBaVqJCJDgTeMMRX3vGozr8nA\nGmPMUzWOXPvniMD6AL3YnOQJP8FKRF7COvDsk/Dyxet+skTkdOAFY8zZNY7sZzTQlVtcmhF+MMZU\ntud4IvPqCPwCdDPG1Lb9t6p5X4j1raoQq1vmDUBHN5qIlJd583VXFm1yUTWyeyUcwmr//fdJzus5\nrGalJ7z0pi7vM78XOBe4VMPceT543RW6h66UUkFD99CVUipIaKArpVSQ8OlV4po0aWLatm1bq2nz\n8vKoW7euZwtSyk26/SknLV26dL8xJqGm8Xwa6G3btiU9Pb3mESuRlpZGamqqZwtSyk26/SkniUim\nO+Npk4tSSgUJDXSllAoSGuhKKRUkNNCVUipIaKArpVSQ0EBXSqkgoYGulJflF5WwLOsQepkN5W0+\n7Y
euVCgpKS1j6tLtvDx7PXuPFHLTmW35+7BkwsMq3vhIKc/QQFfKw4wxzFqzh+dmrmXTvjz6tGnI\nOV2b8v5PW9l9uICXrzyVmEi3bkWr1AnRQFfKg5ZmHuQfM9aSnnmI9gl1eeu6PgxNTkRE6Ng0jqdn\nZLDv3cW8e0MKDWKjap6hUidAA10pD9i4N5fnv1vLd6v3kBAfzTOXnsLolJZEhP92mOrWQe1pVr8O\nf/x0OaPeXMAHN51Bq0aeum2mUhroSp2UvTkFvDxnA5+lbyMmIoz7h3TmlkHtiI2q/K01rGczmsRF\ncdvEdC57cwHv33g6PVrU93HVKlhpLxelauFIQTEvzlrH2c+nMTV9G9f1a8P8vwzm7nM7VRnm5fq2\nb8zndwwgMky48q2F/LB+X7XjK+Uu3UNX6gQUlZQxeXEmr87dyMG8Iob3bMafz+9Cm8Yndmndzonx\nfHXXmdww4Wdu/mAJz47qyeV9WnqpahUqNNCVckOZMUxbsZMXvltH1sF8+rdvzEMXdqVXqwa1nmdi\nvRimjuvPuI+X8sDUFew+fJS7BndERLs1qtrRQFeqBtsP5fPkwgK25Cyja1I87990OqmdEzwSvPEx\nkbx/4xk8+MVKXpi1np2HC3hiRPffHUxVyl0a6ErV4JU5G9iRW8bzl/fkstNaevzEoKiIMF4a3Yuk\n+jG8mbaJvTkFvHp17xrb4pWqSHcDlKrGviOFTFu+k4EtI7gipZXXzvIUER68oCtPjuzO3LV7ufqd\nxezPLfTKc6ngpYGuVDUmLc6kqLSMIa0jffJ81/Vvy/hr+7B2Vw6j3lzA1v15PnleFRw00JWqQmFJ\nKR8vymRwlwSaxfnurTK0exKTb+tHztFiRr25gOXbsn323CqwaaArVYVvVuxif24RNw9s5/Pn7tOm\nIV/cMYDY6HCuensh32fs8XkNKvBooCtVCWMME37cQqemcQzs2MSRGtonxPHlHWfSqWk8t01M5/Ol\n2x2pQwUODXSlKrF4y0HW7Mrh5oHtHO0XnhAfzZSx/ejfoTEPTF3Bhwu2OlaL8n8a6EpVYsKPW2gY\nG8mlvVs4XQp1oyN474bTGZKcyKPTVvPveRv1ZhmqUhroSlWQdSCf2Rl7uKZva7+5bnlMZDhvjDmN\nS3u34Pnv1vHszLUa6uo4euaCUhV8sGAr4SJc16+t06X8TmR4GC9e0Yu60eG8NX8zuQUlPDmyB2F6\nByRl00BXysWRgmI+S9/GsJ7NSKof43Q5xwkLE54c2YO46EjGz99EbmEJL1zRi0i9VIBCA12p35ma\nvp3cwhJuOtP3XRXdJSI8dGFX4mMieP67deQVlvL6Nb39pnlIOcetj3URuVdEVonIahG5zx72pIis\nFJHlIjJLRJp7t1SlvKu0zPDBgq30adOQU0/iKoq+ctfgjjwxsjtzMvZwy4dLyCsscbok5bAaA11E\negC3AWcAvYDhItIReN4Y09MYcyrwLfCIVytVysu+z9hD1sF8bvbjvfOKru/flpdG92LR5oNc+95i\nDucXO12ScpA7e+jdgMXGmHxjTAkwH7jMGJPjMk5dQA+5q4A24acttGhQh/O7Jzpdygm57LSW/Pua\n01i9I4er3lnEviN6Ua9QJTV1fRKRbsDXQH/gKPA9kG6MuVtEngauBw4Dg40xx91LS0TGAmMBEhMT\n+0yZMqVWhebm5hIXF1eraZWqSWZOKY8uKODKLlFc2O74C3EFwva3an8pry4roFG08OfTY2hcRw+U\nBovBgwcvNcak1DRejYEOICK3AHcCecBqoNAYc5/L4/8HxBhjHq1uPikpKSY9Pb3G56tMWloaqamp\ntZpWqZo8MHUFM37dxcKHzqV+7PGBHijbX/rWg9z0wRLqxUTy8a19adfkxG6Np/yTiLgV6G59hBtj\n3jPG9DHGnAUcAtZXGGUSMOrEy1TKeeXXPL+8T8tKwzyQpLRtxCe3
9eNocSlXjF9Ixq6cmidSQcPd\nXi5N7d+tgcuAySLSyWWUkcBaz5enlPeVX/P8xgFtnS7FI3q0qM9nt/cnIky46u1FLMs65HRJykfc\nbWT7QkTWAN8AdxljsoFn7a6MK4GhwL3eKlIpbym/5vk5XZvSPsG/28hPRMemcUwd158GsZGMeXcx\nCzbtd7ok5QPuNrkMMsYkG2N6GWO+t4eNMsb0sLsuXmyM2eHdUpXyvGPXPA+groruatUolqm396dV\nw1hunLCEV+asp6C41OmylBfpYXAVssqved45MY4zOzZ2uhyvaFovhk9v78f5PZJ4Zc4Ghr78g94s\nI4hpoKuQdeya52c6e81zb2sQG8VrV/dm8m19iYoI45YP07n1wyVkHch3ujTlYRroKmSVX/P8Ej+4\n5rkvDOjQhP/eO4i/XdSNhZsOcN7L87UZJshooKuQVH7N8zF924TURa0iw8O47az2fH9/Kud3t5ph\nhrw8X5thgoQGugpJx6553r+N06U4Iql+zLFmmOiIcG2GCRIa6CrklF/zfHjPZiTW879rnvuSNsME\nFw10FXI+s695fvPA4OuqWBuuzTAXaDNMQNNAVyHFuub5FlLaNKRnS/+/5rkvJdWP4VW7GSbGboa5\n5QNthgkkGugqpMzJ2MO2g0d177waAzo0YYbdDLNo82/NMCWlZU6Xpmqgga5CyoQfrWueD00OrGue\n+1plvWH+MHkZhSXatu7PNNBVyFi14zCLtxzkhgFtiNCbKrulvDfMI8OTmbl6N7d+mE5+kd7qzl/p\nVq1Cxvs/bSU2KpwrU1o7XUrAuXlgO567vCc/bdzP9e/9TE6B3urOH2mgq5Cw90gB36wIjmueO2V0\nSitev+Y0VmzP5uq3F3EgV29152800FVImLQoi6LSMm4Kwqsq+tJFpzTjnetT2LQvl9FvLWT34QKn\nS1IuNNBV0CsoLmXS4kzO7dpUb8nmAaldmjLx5r7sySnk8vELyDyQ53RJyqaBroLex4syrWuea1dF\njzmjXSMm39aXvMISrhi/kPV7jjhdkkIDXQW5pZkHefa/azm3a1MGdAjOa547pWfLBnx2e38ARr+1\nkBXbsh2uSGmgq6C170ghd076heYN6vDSlacG9TXPndIpMZ7Pxw0gPiaCMe8uZvHmA06XFNI00FVQ\nKikt4+5PfiE7v5jx1/ahfh3t2eItrRvHMvX2ASTVj+H6CT8zb+1ep0sKWRroKig9P2sdizYf5JlL\nTyG5eT2nywl6SfVj+HRsPzolxnHbxHS+XbnT6ZJCkga6CjozV+3mrfmbGdO3NaP6tHS6nJDROC6a\nybf1o3frBtzzyTI+W7LN6ZJCjga6Ciqb9+XywNQV9GrVgEcuTna6nJBTLyaSiTf3ZWCnBP7yxUre\n+3GL0yWFFA10FTTyi0oY9/FSIsOFN8acRnRE6Nxazp/UiQrnnev7cGGPJJ78dg2vzFmPMcbpskJC\nhNMFKOUJxhge+uJXNuzNZeLNZ9CiQR2nSwpp0RHhvHZ1bx768ldembOBg3lFXNA9ifiYSOJiIoi3\nf/RD17M00FVQ+HDBVqat2MkDQzszqFOC0+UoICI8jOdG9SQuOoIPFmxl4sLM48aJCg87Fu5xMRHE\nR0ce+7tejP13dAQtG8ZyYY8kwsK062l1NNBVwFuaeZCnpmdwXrem3Jna0elylIuwMOHRi5O5tl9r\n9h0pIrewhCMFxfbvEnIKisktsP4ufyzrYD5HCn4br8xurXl8RHduGNDW0eXxdxroKqCVnzzUomEd\nXhx9qu7B+SERoWPTeDo2PfFpjTHkF5Uy9qN0Xp6znpGnNqdBbJTniwwSelBUBazyk4cOHy3mzTF6\n8lAwEhHqRkfw8PBkco4W88qcDU6X5Nc00FXAKj956OlL9OShYNc1qR7X9G3NR4sy2aAXAquSBroK\nSDNX7dKTh0LMn4Z0oW5UOE9O
z9BukFXQQFcBZ9O+XB6YulJPHgoxjepGce95nflh/T7mrdPrxVRG\nA10FlPyiEu74eClREWF68lAIur5/G9on1OWpbzMoKilzuhy/o4GuAkb5yUMb9+by6lW99eShEBQZ\nHsbDw5LZvD+PiQu3Ol2O39FAVwGj/OSh+4d2YWCnJk6XoxyS2iWBszon8K/vN+iNqivQQFcBwfXk\noTvO7uB0OcpBIsLDw7qRX1TKy3PWO12OX9FAV35vf66ePKR+r1NiPNf1a8PkxVms3Z3jdDl+QwNd\n+b035m3iYF6Rnjykfue+8zpRr04kT367Rrsx2twKdBG5V0RWichqEbnPHva8iKwVkZUi8pWINPBu\nqSoUHS0q5fOl2zi/e5KePKR+p0FsFH88rzM/bTzA7DV7nC7HL9QY6CLSA7gNOAPoBQwXkY7AbKCH\nMaYnsB74P28WqkLTtyt3klNQwpi+bZwuRfmhMX1b06lpHE/PyKCwpNTpchznzh56N2CxMSbfGFMC\nzAcuM8bMsv8HWATo6XrK4yYtzqJDQl36tW/kdCnKD0WEh/Hw8GQyD+TzwU9bnS7Hce5cbXEV8LSI\nNAaOAhcB6RXGuRn4tLKJRWQsMBYgMTGRtLS0WhWam5tb62lVYMrMKWX5tgKu6RrF/PnzHa1Ftz//\n1ishnJdnrSWpIIv60aF70LzGQDfGZIjIP4FZQB6wHDj23UZE/gaUAJOqmP5t4G2AlJQUk5qaWqtC\n09LSqO20KjD99atfiY7YzoOjU6kf6+zBUN3+/Fvr7rmc/8oPLMxtzLPn93S6HMe4dVDUGPOeMaaP\nMeYs4BBWmzkiciMwHBhj9DCz8qAjBcX8Z9kOLu7V3PEwV/6vfUIcN/Rvy6fp21i147DT5TjG3V4u\nTe3frYHLgMkicgHwF2CEMSbfeyWqUPSf5TvJLyrl2n56MFS55+5zO9EwNoonQrgbo7v90L8QkTXA\nN8Bdxphs4HUgHpgtIstFZLy3ilShxRjDpEWZdG9ej14t6ztdjgoQ9etEcv/Qzvy85SAzV+12uhxH\nuHULOmPMoEqG6c0blVf8kpXN2t1HeObSUxAJ3QNc6sRddXprPlqYydMzMhjctSkxkaF1NU49U1T5\nnUmLM4mLjmDEqc2dLkUFmPAw4ZHhyWw/dJT3ftzidDk+p4Gu/Ep2fhHfrtzFJb2bExet9zBXJ25A\nxyYMTU7k3/M2sjenwOlyfEoDXfmVz5dup6ikTM8MVSflb8O6UVJqeO67dU6X4lMa6MpvGGOYtDiL\nPm0a0q2ZXrdF1V6bxnW5aWBbPl+6nZXbs50ux2c00JXfWLjpAFv25zGmb2unS1FB4A+DO9IkLpon\nvgmdbowa6MpvTFqcRYPYSC46pZnTpaggEB8TyZ/P70x65iG+WbnL6XJ8QgNd+YW9Rwr4bvVurujT\nMuS6minvubxPK7o3r8ezMzLILyqpeYIAp4Gu/MJnS7ZRUma4+gxtblGeEx4mPDaiO7tyCrhvynJK\ny4K76UUDXTmutMzwyc/bOLNjY9onxDldjgoyp7dtxGMXd2fWmj1Bf3cj7eirHDd//V52ZB/lb8O6\nOV2KClI3DGjL9kP5vPO/LbRsWIdbB7V3uiSv0EBXjpu0KIuE+GiGJCc6XYoKYv93YTd2ZB/l6RkZ\nNG9QJygPvmuTi3LU9kP5zF23l6tOb0VkuG6OynvCwoSXRp9Kn9YNue/T5aRvPeh0SR6n7yDlqCk/\nb0OAq/RgqPKBmMhw3rk+hRYN6nDrxHQ278t1uiSP0kBXjikuLWPKkm0M7tKUFg3qOF2OChEN60bx\nwU2nEy7Cje8vYX9uodMleYwGunLM7DV72J9byJh+uneufKtN47q8e0MKe48UcMuH6RwtKq15ogCg\nga4cM2lxJi0a1OHszk2dLkWFoN6tG/LqVb1ZuT2be6YsC4o+6hroyhGb9+Xy08YDXNO3NeFheh
ML\n5Yyh3ZN47OLuzF6zhye+WR3wfdS126JyxCc/ZxERJlyR0tLpUlSIc+2j3qpRbED3UddAVz5XUFzK\n1KXbOb97Ek3jY5wuR6ljfdSfmp5Bs/p1GNYzMPuoa5OL8rkZv+4iO79YL5Or/EZ5H/WUNg3542eB\n20ddA1353KTFWbRvUpf+HRo7XYpSxwRDH3UNdOVTGbtyWJp5iGv6tkZED4Yq/xLofdQ10JVPTV6c\nRVREGJf30YOhyj/9ro/6B0sC6jrqGujKZ/IKS/hq2Q6G92xGg9gop8tRqkrH+qjvOMw9nwTOddQ1\n0JXPfL18J7mFJYzp28bpUpSqUXkf9TkZe3g8QPqoa7dF5RPGGCYtzqRbs3qc1rqB0+Uo5RbXPuop\nbRsxoldzp0uqlu6hK59Ysf0wq3fmMEYPhqoA89CF3ejRoh5PT19DXqF/t6droCufmLQok7pR4VzS\nu4XTpSh1QsLDhMdH9GBPTiGvz9vodDnV0kBXXnc4v5hvVu5kZO8WxEVrK58KPH3aNGTUaS1593+b\n/bp/uga68qrCklIembaKguIyPTNUBbQHL+xCTEQ4j33jvzea1kBXXrPvSCHXvLOYr5fv5P4hnene\nvL7TJSlVa03jY7hvSGd+WL+P2Wv2OF1OpTTQlVes3nmYka//yOqdh3ljzGncfW4np0tS6qRd378N\nnRPjeOLbNRQU+99NMTTQlcfNXLWLy99ciAE+HzcgKO+urkJTZHgYj43ozvZDRxk/f5PT5RxHA115\njDGG177fwLiPf6FLUjxf/+FMerTQZhYVXAZ0aMLwns14M20T2w7mO13O72igK48oKC7lninLeXH2\nei7t3YIpY/vptc5V0PrbsG6EifDkt2ucLuV3NNDVSdt9uIDRby3k25U7efCCrrw0uhcxkeFOl6WU\n1zSrX4e7z+3IrDV7mL9+n9PlHONWoIvIvSKySkRWi8h99rAr7P/LRCTFu2Uqf7ViWzYjXv+RTXtz\nefu6FO5I7aBngqqQcMvAdrRrUpfHp62mqKTM6XIANwJdRHoAtwFnAL2A4SLSEVgFXAb84NUKld/6\nevkORr+1kKiIML64cwBDkhOdLkkpn4mOCOfRi5PZvD+PCT9tcbocwL099G7AYmNMvjGmBJgPXGaM\nyTDGrPNuecoflZUZXvhuHfdOWU6vlg34+q4z6ZpUz+mylPK51C5NGZKcyKvfb2D34QKny3Er0FcB\ng0SksYjEAhcBrbxblvJXeYUljPt4Ka/P28iVKa34+Na+NI6LdrospRzz8LBkSsoMz8zIcLqUmi+f\na4zJEJF/ArOAPGA54HaPehEZC4wFSExMJC0trVaF5ubm1npa5Rn7j5bxr18K2X6kjGu6RjGk8QEW\n/BgaLW66/anqXNgmnK9X7CQszlprAAAPlklEQVQ5+iBdGznXIUBO9JoEIvIMsN0Y84b9fxrwgDEm\nvaZpU1JSTHp6jaNVKi0tjdTU1FpNq05e+taD3P7RUopKy3j9mtM4u3OC0yX5lG5/qjpHi0o576X5\nxEVHMP2egUSEe7YDoYgsNcbU2PnE3V4uTe3frbEOhE4+ufJUoDhSUMy/5mzg6ncWER8TwVd3nhly\nYa5UTepEhfPw8GTW7TnCR4syHavD3WuZfiEijYFi4C5jTLaIXAq8BiQA00VkuTHmfG8Vqnwrv6iE\niQszGT9/E9n5xVzYI4l/XHaK3gtUqSqc3z2RQZ2a8NKs9Qzv2ZyEeN8fW3Ir0I0xgyoZ9hXwlccr\nUo4qKC5l8uIs3kjbyP7cIlK7JPCnIZ3p2VJvG6dUdUSEx0Z054JXfuC5mWt5/opePq9B7zagACgq\nKeOz9G28Pncju3MKGNChMW9d15k+bRo5XZpSAaNDQhw3D2zHW/M3c03f1vRu3dCnz6+BHuJKSsv4\natkO/vX9BrYfOkqfNg15aXQvBnRs4nRpSgWku8/pxH+W7e
CRr1fzn7vOJDzMd2dOa6CHqLIywzcr\nd/KvORvYvD+PU1rU58lLepDaOUFP3VfqJMRFR/DXi7px75TlfJa+javP8N2dujTQQ4wxhu9W7+al\n2etZvyeXrknxvHVdH4YmJ2qQK+UhI3o1Z9LiLJ6buZYLeyT5rDOBXm0xRBhjmLt2Dxe//iPjPv6F\nkjLDa1f3ZsY9gzi/e5KGuVIeJCI8PqI7OQUlvDhrvc+eV/fQQ8Cmfbk8MHUFy7KyadWoDi9e0YuR\npzb3+MkPSqnfdGtWj+v6tWHiwq1ceXorn9zsRd/RQa60zPDHT5ezZX8ez1x6CnPvT2VUn5Ya5kr5\nwB+HdKZhbBSPTlvNiZ6VXxv6rg5yn/ycxcrth3l8RHeu6duaSA1ypXymfp1IHrygK0szDzHj191e\nfz5tcgliB3ILef67dfRr34gRvZo7XY5SIenyPi0RwSf3C9BAD2L/nLmWvMISnhzZQw96KuWQsDDh\nihTfXHFcv38HqaWZh/gsfTu3DGxHp8R4p8tRSvmABnoQKikt4+H/rCKpXgz3nNvJ6XKUUj6igR6E\nJi3OYs2uHB4enkzdaG1VUypUaKAHmX1HCnlh1joGdWrCRackOV2OUsqHNNCDzD9mZFBQXMpjI7rr\ngVClQowGehBZvPkAXy7bwdiz2tMhIc7pcpRSPqaBHiSKS8t45OvVtGhQh7sGd3S6HKWUAzTQg8SH\nC7aybs8RHrk4mdgoPRCqVCjSQA8Ce3IKeGXOBlK7JDDUB2ejKaX8kwZ6EHh6egZFpWU8rgdClQpp\nGugBbsHG/UxbsZNxZ3egTeO6TpejlHKQBnoAKyop45Fpq2nVqA53pnZwuhyllMP06FkAm/DTFjbu\nzeW9G1KIiQx3uhyllMN0Dz1A7cw+yqvfb+C8bomc200PhCqlNNAD1lPT11BaZnj04mSnS1FK+QkN\n9AD0w/p9zPh1N38Y3JFWjWKdLkcp5Sc00ANMYUkpj05bTdvGsYw9u73T5Sil/IgeFA0w7/ywmS37\n8/jw5jOIjtADoUqp3wTEHvonP2fxzspCp8tw3LaD+bw+byMX9kji7M4JTpejlPIzARHo+48U8tPO\nEnYdPup0KY564ts1CMLDw/VAqFLqeAER6Bf1bAbAf3/d7XAlzpm7dg+z1+zhnnM70bxBHafLUUr5\noYAI9A4JcbSKD2PGr7ucLsURBcWlPDZtDR0S6nLLwHZOl6OU8lMBEegApyeFk555KOSaXZZmHuKG\nCT+TdTCfJ0b2ICoiYF4ypZSPBUw6nJ5odcgJlWaXVTsOc9P7PzPqzQVs3JvLU5f04MyOTZwuSynl\nxwKm22KzuDC6JsUz49dd3BzEzQ7rdh/h5dnrmbl6N/XrRPKXC7pwQ/+21I0OmJdKKeWQgEqJYac0\n48XZ69l1+CjN6gfXgcHN+3J5Zc4Gvlm5k7pREdx3XiduHtiOejGRTpemlAoQARXoF/W0Av2/v+4O\nmr30bQfz+df3G/jyl+1ER4Qz7uwOjB3UnoZ1o5wuTSkVYNwKdBG5F7gNEOAdY8wrItII+BRoC2wF\nRhtjDnmpTsDq7RIszS67Dh/l9bkb+XTJNsLChJvObMe4szuQEB/tdGlKqQBVY6CLSA+sMD8DKAJm\nisi3wFjge2PMsyLyEPAQ8KA3i4XAb3bZe6SAN9M2MWlxFsYYrj6jNXcN7khS/RinS1NKBTh39tC7\nAYuNMfkAIjIfuAwYCaTa43wIpOGDQA/UZpdDeUWM/2ETHy7YSnGpYdRpLbj7nE56tUSllMe4E+ir\ngKdFpDFwFLgISAcSjTHlZ/rsBnxyl4VAa3Y5mFfEhB+38MGCreQVlTCyV3PuPa8z7Zro/T+VUp5V\nY6AbYzJE5J/ALCAPWA6UVhjHiIipbHoRGYvVPENiYiJpaWm1KjQ3N/fYtMnxRXy5oZgvZ86lUYx/\ndqXPLihj5tZi5m4rob
gU+iSGc2nHOrSIP0zmqiVkOl2gOiGu259S/sqtg6LGmPeA9wBE5BlgO7BH\nRJoZY3aJSDNgbxXTvg28DZCSkmJSU1NrVWhaWhrl07bunsuXL84nO64dl/nZXvrO7KO8NX8TnyzZ\nRklpGSN6NeeuwR3plBjvdGnqJLhuf0r5K3d7uTQ1xuwVkdZY7ef9gHbADcCz9u+vvVZlBe39sNkl\n60A+b6Rt5ItftmMMXHZaC+5M7UhbbVpRSvmIu/3Qv7Db0IuBu4wx2SLyLPCZiNwCZAKjvVVkZYb3\nbMYLs5zv7bJxby5vzNvI1yt2Eh4mXHV6a24/uz0tG+rBTqWUb7nb5DKokmEHgHM9XpGbLjrFCnSn\nertk7Mrh9XkbmfHrLqIjwrhxQFvGntWexHra/VAp5YyAOlPUVXmzy3QfN7us2JbNa3M3MidjD3HR\nEdxxdgduGdiOxnF6QpBSylkBG+jg22aXJVsP8trcjfywfh/1Yqxrrdw4oC0NYvUUfaWUf/DPPn9u\nuugU39zJ6PW5G7hi/EJW7zjMgxd05aeHzuG+8zprmCul/EpA76H7otllR/ZRXpu7kaHJibxy1anE\nRgX0KlNKBbGA3kMHq9llqRfvZPTczLUAPDqiu4a5UsqvBXyge7PZZVnWIb5evpPbBrWnhd6YWSnl\n5wI+0NsnxNGtWT2me/gG0sYYnvx2DQnx0dyR2sGj81ZKKW8I+EAHGHZKksebXb5duYtfsrL589Au\nevs3pVRACIpA93SzS0FxKc/+dy3Jzeoxqk9Lj8xTKaW8LSgC3dPNLhN+2sKO7KP8fVg3wsPEI/NU\nSilvC4pAB881u+w7Usgb8zZxXrdEBnRs4qHqlFLK+4Im0D3V7PLS7HUUFJfy14u6eqIspZTymaAJ\ndE80u2TsyuHTJdu4rn8b2ifEebA6pZTyvqAJdDi5ZhdjDE9PzyA+JpJ7z+3kheqUUsq7girQy5td\nZtSi2WXeur38uHE/953XSa/RopQKSEEV6OXNLjNOsNmluLSMp6Zn0L5JXa7t18ZL1SmllHcFVaBD\n7ZpdJi/OYvO+PP56UTciw4NulSilQkTQpdeJNrsczi/m5TnrGdChMed2a+rN0pRSyquCLtBPtNnl\n1bkbOHy0mL8PS0ZETyJSSgWuoAt0cL/ZZcv+PCYu3MroPq1Ibl7PN8UppZSXBGWgu9vs8o8ZGUSF\nh3H/+Z19UZZSSnlVUAa6O80uCzcdYNaaPdw5uCNN42N8WJ1SSnlHUAY6VN/sUlpmeGr6Glo0qMMt\nXrp1nVJK+VrQBnp1zS5f/rKd1Ttz+MsFXYiJDPd1aUop5RVBG+hVNbvkFZbw/HfrOLVVA0b0au5Q\ndUop5XlBG+hQebPLW/M3sfdIIQ8P126KSqngEtSBXrHZZWf2Ud7+32Yu7tWcPm0aOlmaUkp5XFAH\nesVml+e/W0eZgQcv6OJwZUop5XlBHejwW7PLzFW7+WrZDm4d2I6WDWOdLksppTwu6AO9vNnlninL\naBIXxR2pHRyuSCmlvCPoA7282aWopIz7h3YhPibS6ZKUUsorIpwuwBduP6s9363ezeiUVk6XopRS\nXhMSgX5J7xZc0ruF02UopZRXBX2Ti1JKhQoNdKWUChIa6EopFSQ00JVSKki4Fegi8kcRWS0iq0Tk\nExGJEZFzROQXe9iHIhISB1iVUspf1RjoItICuAdIMcb0AMKBa4APgavsYZnADd4sVCmlVPXcbXKJ\nAOrYe+GxQB5QZIxZbz8+GxjlhfqUUkq5qcZmEmPMDhF5AcgCjgKzgM+A50QkxRiTDlwOVHrWjoiM\nBcYCJCYmkpaWVqtCc3Nzaz2tUidLtz8VCMQYU/0IIg2BL4ArgWxgKvA5sAl4DojGCvnhxphTa5jX\nYWBDNaPUBw5X8VgTYH+1xfq36pYtUJ7zZOZXm2lPZBp3xq1pnGDe/sD326Bufyc2TnWP
tzHGJNRY\nhTGm2h/gCuA9l/+vB96oMM5Q4DM35vV2bR8H0muavz//1LTsgfCcJzO/2kx7ItO4M24ob3/e2B58\n/XyhvP25++NOG3oW0E9EYsW6xc+5QIaINAUQkWjgQWC8G/P65iQfD2ROLJunn/Nk5lebaU9kGnfG\nDeXtD3y/fLr9ndg4J72+amxyARCRx7GaXEqAZcCtwFPAcKwDq28aY1452WJqqCHdGJPizedQqiq6\n/alA4Fag+wMRGWuMedvpOlRo0u1PBYKACXSllFLV01P/lVIqSGigK6VUkNBAV0qpIBE0gS4idUUk\nXUSGO12LCi0i0k1ExovI5yJyh9P1qNDleKCLyAQR2SsiqyoMv0BE1onIRhF5yI1ZPYh1SQKl3OaJ\n7c8Yk2GMGQeMBs70Zr1KVcfxXi4ichaQC0w01pUbEZFwYD0wBNgOLAGuxrrS4z8qzOJmoBfQGIgB\n9htjvvVN9SrQeWL7M8bsFZERwB3AR8aYyb6qXylXjl/D3Bjzg4i0rTD4DGCjMWYzgIhMAUYaY/6B\ndTLT74hIKlAXSAaOisgMY0yZN+tWwcET2589n2nANBGZDmigK0c4HuhVaAFsc/l/O9C3qpGNMX8D\nEJEbsfbQNczVyTih7c/eobgM60J1M7xamVLV8NdArxVjzAdO16BCjzEmDUhzuAylnD8oWoUd/P76\n6i3tYUr5gm5/KiD5a6AvATqJSDsRiQKuAqY5XJMKHbr9qYDkeKCLyCfAQqCLiGwXkVuMMSXAH4Dv\ngAysa62vdrJOFZx0+1PBxPFui0oppTzD8T10pZRSnqGBrpRSQUIDXSmlgoQGulJKBQkNdKWUChIa\n6EopFSQ00JVSKkhooCulVJDQQFdKqSDx/yJQjMSQh+QJAAAAAElFTkSuQmCC\n",
      "text/plain": [
       "<matplotlib.figure.Figure at 0x10f89ff50>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "plt.semilogx(regul_val, accuracy_val)\n",
    "plt.grid(True)\n",
    "plt.title('Test accuracy by regularization (1-layer net)')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "na8xX2yHZzNF"
   },
   "source": [
    "---\n",
    "Problem 2\n",
    "---------\n",
    "Let's demonstrate an extreme case of overfitting. Restrict your training data to just a few batches. What happens?\n",
    "\n",
    "---"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "batch_size = 128\n",
    "num_hidden_nodes = 1024\n",
    "\n",
    "graph = tf.Graph()\n",
    "with graph.as_default():\n",
    "\n",
    "  # Input data. For the training data, we use a placeholder that will be fed\n",
    "  # at run time with a training minibatch.\n",
    "  tf_train_dataset = tf.placeholder(tf.float32,\n",
    "                                    shape=(batch_size, image_size * image_size))\n",
    "  tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n",
    "  tf_valid_dataset = tf.constant(valid_dataset)\n",
    "  tf_test_dataset = tf.constant(test_dataset)\n",
    "  beta_regul = tf.placeholder(tf.float32)\n",
    "  \n",
    "  # Variables.\n",
    "  weights1 = tf.Variable(\n",
    "    tf.truncated_normal([image_size * image_size, num_hidden_nodes]))\n",
    "  biases1 = tf.Variable(tf.zeros([num_hidden_nodes]))\n",
    "  weights2 = tf.Variable(\n",
    "    tf.truncated_normal([num_hidden_nodes, num_labels]))\n",
    "  biases2 = tf.Variable(tf.zeros([num_labels]))\n",
    "  \n",
    "  # Training computation.\n",
    "  lay1_train = tf.nn.relu(tf.matmul(tf_train_dataset, weights1) + biases1)\n",
    "  logits = tf.matmul(lay1_train, weights2) + biases2\n",
    "  loss = tf.reduce_mean(\n",
    "    tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))\n",
    "  \n",
    "  # Optimizer.\n",
    "  optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)\n",
    "  \n",
    "  # Predictions for the training, validation, and test data.\n",
    "  train_prediction = tf.nn.softmax(logits)\n",
    "  lay1_valid = tf.nn.relu(tf.matmul(tf_valid_dataset, weights1) + biases1)\n",
    "  valid_prediction = tf.nn.softmax(tf.matmul(lay1_valid, weights2) + biases2)\n",
    "  lay1_test = tf.nn.relu(tf.matmul(tf_test_dataset, weights1) + biases1)\n",
    "  test_prediction = tf.nn.softmax(tf.matmul(lay1_test, weights2) + biases2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Initialized\n",
      "Minibatch loss at step 0: 393.856506\n",
      "Minibatch accuracy: 10.2%\n",
      "Validation accuracy: 29.0%\n",
      "Minibatch loss at step 2: 1532.909058\n",
      "Minibatch accuracy: 33.6%\n",
      "Validation accuracy: 24.4%\n",
      "Minibatch loss at step 4: 434.846985\n",
      "Minibatch accuracy: 49.2%\n",
      "Validation accuracy: 52.0%\n",
      "Minibatch loss at step 6: 195.022415\n",
      "Minibatch accuracy: 69.5%\n",
      "Validation accuracy: 69.4%\n",
      "Minibatch loss at step 8: 24.666218\n",
      "Minibatch accuracy: 88.3%\n",
      "Validation accuracy: 74.8%\n",
      "Minibatch loss at step 10: 71.768379\n",
      "Minibatch accuracy: 85.2%\n",
      "Validation accuracy: 72.6%\n",
      "Minibatch loss at step 12: 92.446915\n",
      "Minibatch accuracy: 87.5%\n",
      "Validation accuracy: 70.6%\n",
      "Minibatch loss at step 14: 7.594429\n",
      "Minibatch accuracy: 93.8%\n",
      "Validation accuracy: 73.4%\n",
      "Minibatch loss at step 16: 10.730453\n",
      "Minibatch accuracy: 93.8%\n",
      "Validation accuracy: 73.9%\n",
      "Minibatch loss at step 18: 7.906844\n",
      "Minibatch accuracy: 94.5%\n",
      "Validation accuracy: 73.6%\n",
      "Minibatch loss at step 20: 14.397013\n",
      "Minibatch accuracy: 93.0%\n",
      "Validation accuracy: 71.6%\n",
      "Minibatch loss at step 22: 1.068547\n",
      "Minibatch accuracy: 97.7%\n",
      "Validation accuracy: 72.7%\n",
      "Minibatch loss at step 24: 12.600052\n",
      "Minibatch accuracy: 96.9%\n",
      "Validation accuracy: 74.2%\n",
      "Minibatch loss at step 26: 0.257812\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 74.7%\n",
      "Minibatch loss at step 28: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 74.4%\n",
      "Minibatch loss at step 30: 2.465644\n",
      "Minibatch accuracy: 98.4%\n",
      "Validation accuracy: 75.0%\n",
      "Minibatch loss at step 32: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 75.0%\n",
      "Minibatch loss at step 34: 0.000022\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 74.8%\n",
      "Minibatch loss at step 36: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 74.8%\n",
      "Minibatch loss at step 38: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 74.8%\n",
      "Minibatch loss at step 40: 0.000011\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 74.8%\n",
      "Minibatch loss at step 42: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 74.8%\n",
      "Minibatch loss at step 44: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 74.8%\n",
      "Minibatch loss at step 46: 0.000008\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 74.8%\n",
      "Minibatch loss at step 48: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 74.8%\n",
      "Minibatch loss at step 50: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 74.8%\n",
      "Minibatch loss at step 52: 0.000006\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 74.8%\n",
      "Minibatch loss at step 54: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 74.8%\n",
      "Minibatch loss at step 56: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 74.8%\n",
      "Minibatch loss at step 58: 0.000005\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 74.8%\n",
      "Minibatch loss at step 60: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 74.8%\n",
      "Minibatch loss at step 62: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 74.8%\n",
      "Minibatch loss at step 64: 0.000004\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 74.8%\n",
      "Minibatch loss at step 66: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 74.8%\n",
      "Minibatch loss at step 68: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 74.8%\n",
      "Minibatch loss at step 70: 0.000003\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 74.8%\n",
      "Minibatch loss at step 72: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 74.8%\n",
      "Minibatch loss at step 74: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 74.8%\n",
      "Minibatch loss at step 76: 0.000003\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 74.8%\n",
      "Minibatch loss at step 78: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 74.8%\n",
      "Minibatch loss at step 80: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 74.8%\n",
      "Minibatch loss at step 82: 0.000003\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 74.8%\n",
      "Minibatch loss at step 84: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 74.8%\n",
      "Minibatch loss at step 86: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 74.8%\n",
      "Minibatch loss at step 88: 0.000002\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 74.8%\n",
      "Minibatch loss at step 90: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 74.8%\n",
      "Minibatch loss at step 92: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 74.8%\n",
      "Minibatch loss at step 94: 0.000002\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 74.8%\n",
      "Minibatch loss at step 96: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 74.8%\n",
      "Minibatch loss at step 98: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 74.8%\n",
      "Minibatch loss at step 100: 0.000002\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 74.8%\n",
      "Test accuracy: 82.7%\n"
     ]
    }
   ],
   "source": [
    "num_steps = 101\n",
    "num_batches = 3\n",
    "\n",
    "with tf.Session(graph=graph) as session:\n",
    "  tf.initialize_all_variables().run()\n",
    "  print(\"Initialized\")\n",
    "  for step in range(num_steps):\n",
    "    # Pick an offset within the training data, which has been randomized.\n",
    "    # Note: we could use better randomization across epochs.\n",
    "    #offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n",
    "    offset = ((step % num_batches) * batch_size) % (train_labels.shape[0] - batch_size)\n",
    "    # Generate a minibatch.\n",
    "    batch_data = train_dataset[offset:(offset + batch_size), :]\n",
    "    batch_labels = train_labels[offset:(offset + batch_size), :]\n",
    "    # Prepare a dictionary telling the session where to feed the minibatch.\n",
    "    # The key of the dictionary is the placeholder node of the graph to be fed,\n",
    "    # and the value is the numpy array to feed to it.\n",
    "    feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels, beta_regul : 1e-3}\n",
    "    _, l, predictions = session.run(\n",
    "      [optimizer, loss, train_prediction], feed_dict=feed_dict)\n",
    "    if (step % 2 == 0):\n",
    "      print(\"Minibatch loss at step %d: %f\" % (step, l))\n",
    "      print(\"Minibatch accuracy: %.1f%%\" % accuracy(predictions, batch_labels))\n",
    "      print(\"Validation accuracy: %.1f%%\" % accuracy(\n",
    "        valid_prediction.eval(), valid_labels))\n",
    "  print(\"Test accuracy: %.1f%%\" % accuracy(test_prediction.eval(), test_labels))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Since there are far too much parameters and no regularization, the accuracy of the batches is 100%. The generalization capability is poor, as shown in the validation and test accuracy."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "ww3SCBUdlkRc"
   },
   "source": [
    "---\n",
    "Problem 3\n",
    "---------\n",
    "Introduce Dropout on the hidden layer of the neural network. Remember: Dropout should only be introduced during training, not evaluation, otherwise your evaluation results would be stochastic as well. TensorFlow provides `nn.dropout()` for that, but you have to make sure it's only inserted during training.\n",
    "\n",
    "What happens to our extreme overfitting case?\n",
    "\n",
    "---"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "batch_size = 128\n",
    "num_hidden_nodes = 1024\n",
    "\n",
    "graph = tf.Graph()\n",
    "with graph.as_default():\n",
    "\n",
    "  # Input data. For the training data, we use a placeholder that will be fed\n",
    "  # at run time with a training minibatch.\n",
    "  tf_train_dataset = tf.placeholder(tf.float32,\n",
    "                                    shape=(batch_size, image_size * image_size))\n",
    "  tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n",
    "  tf_valid_dataset = tf.constant(valid_dataset)\n",
    "  tf_test_dataset = tf.constant(test_dataset)\n",
    "  \n",
    "  # Variables.\n",
    "  weights1 = tf.Variable(\n",
    "    tf.truncated_normal([image_size * image_size, num_hidden_nodes]))\n",
    "  biases1 = tf.Variable(tf.zeros([num_hidden_nodes]))\n",
    "  weights2 = tf.Variable(\n",
    "    tf.truncated_normal([num_hidden_nodes, num_labels]))\n",
    "  biases2 = tf.Variable(tf.zeros([num_labels]))\n",
    "  \n",
    "  # Training computation.\n",
    "  lay1_train = tf.nn.relu(tf.matmul(tf_train_dataset, weights1) + biases1)\n",
    "  drop1 = tf.nn.dropout(lay1_train, 0.5)\n",
    "  logits = tf.matmul(drop1, weights2) + biases2\n",
    "  loss = tf.reduce_mean(\n",
    "    tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))\n",
    "    \n",
    "  # Optimizer.\n",
    "  optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)\n",
    "  \n",
    "  # Predictions for the training, validation, and test data.\n",
    "  train_prediction = tf.nn.softmax(logits)\n",
    "  lay1_valid = tf.nn.relu(tf.matmul(tf_valid_dataset, weights1) + biases1)\n",
    "  valid_prediction = tf.nn.softmax(tf.matmul(lay1_valid, weights2) + biases2)\n",
    "  lay1_test = tf.nn.relu(tf.matmul(tf_test_dataset, weights1) + biases1)\n",
    "  test_prediction = tf.nn.softmax(tf.matmul(lay1_test, weights2) + biases2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Initialized\n",
      "Minibatch loss at step 0: 509.164093\n",
      "Minibatch accuracy: 10.2%\n",
      "Validation accuracy: 41.2%\n",
      "Minibatch loss at step 2: 670.379761\n",
      "Minibatch accuracy: 46.9%\n",
      "Validation accuracy: 29.8%\n",
      "Minibatch loss at step 4: 576.739258\n",
      "Minibatch accuracy: 54.7%\n",
      "Validation accuracy: 51.3%\n",
      "Minibatch loss at step 6: 117.010681\n",
      "Minibatch accuracy: 80.5%\n",
      "Validation accuracy: 61.6%\n",
      "Minibatch loss at step 8: 15.351355\n",
      "Minibatch accuracy: 91.4%\n",
      "Validation accuracy: 65.6%\n",
      "Minibatch loss at step 10: 4.698412\n",
      "Minibatch accuracy: 97.7%\n",
      "Validation accuracy: 65.3%\n",
      "Minibatch loss at step 12: 0.000791\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 67.5%\n",
      "Minibatch loss at step 14: 3.532723\n",
      "Minibatch accuracy: 97.7%\n",
      "Validation accuracy: 67.8%\n",
      "Minibatch loss at step 16: 9.192253\n",
      "Minibatch accuracy: 97.7%\n",
      "Validation accuracy: 67.2%\n",
      "Minibatch loss at step 18: 0.082412\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 66.4%\n",
      "Minibatch loss at step 20: 13.709968\n",
      "Minibatch accuracy: 96.1%\n",
      "Validation accuracy: 65.9%\n",
      "Minibatch loss at step 22: 7.164266\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 67.0%\n",
      "Minibatch loss at step 24: 2.486946\n",
      "Minibatch accuracy: 96.9%\n",
      "Validation accuracy: 67.3%\n",
      "Minibatch loss at step 26: 17.428190\n",
      "Minibatch accuracy: 96.1%\n",
      "Validation accuracy: 67.1%\n",
      "Minibatch loss at step 28: 0.311226\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 66.7%\n",
      "Minibatch loss at step 30: 2.423242\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 65.9%\n",
      "Minibatch loss at step 32: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 65.9%\n",
      "Minibatch loss at step 34: 0.420512\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 66.6%\n",
      "Minibatch loss at step 36: 1.362866\n",
      "Minibatch accuracy: 98.4%\n",
      "Validation accuracy: 67.8%\n",
      "Minibatch loss at step 38: 1.778781\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 68.8%\n",
      "Minibatch loss at step 40: 0.980640\n",
      "Minibatch accuracy: 98.4%\n",
      "Validation accuracy: 68.3%\n",
      "Minibatch loss at step 42: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 68.4%\n",
      "Minibatch loss at step 44: 0.661741\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 67.9%\n",
      "Minibatch loss at step 46: 0.570174\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 68.0%\n",
      "Minibatch loss at step 48: 0.187137\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 68.1%\n",
      "Minibatch loss at step 50: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 67.3%\n",
      "Minibatch loss at step 52: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 67.3%\n",
      "Minibatch loss at step 54: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 68.4%\n",
      "Minibatch loss at step 56: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 68.4%\n",
      "Minibatch loss at step 58: 0.307698\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 68.3%\n",
      "Minibatch loss at step 60: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 68.3%\n",
      "Minibatch loss at step 62: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 68.3%\n",
      "Minibatch loss at step 64: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 68.2%\n",
      "Minibatch loss at step 66: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 68.2%\n",
      "Minibatch loss at step 68: 0.522242\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 66.1%\n",
      "Minibatch loss at step 70: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 68.2%\n",
      "Minibatch loss at step 72: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 68.3%\n",
      "Minibatch loss at step 74: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 68.3%\n",
      "Minibatch loss at step 76: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 68.3%\n",
      "Minibatch loss at step 78: 0.099388\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 69.2%\n",
      "Minibatch loss at step 80: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 69.2%\n",
      "Minibatch loss at step 82: 0.043367\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 69.2%\n",
      "Minibatch loss at step 84: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 69.2%\n",
      "Minibatch loss at step 86: 0.605456\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 69.0%\n",
      "Minibatch loss at step 88: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 69.5%\n",
      "Minibatch loss at step 90: 0.998960\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 69.3%\n",
      "Minibatch loss at step 92: 0.972533\n",
      "Minibatch accuracy: 98.4%\n",
      "Validation accuracy: 69.5%\n",
      "Minibatch loss at step 94: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 68.9%\n",
      "Minibatch loss at step 96: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 68.9%\n",
      "Minibatch loss at step 98: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 68.9%\n",
      "Minibatch loss at step 100: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 68.1%\n",
      "Test accuracy: 75.0%\n"
     ]
    }
   ],
   "source": [
    "num_steps = 101\n",
    "num_batches = 3\n",
    "\n",
    "with tf.Session(graph=graph) as session:\n",
    "  tf.initialize_all_variables().run()\n",
    "  print(\"Initialized\")\n",
    "  for step in range(num_steps):\n",
    "    # Pick an offset within the training data, which has been randomized.\n",
    "    # Note: we could use better randomization across epochs.\n",
    "    #offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n",
    "    offset = step % num_batches\n",
    "    # Generate a minibatch.\n",
    "    batch_data = train_dataset[offset:(offset + batch_size), :]\n",
    "    batch_labels = train_labels[offset:(offset + batch_size), :]\n",
    "    # Prepare a dictionary telling the session where to feed the minibatch.\n",
    "    # The key of the dictionary is the placeholder node of the graph to be fed,\n",
    "    # and the value is the numpy array to feed to it.\n",
    "    feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}\n",
    "    _, l, predictions = session.run(\n",
    "      [optimizer, loss, train_prediction], feed_dict=feed_dict)\n",
    "    if (step % 2 == 0):\n",
    "      print(\"Minibatch loss at step %d: %f\" % (step, l))\n",
    "      print(\"Minibatch accuracy: %.1f%%\" % accuracy(predictions, batch_labels))\n",
    "      print(\"Validation accuracy: %.1f%%\" % accuracy(\n",
    "        valid_prediction.eval(), valid_labels))\n",
    "  print(\"Test accuracy: %.1f%%\" % accuracy(test_prediction.eval(), test_labels))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The first conclusion is that 100% of accuracy on the minibatches is more difficult achieved or to keep. As a result, the test accuracy is improved by 6%, the final net is more capable of generalization."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "-b1hTz3VWZjw"
   },
   "source": [
    "---\n",
    "Problem 4\n",
    "---------\n",
    "\n",
    "Try to get the best performance you can using a multi-layer model! The best reported test accuracy using a deep network is [97.1%](http://yaroslavvb.blogspot.com/2011/09/notmnist-dataset.html?showComment=1391023266211#c8758720086795711595).\n",
    "\n",
    "One avenue you can explore is to add multiple layers.\n",
    "\n",
    "Another one is to use learning rate decay:\n",
    "\n",
    "    global_step = tf.Variable(0)  # count the number of steps taken.\n",
    "    learning_rate = tf.train.exponential_decay(0.5, global_step, ...)\n",
    "    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)\n",
    " \n",
    " ---\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Let's do a first try with 2 layers. Note how the parameters are initialized, compared to the previous cases."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "batch_size = 128\n",
    "num_hidden_nodes1 = 1024\n",
    "num_hidden_nodes2 = 100\n",
    "beta_regul = 1e-3\n",
    "\n",
    "graph = tf.Graph()\n",
    "with graph.as_default():\n",
    "\n",
    "  # Input data. For the training data, we use a placeholder that will be fed\n",
    "  # at run time with a training minibatch.\n",
    "  tf_train_dataset = tf.placeholder(tf.float32,\n",
    "                                    shape=(batch_size, image_size * image_size))\n",
    "  tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n",
    "  tf_valid_dataset = tf.constant(valid_dataset)\n",
    "  tf_test_dataset = tf.constant(test_dataset)\n",
    "  global_step = tf.Variable(0)\n",
    "\n",
    "  # Variables.\n",
    "  weights1 = tf.Variable(\n",
    "    tf.truncated_normal(\n",
    "        [image_size * image_size, num_hidden_nodes1],\n",
    "        stddev=np.sqrt(2.0 / (image_size * image_size)))\n",
    "    )\n",
    "  biases1 = tf.Variable(tf.zeros([num_hidden_nodes1]))\n",
    "  weights2 = tf.Variable(\n",
    "    tf.truncated_normal([num_hidden_nodes1, num_hidden_nodes2], stddev=np.sqrt(2.0 / num_hidden_nodes1)))\n",
    "  biases2 = tf.Variable(tf.zeros([num_hidden_nodes2]))\n",
    "  weights3 = tf.Variable(\n",
    "    tf.truncated_normal([num_hidden_nodes2, num_labels], stddev=np.sqrt(2.0 / num_hidden_nodes2)))\n",
    "  biases3 = tf.Variable(tf.zeros([num_labels]))\n",
    "  \n",
    "  # Training computation.\n",
    "  lay1_train = tf.nn.relu(tf.matmul(tf_train_dataset, weights1) + biases1)\n",
    "  lay2_train = tf.nn.relu(tf.matmul(lay1_train, weights2) + biases2)\n",
    "  logits = tf.matmul(lay2_train, weights3) + biases3\n",
    "  loss = tf.reduce_mean(\n",
    "    tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels)) + \\\n",
    "      beta_regul * (tf.nn.l2_loss(weights1) + tf.nn.l2_loss(weights2) + tf.nn.l2_loss(weights3))\n",
    "  \n",
    "  # Optimizer.\n",
    "  learning_rate = tf.train.exponential_decay(0.5, global_step, 1000, 0.65, staircase=True)\n",
    "  optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)\n",
    "  \n",
    "  # Predictions for the training, validation, and test data.\n",
    "  train_prediction = tf.nn.softmax(logits)\n",
    "  lay1_valid = tf.nn.relu(tf.matmul(tf_valid_dataset, weights1) + biases1)\n",
    "  lay2_valid = tf.nn.relu(tf.matmul(lay1_valid, weights2) + biases2)\n",
    "  valid_prediction = tf.nn.softmax(tf.matmul(lay2_valid, weights3) + biases3)\n",
    "  lay1_test = tf.nn.relu(tf.matmul(tf_test_dataset, weights1) + biases1)\n",
    "  lay2_test = tf.nn.relu(tf.matmul(lay1_test, weights2) + biases2)\n",
    "  test_prediction = tf.nn.softmax(tf.matmul(lay2_test, weights3) + biases3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Initialized\n",
      "Minibatch loss at step 0: 3.258846\n",
      "Minibatch accuracy: 6.2%\n",
      "Validation accuracy: 43.1%\n",
      "Minibatch loss at step 500: 0.934960\n",
      "Minibatch accuracy: 88.3%\n",
      "Validation accuracy: 85.4%\n",
      "Minibatch loss at step 1000: 0.882697\n",
      "Minibatch accuracy: 86.7%\n",
      "Validation accuracy: 86.4%\n",
      "Minibatch loss at step 1500: 0.578385\n",
      "Minibatch accuracy: 93.0%\n",
      "Validation accuracy: 87.9%\n",
      "Minibatch loss at step 2000: 0.513481\n",
      "Minibatch accuracy: 93.8%\n",
      "Validation accuracy: 87.9%\n",
      "Minibatch loss at step 2500: 0.519019\n",
      "Minibatch accuracy: 90.6%\n",
      "Validation accuracy: 88.4%\n",
      "Minibatch loss at step 3000: 0.564591\n",
      "Minibatch accuracy: 89.1%\n",
      "Validation accuracy: 88.4%\n",
      "Minibatch loss at step 3500: 0.579945\n",
      "Minibatch accuracy: 88.3%\n",
      "Validation accuracy: 89.0%\n",
      "Minibatch loss at step 4000: 0.441892\n",
      "Minibatch accuracy: 92.2%\n",
      "Validation accuracy: 88.9%\n",
      "Minibatch loss at step 4500: 0.435040\n",
      "Minibatch accuracy: 91.4%\n",
      "Validation accuracy: 89.3%\n",
      "Minibatch loss at step 5000: 0.506730\n",
      "Minibatch accuracy: 89.8%\n",
      "Validation accuracy: 89.4%\n",
      "Minibatch loss at step 5500: 0.501448\n",
      "Minibatch accuracy: 89.8%\n",
      "Validation accuracy: 89.5%\n",
      "Minibatch loss at step 6000: 0.572144\n",
      "Minibatch accuracy: 87.5%\n",
      "Validation accuracy: 89.7%\n",
      "Minibatch loss at step 6500: 0.401036\n",
      "Minibatch accuracy: 92.2%\n",
      "Validation accuracy: 89.9%\n",
      "Minibatch loss at step 7000: 0.513986\n",
      "Minibatch accuracy: 89.1%\n",
      "Validation accuracy: 89.9%\n",
      "Minibatch loss at step 7500: 0.492684\n",
      "Minibatch accuracy: 89.8%\n",
      "Validation accuracy: 90.0%\n",
      "Minibatch loss at step 8000: 0.570090\n",
      "Minibatch accuracy: 85.9%\n",
      "Validation accuracy: 89.9%\n",
      "Minibatch loss at step 8500: 0.428472\n",
      "Minibatch accuracy: 92.2%\n",
      "Validation accuracy: 90.0%\n",
      "Minibatch loss at step 9000: 0.467448\n",
      "Minibatch accuracy: 90.6%\n",
      "Validation accuracy: 90.1%\n",
      "Test accuracy: 95.7%\n"
     ]
    }
   ],
   "source": [
    "num_steps = 9001\n",
    "\n",
    "with tf.Session(graph=graph) as session:\n",
    "  tf.initialize_all_variables().run()\n",
    "  print(\"Initialized\")\n",
    "  for step in range(num_steps):\n",
    "    # Pick an offset within the training data, which has been randomized.\n",
    "    # Note: we could use better randomization across epochs.\n",
    "    offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n",
    "    # Generate a minibatch.\n",
    "    batch_data = train_dataset[offset:(offset + batch_size), :]\n",
    "    batch_labels = train_labels[offset:(offset + batch_size), :]\n",
    "    # Prepare a dictionary telling the session where to feed the minibatch.\n",
    "    # The key of the dictionary is the placeholder node of the graph to be fed,\n",
    "    # and the value is the numpy array to feed to it.\n",
    "    feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}\n",
    "    _, l, predictions = session.run(\n",
    "      [optimizer, loss, train_prediction], feed_dict=feed_dict)\n",
    "    if (step % 500 == 0):\n",
    "      print(\"Minibatch loss at step %d: %f\" % (step, l))\n",
    "      print(\"Minibatch accuracy: %.1f%%\" % accuracy(predictions, batch_labels))\n",
    "      print(\"Validation accuracy: %.1f%%\" % accuracy(\n",
    "        valid_prediction.eval(), valid_labels))\n",
    "  print(\"Test accuracy: %.1f%%\" % accuracy(test_prediction.eval(), test_labels))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "This is getting really good. Let's try one layer deeper with dropouts."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "batch_size = 128\n",
    "num_hidden_nodes1 = 1024\n",
    "num_hidden_nodes2 = 256\n",
    "num_hidden_nodes3 = 128\n",
    "keep_prob = 0.5\n",
    "\n",
    "graph = tf.Graph()\n",
    "with graph.as_default():\n",
    "\n",
    "  # Input data. For the training data, we use a placeholder that will be fed\n",
    "  # at run time with a training minibatch.\n",
    "  tf_train_dataset = tf.placeholder(tf.float32,\n",
    "                                    shape=(batch_size, image_size * image_size))\n",
    "  tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n",
    "  tf_valid_dataset = tf.constant(valid_dataset)\n",
    "  tf_test_dataset = tf.constant(test_dataset)\n",
    "  global_step = tf.Variable(0)\n",
    "\n",
    "  # Variables.\n",
    "  weights1 = tf.Variable(\n",
    "    tf.truncated_normal(\n",
    "        [image_size * image_size, num_hidden_nodes1],\n",
    "        stddev=np.sqrt(2.0 / (image_size * image_size)))\n",
    "    )\n",
    "  biases1 = tf.Variable(tf.zeros([num_hidden_nodes1]))\n",
    "  weights2 = tf.Variable(\n",
    "    tf.truncated_normal([num_hidden_nodes1, num_hidden_nodes2], stddev=np.sqrt(2.0 / num_hidden_nodes1)))\n",
    "  biases2 = tf.Variable(tf.zeros([num_hidden_nodes2]))\n",
    "  weights3 = tf.Variable(\n",
    "    tf.truncated_normal([num_hidden_nodes2, num_hidden_nodes3], stddev=np.sqrt(2.0 / num_hidden_nodes2)))\n",
    "  biases3 = tf.Variable(tf.zeros([num_hidden_nodes3]))\n",
    "  weights4 = tf.Variable(\n",
    "    tf.truncated_normal([num_hidden_nodes3, num_labels], stddev=np.sqrt(2.0 / num_hidden_nodes3)))\n",
    "  biases4 = tf.Variable(tf.zeros([num_labels]))\n",
    "  \n",
    "  # Training computation.\n",
    "  lay1_train = tf.nn.relu(tf.matmul(tf_train_dataset, weights1) + biases1)\n",
    "  lay2_train = tf.nn.relu(tf.matmul(lay1_train, weights2) + biases2)\n",
    "  lay3_train = tf.nn.relu(tf.matmul(lay2_train, weights3) + biases3)\n",
    "  logits = tf.matmul(lay3_train, weights4) + biases4\n",
    "  loss = tf.reduce_mean(\n",
    "    tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))\n",
    "  \n",
    "  # Optimizer.\n",
    "  learning_rate = tf.train.exponential_decay(0.5, global_step, 4000, 0.65, staircase=True)\n",
    "  optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)\n",
    "  \n",
    "  # Predictions for the training, validation, and test data.\n",
    "  train_prediction = tf.nn.softmax(logits)\n",
    "  lay1_valid = tf.nn.relu(tf.matmul(tf_valid_dataset, weights1) + biases1)\n",
    "  lay2_valid = tf.nn.relu(tf.matmul(lay1_valid, weights2) + biases2)\n",
    "  lay3_valid = tf.nn.relu(tf.matmul(lay2_valid, weights3) + biases3)\n",
    "  valid_prediction = tf.nn.softmax(tf.matmul(lay3_valid, weights4) + biases4)\n",
    "  lay1_test = tf.nn.relu(tf.matmul(tf_test_dataset, weights1) + biases1)\n",
    "  lay2_test = tf.nn.relu(tf.matmul(lay1_test, weights2) + biases2)\n",
    "  lay3_test = tf.nn.relu(tf.matmul(lay2_test, weights3) + biases3)\n",
    "  test_prediction = tf.nn.softmax(tf.matmul(lay3_test, weights4) + biases4)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Initialized\n",
      "Minibatch loss at step 0: 2.339312\n",
      "Minibatch accuracy: 10.9%\n",
      "Validation accuracy: 37.3%\n",
      "Minibatch loss at step 500: 0.352447\n",
      "Minibatch accuracy: 86.7%\n",
      "Validation accuracy: 86.2%\n",
      "Minibatch loss at step 1000: 0.461886\n",
      "Minibatch accuracy: 85.2%\n",
      "Validation accuracy: 86.7%\n",
      "Minibatch loss at step 1500: 0.255395\n",
      "Minibatch accuracy: 92.2%\n",
      "Validation accuracy: 88.1%\n",
      "Minibatch loss at step 2000: 0.214306\n",
      "Minibatch accuracy: 94.5%\n",
      "Validation accuracy: 88.5%\n",
      "Minibatch loss at step 2500: 0.294321\n",
      "Minibatch accuracy: 91.4%\n",
      "Validation accuracy: 88.9%\n",
      "Minibatch loss at step 3000: 0.332853\n",
      "Minibatch accuracy: 89.1%\n",
      "Validation accuracy: 89.0%\n",
      "Minibatch loss at step 3500: 0.370949\n",
      "Minibatch accuracy: 87.5%\n",
      "Validation accuracy: 89.3%\n",
      "Minibatch loss at step 4000: 0.240599\n",
      "Minibatch accuracy: 93.0%\n",
      "Validation accuracy: 89.1%\n",
      "Minibatch loss at step 4500: 0.234550\n",
      "Minibatch accuracy: 91.4%\n",
      "Validation accuracy: 89.7%\n",
      "Minibatch loss at step 5000: 0.291464\n",
      "Minibatch accuracy: 93.0%\n",
      "Validation accuracy: 89.7%\n",
      "Minibatch loss at step 5500: 0.226784\n",
      "Minibatch accuracy: 93.0%\n",
      "Validation accuracy: 89.9%\n",
      "Minibatch loss at step 6000: 0.351109\n",
      "Minibatch accuracy: 90.6%\n",
      "Validation accuracy: 90.0%\n",
      "Minibatch loss at step 6500: 0.243099\n",
      "Minibatch accuracy: 94.5%\n",
      "Validation accuracy: 90.2%\n",
      "Minibatch loss at step 7000: 0.333592\n",
      "Minibatch accuracy: 89.1%\n",
      "Validation accuracy: 90.0%\n",
      "Minibatch loss at step 7500: 0.237897\n",
      "Minibatch accuracy: 90.6%\n",
      "Validation accuracy: 90.0%\n",
      "Minibatch loss at step 8000: 0.297810\n",
      "Minibatch accuracy: 90.6%\n",
      "Validation accuracy: 90.3%\n",
      "Minibatch loss at step 8500: 0.121644\n",
      "Minibatch accuracy: 96.1%\n",
      "Validation accuracy: 90.3%\n",
      "Minibatch loss at step 9000: 0.214696\n",
      "Minibatch accuracy: 95.3%\n",
      "Validation accuracy: 90.1%\n",
      "Minibatch loss at step 9500: 0.152990\n",
      "Minibatch accuracy: 96.1%\n",
      "Validation accuracy: 90.6%\n",
      "Minibatch loss at step 10000: 0.159044\n",
      "Minibatch accuracy: 93.8%\n",
      "Validation accuracy: 90.1%\n",
      "Minibatch loss at step 10500: 0.169443\n",
      "Minibatch accuracy: 95.3%\n",
      "Validation accuracy: 90.7%\n",
      "Minibatch loss at step 11000: 0.102549\n",
      "Minibatch accuracy: 96.1%\n",
      "Validation accuracy: 90.4%\n",
      "Minibatch loss at step 11500: 0.123204\n",
      "Minibatch accuracy: 95.3%\n",
      "Validation accuracy: 90.3%\n",
      "Minibatch loss at step 12000: 0.144668\n",
      "Minibatch accuracy: 95.3%\n",
      "Validation accuracy: 90.5%\n",
      "Minibatch loss at step 12500: 0.137568\n",
      "Minibatch accuracy: 94.5%\n",
      "Validation accuracy: 90.7%\n",
      "Minibatch loss at step 13000: 0.208974\n",
      "Minibatch accuracy: 93.0%\n",
      "Validation accuracy: 91.0%\n",
      "Minibatch loss at step 13500: 0.054627\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 90.7%\n",
      "Minibatch loss at step 14000: 0.127670\n",
      "Minibatch accuracy: 96.9%\n",
      "Validation accuracy: 90.9%\n",
      "Minibatch loss at step 14500: 0.157799\n",
      "Minibatch accuracy: 94.5%\n",
      "Validation accuracy: 91.0%\n",
      "Minibatch loss at step 15000: 0.077068\n",
      "Minibatch accuracy: 97.7%\n",
      "Validation accuracy: 90.8%\n",
      "Minibatch loss at step 15500: 0.105706\n",
      "Minibatch accuracy: 97.7%\n",
      "Validation accuracy: 90.7%\n",
      "Minibatch loss at step 16000: 0.028911\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 90.9%\n",
      "Minibatch loss at step 16500: 0.061069\n",
      "Minibatch accuracy: 98.4%\n",
      "Validation accuracy: 90.9%\n",
      "Minibatch loss at step 17000: 0.036268\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 91.0%\n",
      "Minibatch loss at step 17500: 0.018908\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 91.3%\n",
      "Minibatch loss at step 18000: 0.055497\n",
      "Minibatch accuracy: 98.4%\n",
      "Validation accuracy: 90.9%\n",
      "Test accuracy: 96.4%\n"
     ]
    }
   ],
   "source": [
    "num_steps = 18001\n",
    "\n",
    "with tf.Session(graph=graph) as session:\n",
    "  tf.initialize_all_variables().run()\n",
    "  print(\"Initialized\")\n",
    "  for step in range(num_steps):\n",
    "    # Pick an offset within the training data, which has been randomized.\n",
    "    # Note: we could use better randomization across epochs.\n",
    "    offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n",
    "    # Generate a minibatch.\n",
    "    batch_data = train_dataset[offset:(offset + batch_size), :]\n",
    "    batch_labels = train_labels[offset:(offset + batch_size), :]\n",
    "    # Prepare a dictionary telling the session where to feed the minibatch.\n",
    "    # The key of the dictionary is the placeholder node of the graph to be fed,\n",
    "    # and the value is the numpy array to feed to it.\n",
    "    feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}\n",
    "    _, l, predictions = session.run(\n",
    "      [optimizer, loss, train_prediction], feed_dict=feed_dict)\n",
    "    if (step % 500 == 0):\n",
    "      print(\"Minibatch loss at step %d: %f\" % (step, l))\n",
    "      print(\"Minibatch accuracy: %.1f%%\" % accuracy(predictions, batch_labels))\n",
    "      print(\"Validation accuracy: %.1f%%\" % accuracy(\n",
    "        valid_prediction.eval(), valid_labels))\n",
    "  print(\"Test accuracy: %.1f%%\" % accuracy(test_prediction.eval(), test_labels))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Huge! That's my best score on this dataset."
   ]
  }
 ],
 "metadata": {
  "colab": {
   "default_view": {},
   "name": "3_regularization.ipynb",
   "provenance": [],
   "version": "0.3.2",
   "views": {}
  },
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.14"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
