{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "kR-4eNdK6lYS"
   },
   "source": [
    "Deep Learning\n",
    "=============\n",
    "\n",
    "Assignment 3\n",
    "------------\n",
    "\n",
    "Previously in `2_fullyconnected.ipynb`, you trained a logistic regression and a neural network model.\n",
    "\n",
    "The goal of this assignment is to explore regularization techniques."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "cellView": "both",
    "colab": {
     "autoexec": {
      "startup": false,
      "wait_interval": 0
     }
    },
    "colab_type": "code",
    "collapsed": true,
    "id": "JLpLa8Jt7Vu4"
   },
   "outputs": [],
   "source": [
    "# These are all the modules we'll be using later. Make sure you can import them\n",
    "# before proceeding further.\n",
    "from __future__ import print_function\n",
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "from six.moves import cPickle as pickle"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
     "# Some personal imports\n",
    "import matplotlib.pyplot as plt\n",
    "%matplotlib inline"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "1HrCK6e17WzV"
   },
   "source": [
    "First reload the data we generated in _notmnist.ipynb_."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "cellView": "both",
    "colab": {
     "autoexec": {
      "startup": false,
      "wait_interval": 0
     },
     "output_extras": [
      {
       "item_id": 1
      }
     ]
    },
    "colab_type": "code",
    "collapsed": false,
    "executionInfo": {
     "elapsed": 11777,
     "status": "ok",
     "timestamp": 1449849322348,
     "user": {
      "color": "",
      "displayName": "",
      "isAnonymous": false,
      "isMe": true,
      "permissionId": "",
      "photoUrl": "",
      "sessionId": "0",
      "userId": ""
     },
     "user_tz": 480
    },
    "id": "y3-cj1bpmuxc",
    "outputId": "e03576f1-ebbe-4838-c388-f1777bcc9873"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training set (200000, 28, 28) (200000,)\n",
      "Validation set (10000, 28, 28) (10000,)\n",
      "Test set (10000, 28, 28) (10000,)\n"
     ]
    }
   ],
   "source": [
    "pickle_file = 'notMNIST.pickle'\n",
    "\n",
    "with open(pickle_file, 'rb') as f:\n",
    "  save = pickle.load(f)\n",
    "  train_dataset = save['train_dataset']\n",
    "  train_labels = save['train_labels']\n",
    "  valid_dataset = save['valid_dataset']\n",
    "  valid_labels = save['valid_labels']\n",
    "  test_dataset = save['test_dataset']\n",
    "  test_labels = save['test_labels']\n",
    "  del save  # hint to help gc free up memory\n",
    "  print('Training set', train_dataset.shape, train_labels.shape)\n",
    "  print('Validation set', valid_dataset.shape, valid_labels.shape)\n",
    "  print('Test set', test_dataset.shape, test_labels.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "L7aHrm6nGDMB"
   },
   "source": [
    "Reformat into a shape that's more adapted to the models we're going to train:\n",
    "- data as a flat matrix,\n",
    "- labels as float 1-hot encodings."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "cellView": "both",
    "colab": {
     "autoexec": {
      "startup": false,
      "wait_interval": 0
     },
     "output_extras": [
      {
       "item_id": 1
      }
     ]
    },
    "colab_type": "code",
    "collapsed": false,
    "executionInfo": {
     "elapsed": 11728,
     "status": "ok",
     "timestamp": 1449849322356,
     "user": {
      "color": "",
      "displayName": "",
      "isAnonymous": false,
      "isMe": true,
      "permissionId": "",
      "photoUrl": "",
      "sessionId": "0",
      "userId": ""
     },
     "user_tz": 480
    },
    "id": "IRSyYiIIGIzS",
    "outputId": "3f8996ee-3574-4f44-c953-5c8a04636582"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training set (200000, 784) (200000, 10)\n",
      "Validation set (10000, 784) (10000, 10)\n",
      "Test set (10000, 784) (10000, 10)\n"
     ]
    }
   ],
   "source": [
    "image_size = 28\n",
    "num_labels = 10\n",
    "\n",
    "def reformat(dataset, labels):\n",
    "  dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32)\n",
    "  # Map 2 to [0.0, 1.0, 0.0 ...], 3 to [0.0, 0.0, 1.0 ...]\n",
    "  labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)\n",
    "  return dataset, labels\n",
    "train_dataset, train_labels = reformat(train_dataset, train_labels)\n",
    "valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)\n",
    "test_dataset, test_labels = reformat(test_dataset, test_labels)\n",
    "print('Training set', train_dataset.shape, train_labels.shape)\n",
    "print('Validation set', valid_dataset.shape, valid_labels.shape)\n",
    "print('Test set', test_dataset.shape, test_labels.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "cellView": "both",
    "colab": {
     "autoexec": {
      "startup": false,
      "wait_interval": 0
     }
    },
    "colab_type": "code",
    "collapsed": true,
    "id": "RajPLaL_ZW6w"
   },
   "outputs": [],
   "source": [
    "def accuracy(predictions, labels):\n",
    "  return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))\n",
    "          / predictions.shape[0])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "sgLbUAQ1CW-1"
   },
   "source": [
    "---\n",
    "Problem 1\n",
    "---------\n",
    "\n",
    "Introduce and tune L2 regularization for both logistic and neural network models. Remember that L2 amounts to adding a penalty on the norm of the weights to the loss. In TensorFlow, you can compute the L2 loss for a tensor `t` using `nn.l2_loss(t)`. The right amount of regularization should improve your validation / test accuracy.\n",
    "\n",
    "---"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Let's start with the logistic model:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "batch_size = 128\n",
    "\n",
    "graph = tf.Graph()\n",
    "with graph.as_default():\n",
    "\n",
    "  # Input data. For the training data, we use a placeholder that will be fed\n",
    "  # at run time with a training minibatch.\n",
    "  tf_train_dataset = tf.placeholder(tf.float32,\n",
    "                                    shape=(batch_size, image_size * image_size))\n",
    "  tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n",
    "  tf_valid_dataset = tf.constant(valid_dataset)\n",
    "  tf_test_dataset = tf.constant(test_dataset)\n",
    "  beta_regul = tf.placeholder(tf.float32)\n",
    "  \n",
    "  # Variables.\n",
    "  weights = tf.Variable(\n",
    "    tf.truncated_normal([image_size * image_size, num_labels]))\n",
    "  biases = tf.Variable(tf.zeros([num_labels]))\n",
    "  \n",
    "  # Training computation.\n",
    "  logits = tf.matmul(tf_train_dataset, weights) + biases\n",
    "  loss = tf.reduce_mean(\n",
    "    tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels)) + beta_regul * tf.nn.l2_loss(weights)\n",
    "  \n",
    "  # Optimizer.\n",
    "  optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)\n",
    "  \n",
    "  # Predictions for the training, validation, and test data.\n",
    "  train_prediction = tf.nn.softmax(logits)\n",
    "  valid_prediction = tf.nn.softmax(\n",
    "    tf.matmul(tf_valid_dataset, weights) + biases)\n",
    "  test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Initialized\n",
      "Minibatch loss at step 0: 19.309465\n",
      "Minibatch accuracy: 6.2%\n",
      "Validation accuracy: 12.7%\n",
      "Minibatch loss at step 500: 2.463279\n",
      "Minibatch accuracy: 82.8%\n",
      "Validation accuracy: 76.4%\n",
      "Minibatch loss at step 1000: 1.775184\n",
      "Minibatch accuracy: 78.9%\n",
      "Validation accuracy: 78.3%\n",
      "Minibatch loss at step 1500: 0.983707\n",
      "Minibatch accuracy: 85.2%\n",
      "Validation accuracy: 79.8%\n",
      "Minibatch loss at step 2000: 0.856673\n",
      "Minibatch accuracy: 86.7%\n",
      "Validation accuracy: 80.7%\n",
      "Minibatch loss at step 2500: 0.862013\n",
      "Minibatch accuracy: 79.7%\n",
      "Validation accuracy: 81.2%\n",
      "Minibatch loss at step 3000: 0.778380\n",
      "Minibatch accuracy: 82.0%\n",
      "Validation accuracy: 81.9%\n",
      "Test accuracy: 88.9%\n"
     ]
    }
   ],
   "source": [
    "num_steps = 3001\n",
    "\n",
    "with tf.Session(graph=graph) as session:\n",
    "  tf.initialize_all_variables().run()\n",
    "  print(\"Initialized\")\n",
    "  for step in range(num_steps):\n",
    "    # Pick an offset within the training data, which has been randomized.\n",
    "    # Note: we could use better randomization across epochs.\n",
    "    offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n",
    "    # Generate a minibatch.\n",
    "    batch_data = train_dataset[offset:(offset + batch_size), :]\n",
    "    batch_labels = train_labels[offset:(offset + batch_size), :]\n",
    "    # Prepare a dictionary telling the session where to feed the minibatch.\n",
    "    # The key of the dictionary is the placeholder node of the graph to be fed,\n",
    "    # and the value is the numpy array to feed to it.\n",
    "    feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels, beta_regul : 1e-3}\n",
    "    _, l, predictions = session.run(\n",
    "      [optimizer, loss, train_prediction], feed_dict=feed_dict)\n",
    "    if (step % 500 == 0):\n",
    "      print(\"Minibatch loss at step %d: %f\" % (step, l))\n",
    "      print(\"Minibatch accuracy: %.1f%%\" % accuracy(predictions, batch_labels))\n",
    "      print(\"Validation accuracy: %.1f%%\" % accuracy(\n",
    "        valid_prediction.eval(), valid_labels))\n",
    "  print(\"Test accuracy: %.1f%%\" % accuracy(test_prediction.eval(), test_labels))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "The L2 regularization introduces a new hyperparameter that should be tuned. Since I do not have any idea of what the right value for this hyperparameter should be, I will plot the accuracy against its value (on a logarithmic scale)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "num_steps = 3001\n",
    "regul_val = [pow(10, i) for i in np.arange(-4, -2, 0.1)]\n",
    "accuracy_val = []\n",
    "\n",
    "for regul in regul_val:\n",
    "  with tf.Session(graph=graph) as session:\n",
    "    tf.initialize_all_variables().run()\n",
    "    for step in range(num_steps):\n",
     "      # Pick an offset within the training data, which has been randomized.\n",
     "      # Note: we could use better randomization across epochs.\n",
     "      offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n",
     "      # Generate a minibatch.\n",
     "      batch_data = train_dataset[offset:(offset + batch_size), :]\n",
     "      batch_labels = train_labels[offset:(offset + batch_size), :]\n",
     "      # Prepare a dictionary telling the session where to feed the minibatch.\n",
     "      # The key of the dictionary is the placeholder node of the graph to be fed,\n",
     "      # and the value is the numpy array to feed to it.\n",
    "      feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels, beta_regul : regul}\n",
    "      _, l, predictions = session.run(\n",
    "        [optimizer, loss, train_prediction], feed_dict=feed_dict)\n",
    "    accuracy_val.append(accuracy(test_prediction.eval(), test_labels))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYIAAAEOCAYAAACEiBAqAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzt3Xu8lXP6//HXVVFK7IzDqCinyhA5pR+DbWJiZJpx6kBs\nDMYMBvMdh6FBTCQ5G5RGMigTkdNI2NJMo5hS0wGpXVFhHFKi075+f3zurdVu7b3W3uu413o/H4/1\naN3rPnyutfbdfd3357oP5u6IiEjxapTrAEREJLeUCEREipwSgYhIkVMiEBEpckoEIiJFTolARKTI\nKRFIXjKzpmZWaWatcx1LXZnZFDPrl8L8883s0DTHtKWZrTSzH6ZzuTHLv93Mzo/e9zCzD9KwzHrH\nbGbXm9ndSUx3r5mV1SvAAqJEUE/RCvp19NpgZqtjPuubwnJT2ogUmKK8yMXd93T3t1JZRvX1yN3X\nuntLd1+eeoSbtdUaOBn4a8zHKf/tko05XuJx9+vd/ZIkmhkCXGdmlkqsDZ0SQT1FK+g27r4NsAg4\nIeazJ3IdX6aYWeNsNpeRhWb3OyQtX+NKwjnAM+6+PkftG/VMPO6+CFgMHJ/WiBoYJYL0MKpttMys\nkZkNMLMPzexTM3vUzLaJxjU3syfM7HMz+zLae9vWzG4DDgEeio4shmzWkFljMxtrZsvN7Asze9XM\nOsSMb25md5vZ4mjZr5tZo2hcadTWV2ZWYWZ9os832Xs0swvM7JXofVUXza/NbD4wK/r8L2a2xMxW\nmNm/Y7syohivi777CjN7y8x2NLOHzOymat/nZTO7oJbf9pdmttDMPqma18y2ipa7R8xy2prZN1W/\ncbU2Loh+p3vN7AvgypjP55nZ/8zsudhuKDM7wczej37jO2J/IzO72cyGxUzb0czWxQs+Gvd69Lf+\nxMxGmlmLmPHLzOz3ZvZfYEXMZ4dF61Dskeeq6G+xo5ltb2YvRuvW/8zsGTPbKZp/s/XIqnW1mVkr\nM3s8mv9DM/tDtd9ropndFa1DH5hZ91r+RscDb9Q00sw6m9mkaFkzzOy4mHE7mNlL0d/zX9FvW33d\nq4q5l5nNjb7TIjO7yMy2A54Gdo/5rVrF+RvFXfcjbwAn1PL9Cp+765XiC1gI/KTaZ1cSVrCdgC0J\nh80jonGXAE9GnzcCDgK2isZNAfrW0lZj4Axgq2j++4ApMeNHAP8AdiAkp8Ojf/cEVgK/iNr8AdA5\nps1+Mcu4AJgQvW8KVALPAdsATaPPz4iGGwNXE/aqGkfjBgDvALtFw/tH0x4BLIhpZ2dgFVAS53tW\ntfsS0BJoB3xYFSfwEHBdzPRXAGNq+M0uANYS9lwtWnZvYDawR/QdBgKvxcS1krCBawz8AVgT0/bN\nwLCY5XcE1sYMT4mZtiNQGi1nR+BfwKCYaZcBb0XrSdOYzw6L8z2GAi9H32FH4MRoHWgJjAMerxZD\n32q/5wagdTT8JDAmWo/2ABZUTR/9XmuA06O2LgUW1rJOfg3sEzPcA3g/pt1F0TIaAz+Nftt20fhn\ngJHR9+gMLGXTdS825v8BB0XvWwH7V28vJobv/0bUsu5H4/sCk3O9HcnlK+cBFMKL+IlgAfD/YoZ3\nA76J3l8IvB77nydmuk02ykm0/cPoP8uWQBPCBm/PONNdDzxWwzKSSQSH1hKDAd8Ae0XDFcAxNUw7\nHzg8ev97YGwN01W1e0TMZ5cBz0XvjwQ+iBk3E+hZw7IuAOZV++w1Nt1QbhH9djsA5wGvVvt+n1CP\nRBAnlt7AP2OGlwG9q02zWSIAzgTeB7atYbndgI9r+ZtW/Z6to3VlPdHGOBp/CfBizO81M2Zcq2gd\n2yZOu42i5e4a81lsIjiWakmEsAd/BRs39G1jxg2Js+5VJYLlwFnA1tWWlygRXE8N6340vifw32T/\nzxXiS11DmbML8GLUtfAF8B+A6FB2BD
AJGGuhC+fPZskVq6Jul6HR4fxXwNxo1A8Ie7KNCUkoXjwf\npvB9PqoWx9VRt8qXwBeE/7TbR6Pb1BADwKOEowmifx+tQ7uLCBsy3H0S0MjMDjWz/QkJ8aValrOk\n2nA74IGYv8+nhETQNmrj++k9bC0+ThBnXGa2s5k9aWYfRX+vh9j4O1X5KM6sscs4lLCB/Lm7V3Uf\nbW1mI6Iukq8IRwrVl1uTHxKSW+xvsojwd6sSW6BdHU2/dfUFuXsl4YigZQ1t7Uw4WoxV1VbV2UCx\nv231v1OsXsApwOKo6+rgWqaNlWjdbwl8leSyCpISQeZ8RDhK2C56tXL3Fu7+hYezIa5z970Je7an\nAlV9lomKXmcD3YGj3L0E6BR9boQ9yfWEQ/3qlhAOkeP5BmgeMxzvdL3v4zKzY4CLgF7u3grYDviO\njXWSj2qIAWAUcIqZHUjY6L5Qw3RVdol5vyuh6yB2Wf2j12h331DLcqr/rouBsmp/n63dfTrhd/y+\n3ShJx24kq/9eO9fS7hBC99ePor/Xr9i8CF7j3zzqHx8LnOvu82JGXRXFdFC03J9WW25t69Fyor34\nmM92pZ7JjlA36lDDuKXV2oltqyrZxP62u1ADd3/L3U8kdIu9AjxeNSpBfLWt+wB7A+8mWEZBUyLI\nnAeBwWbWFiAq8PWM3nc3s72jDcwqwsa7aiP2CbB7LcttSdjofmlmWwN/rhrh4ayNUcBdUXuNzOzw\nqJ1HgROiglvjqNjYOZp1BmHj3NTMOgFlCb5bS8Le8+dm1hS4kXBEUGUEMMjMdou+bxeLirjuvpBw\nFPMwoU8/0ZkmV5rZNmbWnpB8RseMexQ4jZBERyVYTnUPAgMsKrRHBcaTonHjga5mdpyFM3l+D5TE\nzDsDONrMWptZK0I3R01aEv7Gq8xsV+DyZAM0sy0I3SgPuPvzcZa7GvjazLYHrq02vsb1yN3XEmoK\ngyycXLAH8DsSH53V5EVCHSSeNwlHbpdE692xhO6iMe6+hvBb3xCte/sCcU+djuLsbWYtCf9XVrHp\n/5kdLaYIX01t6z7AUdR+NFnwlAjSI94eyWDCXstrZrYCmAwcEI1rAzxLOKSeCTzv7k9G4+4AzrJw\nlsktcZY7glA0W07Yi5lUbfzvCIfB06PpBgLm7h8SDq2vIXTlTAN+FM1zK6GP/FPgATbfIFT/fs8R\n/oN/SOjz/xT4LGb8LYQ9/arvfj+bJopHgH1JvPH2aDnvAlMJG4/Hvh/pvgB4D1jp7m8nWNamC3Yf\nDdwDPB11rfwHOCYat5xQQLwn+l6tCXu9a6LZXwCeB+YQir/j4sRd5U+EIvlXwFOEvfuapq3+2e6E\ns3+ujM6GqTorZnvgNkI943PCOlD9yCreehTb1q8JRxCLgImE/vTaTnuuba97JNDLzJpsNlPY2Pck\nHPV+HsV9mofTNqviaENYh4YR9vLXxC4i5v05hPrTl4RuxTOjNt4lJJRFUVdfbNKmtnXfzNoRjlAS\nHZkWNIuKJbVPZHYZcC7hcHIWoXtib8J/8BaEP87p7r4qzrwVhNPiKoF17t41TbFLAxXtFd7n7jV1\nJ9RlWY8Bs919UOqR1dhGY0Li7ekpXuhVqCycsvq+uw9LOHHty7mTcPbUhemJLGF79wLT3P2RbLSX\nrxImgqiPcjLQyd3XmtkYwqHgb4HL3X2yhUu0d3f3P8WZfwGhH/PLtEcvDY6ZbUnYMy5396EpLmtP\n4G1gb3dflo74YpZ9HGFvfy1hT7I/4WysXF00VZDMbB9CPX6OmR1GONLq7e6v5Di0opJs11BjoEV0\n6LcVodCzl7tPjsZPJFxiHo/VoR0pYNHZPV8QjiL/kuKyBhOuVbgh3UkgciThtODlwNHAL5UEMmJb\n4DkzW0XoKhyoJJB9yXYNXUIoSq4mnOPb38wmA7e6+3gzu5xwcc+2ceZdQOgf3UDohxye1m8gIiIp\n2a
y4U11UeOlFOO96BeHc936Ews09ZjaAUKhZW8MiDnf3ZWa2A/CKmc2NOZKIbacobzAmIpIKd0/5\nnlzJdNkcQ7gtwBfRedpPE656fN/de7j7IYRT+uJesFF12O7unxHOrqixWJzrq+vS8bruuusKos10\nLLM+y6jLPMlOm2i6VMc3lFeuvkc+rp8NZd1MNE26JJMIFgPdzKxZdD56d2ButIePhRuaXUs47XAT\n0bm/W0fvWxAuevlvuoLPR6WlpQXRZjqWWZ9l1GWeZKdNNF2i8RUVFUm1k+9ysW5mqt1Ul9lQ1s26\ntltfydYIriNctLOOcH76rwj3y/kt4Tzfp939j9G0OwPD3b1ndEHRuGiaJoT7fcQ7Nx4z83RmOJF0\nKSsrY+TIkbkOQ2QzZoanoWsoqUSQDUoEkq/Ky8tztjctUhslAhGRIpeuRKDz+0USKC8vz3UIIhml\nRCAiUuTUNSQi0kCpa0hERNJCiUAkAdUIpNApEYiIFDnVCEREGijVCEREJC2UCEQSUI1ACp0SgYhI\nkVONQESkgVKNQERE0kKJQCQB1Qik0CkRiIgUOdUIREQaKNUIREQkLZQIRBJQjUAKnRKBiEiRU41A\nJM99/TW89hpstx3ssgu0aQNbbpnrqCQfpKtG0CQdwYhI+s2ZA/fdB088AQcfDKtXw+LFsHw5bL99\nSAq77AK77rrxfdXwTjtBIx3vS5KUCEQSKC8vp7S0NCttrV8Pzz4L994L8+bB+efDrFnhKCB2muXL\nYcmSkBiWLIGKCpg0KbxfsgS++gpat940UbRvDx06QMeOsPPOYCnvRyanshI++gjeew8WLoSuXWH/\n/bPXviSmriGRBLKRCJYvh+HD4cEHYbfd4Le/hZNOqn8X0HffhY1vVWJYsgQWLAgb4/fegzVrQkKo\n/tprL2jevH5trly5cfmxrw8+gJKSsPxddw0Ja4stoE+f8Np77/q1J+nrGkoqEZjZZcC5QCUwCzgb\n2Bu4H2gBVACnu/uqOPMeB9xJKEyPcPfBNbShRCB5Z8oU+Pe/4YADoEuXsEFLF3f4179C989LL8Gp\np4YEsP/+6WujJl98EX+jvWAB7Lhj/CTRtm2IuaIi/rwrVmw84oh9degALVtu+r2nTYPRo2HMmNDN\n1acP9O4Nu++e+e9eSLKWCMysNTAZ6OTua81sDPAi8FvgcnefbGZlwO7u/qdq8zYC3ge6A0uBaUAf\nd58Xpx0lAskrGzbAvvuGBLBoEcycCTvsEJJC1atLl9BtU5dujtWr4fHHQwL45hv4zW+grCy9Saa+\nNmyofUNfWbl5oujUKfzbpk3d6xKVlfDPf4akMHYstGsXksJpp4XEI7XLdiKYAnQBVgJPA3cDf3f3\nVtE0bYGX3X2favN2A65z9+Oj4asAj3dUoEQg+Wb0aLjrLhg0qJyjjy5lwwaYPx+mT9/0BZsnh732\ngsaNN13e/Pnwl7/AqFFw2GFh7//YYxtOUffrr6FJk/p3HSWyfj28/no4Shg3Dn70o5AUTjklFL9l\nc9nuGroE+DOwGpjg7v3NbDJwq7uPN7PLCRv8bavNdzLQw93Pj4bPALq6+yVx2lAikLxRWQmdO8PQ\nodCsWc01AndYunRjUpgxI/z76aew334hMXTsGLp+3n4bzjkHfv3rUAeQmq1dCxMmhGT8wgvhrKne\nvUPdZLvtch1d/sja6aNmVgL0AtoBK4CxZtYPOAe4x8wGAOOBtakGU1ZWRvv27QEoKSmhS5cu3/8H\nrLq6U8MazsbwwIHlVFZCjx6lmJXWOn2bNvDBB+UccQQMGBDGP/98OfPnA5Qycybsv385l14alpcP\n3y/fh//1r3K23hr+9rdSvv0Wbr21nEcfhd//vpSDDoLKynKaNYPddy9lq63g00/D8L77ltK8OVRU\nlNO0KXTtGobnzAnDpaVheObMcrbYIn++b7LDVe8rKipIp2S6hk4h
7NWfFw33Bw5194tiptkLeNTd\nu1WbtxtwvbsfFw2ra0jyXmVlKNgOHgw/+1muo5FYK1fC5Mnh39WrN319+23yn7nDhRfCRReFuk9D\nlc0LyhYD3cysGbCGUPidZmY7uPtnUUH4WuCBOPNOA/Y0s3bAMqAP0DfVoEUyadw4aNYMjj8+DJdn\n8ToCqV3Llhv/LqmYNy90+3XoAH37wuWXw557pr7chiphmcrdpwJjgenAu4ABw4C+ZvYeMAf42N1H\nApjZzmb2fDTvBuAiYAIwGxjt7nMz8D1E0qKyEgYOhD/9SRc8FbJOncJ1G3PnQqtW0K1bKEq/9Vau\nI8sNXVAmEmPcOLjpplDYVSIoHqtWwYgRcMcd4aK3P/wBTjgh/8/oyupZQ9mgRCC55g4HHgjXXw+9\neuU6GsmF9evh73+HIUNCLeH//g/OOAOaNs11ZPHpwTQiafbcc+Hfn/98089jz9iQwtakSagZvPNO\nuN/T2LHhVN+bb4Yvv8x1dJmjRCBCOBq44QbVBiQwg+7dw/UfL78cist77AGXXRauMi80SgQihIuW\n1q2L3yWkM4aKW+fO8Mgj4RYjTZqE7sPTTw/JoVAoEUjRiz0ayPfioORO27ahdrBgQUgOP/4xXHNN\nuDahodNqL0XvH/8It20+6aT441UjkFjbbgtXXRWOEBYsgH322VhfaqiUCCRvLVuW+TaqjgYGDNDR\ngNRN69bh6XHDh4ezi3r1CndubYi06kteeuutcFvjhx/ObDsTJoS7ap58cs3TqEYgtTnmmHB00LVr\nuDnezTeHm+Y1JLqOQPKOe7hNc/fu8NBD4dWzZ2baOfxwuPjicMqgSKoWLgzr04cfhudN/OQnmW1P\n1xFIwRo9OpzBM3BgeH7v2WeHJ4Wl26uvhnPDTzut9ulUI5Bk7bZbqBfccktYb08/PTyGNN8pEUhe\nWb0arrwS7rwz9Nkfemh4kMsvfgFz5qSvnarawLXXbv4AGZFUmIV6wZw54XYVnTuHi9M2bMh1ZDVT\n15DklRtvhNmzw1FBrFGjQkF38mTYZZfU23nttXAb4jlzlAgks+bMCU+jW7EC7r8/7Nyki+41JAXn\n44/DcwDeeSc8u7a6IUNg5Eh4883Un1J11FFw7rlw5pmpLUckGe7w2GPhZna9esGgQel50ppqBFJw\nrr46PMYxXhKAcIrecceFewGlchFPeXl4vGS/fslOX17/xkQI3UVnnBFue92kSXge88iR4bbn+UCJ\nQPLC1KmheHvVVTVPYxaOCtq3Dw81X7++fm0NHBiuCG2SzGOZRNKopCTUC154IZwa/fHHuY4oUNeQ\n5FzVaZznnw9lZYmnX7s2HBW0aRNOLa3LTeLefDO08d57SgTS8KlrSArGmDGwZk3y/fVbbhluDzxr\nVigg18UNN+hoQKQ6JQLJqW+/3fR00WRtvXU4vH7ySbjnnuTm+ec/w4U+/fvXLUbVCKTQab9Icmro\n0HA63RFH1H3eHXYIt4j48Y9hxx2hd+/ap7/hBvjjH2GLLeoXq0ihUo1AcmbpUthvP5g2LVyRWV8z\nZ4b7vTzxRLgtRTxTpoTbSLz/fuhaEikEuo5AGryysnAHx0GDUl/WG2/AqaeGW0ofeODm448/Plyd\nfMEFqbclki9ULJYG7e23Q7fO1VenZ3lHHQUPPBBuTvfhh5uOmzo1XK2czBlJ8ahGIIVONQLJOne4\n9FL485+hZcv0Lfekk+DTT6FHj1AY3mmn8PkNN4TrE5o2TV9bIoUkqa4hM7sMOBeoBGYBZwN7Aw8A\nzYB1wG/c/e0481YAK6J517l71xraUNdQkRgzBgYPDkcFmXgYzA03hLuWlpeH6wVOOgnmz1cikMKT\ntRqBmbUGJgOd3H2tmY0BXgT6AUPdfYKZHQ9c4e5Hx5l/AXCQu3+ZoB0lgiLw7bew997hJnJHHpmZ\nNtzDDeXmzw9nCJ1wAlx0UWba
EsmlbNcIGgMtzKwJ0Bz4mLCHv200viT6LB6rQztS4G6/HQ45JHNJ\nAMKVxvfdF54t++678KtfpbY81Qik0CWsEbj7UjMbCiwGVgMT3H2imX0EvByNM+CwmhYBvGJmG4Bh\n7j48TbFLA7N0KdxxRzhdNNMaNw6nk37yCTRrlvn2RBqyhInAzEqAXkA7Ql//383sdKAr8Dt3f8bM\nTgH+ChwbZxGHu/syM9uBkBDmuvvkeG2VlZXRvn17AEpKSujSpcv3z4ut2ivTcMMdvuUWOO+8Unbb\nLT/iSXa4tLQ0r+LRcPEOV72vqKggnZKpEZwC9HD386Lh/kA3oJ+7t4qZboW7b1vDYqqmuQ5Y6e63\nxxmnGkEBe+cdOPFEmDcPttkm19GIFIZs1ggWA93MrJmZGdAdmAMsNbOjomC6A+/HCbK5mW0dvW8B\n/BT4b6pBS8NSdbrojTc2zCQQuzcmUoiSqRFMNbOxwHTCaaLTgWHADOAuM2sMfAecD2BmOwPD3b0n\nsBMwzsw8ausxd5+QkW8ieWvsWFi1qv4XdIlIZukWE5JR330XThcdOTJc/Ssi6aNbTEiDcPvtcNBB\nSgIi+UyJQDJm2bKQCG69NdeRpEY1Ail0SgSSMddcEy7m2n33XEciIrVRjUAy4p13wp1A33uvYZ4p\nJNIQqEYgeWvlyvAg+oZ6uqhIsVEikLT67rvwAJgDD4Rzz811NOmhGoEUOiUCSZt166BPH9h++/CQ\nGEv5gFVEskE1AkmLyko46yz4/HN45hk9F1gkG9JVI9ATyiRl7nDJJbBoUXhmsJKASMOiriFJ2YAB\nMGUKPPccNG+e62jSTzUCKXQ6IpCUDBkCTz0FkyaFB8GISMOjGoHU2/DhMGgQvPkmtG2b62hEio9q\nBJJTY8bA9dfDG28oCYg0dKoRSJ29+GIoDv/jH7DnnrmOJvNUI5BCpyMCqZNJk8JzBcaPh86dcx2N\niKSDagSStHfegeOPDw+F794919GIiO41JFk1Zw6ccAIMG6YkIFJolAgkoYULoUePcKroL36R62iy\nTzUCKXRKBFKrZcvg2GPhyiuhf/9cRyMimaAagdToiy/CIyb79AkPmRGR/JKuGoESgcS1ciUccwwc\neWR41KTuJCqSf1QsloypeqbAfvspCYBqBFL4lAhkM9dcAyUleqaASLFIqmvIzC4DzgUqgVnA2cDe\nwANAM2Ad8Bt3fzvOvMcBdxKSzgh3H1xDG+oaygOffQYdO8KsWdCmTa6jEZHaZK1ryMxaAxcDB7r7\nfoSrkfsCtwLXufsBwHXAkDjzNgLuBXoA+wB9zaxTqkFL5tx9N5x6qpKASDFJtmuoMdDCzJoAzYGP\nCUcHVTceLok+q64r8IG7L3L3dcBooFdqIUumfP013H8/XHFFriPJL6oRSKFLeK8hd19qZkOBxcBq\nYIK7TzSzj4CXo3EGHBZn9jbAkpjhjwjJQfLQ/feHC8f22CPXkYhINiVMBGZWQtiLbwesAP5uZqcT\nNui/c/dnzOwU4K/AsakEU1ZWRvv27QEoKSmhS5culJaWAhv3yjScmeGXXy5n8GCYNCk/4smn4dLS\n0ryKR8PFO1z1vqKignRKWCyONvI93P28aLg/0A3o5+6tYqZb4e7bVpu3G3C9ux8XDV8FeLyCsYrF\nuXXvvTBxYnjwvIg0DNm8jmAx0M3MmpmZAd2BOcBSMzsqCqY78H6ceacBe5pZOzPbEugDjE81aEmv\ntWvDfYSuvjrXkeSn2L0xkUKUTI1gqpmNBaYTThOdDgwDZgB3mVlj4DvgfAAz2xkY7u493X2DmV0E\nTGDj6aNzM/NVpL4efxz22gsOPTTXkYhILugWE0VuwwbYZx/4y1/gJz/JdTQiUhe6xYSkxbhx4Sri\no4/OdSQikitKBEXMHQYNgj/+UbeSqI1qBFLolAiK2D/+AevXQ8+euY5ERHJJNYIidsQR8JvfQN
++\nuY5EROpDNQJJyZtvhqePnXpqriMRkVxTIihSgwaFx082SXgCsahGIIVOm4Ei9M474TbTuopYREA1\ngqJ0yinw4x/DpZfmOhIRSYWeWSz1MndueCD9woXQokWuoxGRVKhYLPUyeDBccomSQF2oRiCFTjWC\nIlJRAc89B/Pn5zoSEckn6hoqIr/9LbRsCbfckutIRCQdVCOQOlm+HH70o1Aj2GmnXEcjIumgGoHU\nyR13wOmnKwnUh2oEUuhUIygCX34JDz0E//lPriMRkXykrqEicOONsGABPPxwriMRkXRSjUCSsmoV\n7L47TJoEnTrlOhoRSSfVCCQpw4aFC8iUBOpPNQIpdKoRFLA1a2DoUHj++VxHIiL5TF1DBWzYsHBj\nuRdfzHUkIpIJqhFIrdavh44d4ZFHwg3mRKTwqEZQoD79FDZsSH05Tz4JbdooCaSDagRS6FQjyAPu\nMHEiDBkC//43VFbCvvtCly5wwAHh1bkzbLVVcsurrAwPnrnttszGLSKFIamuITO7DDgXqARmAecA\njwAdoklaAV+6+4Fx5q0AVkTzrnP3rjW0UXRdQ+vWhT33224L7//v/6BfP/juO3j3XZg+HWbMCP++\n9x7sttvGxHDAASFRbLfd5st99lkYOBDefhss5YNGEclXWasRmFlrYDLQyd3XmtkY4AV3HxUzzW3A\nV+5+U5z5FwAHufuXCdopmkSwcmW40vfOO8M5/n/4Axx/fO0b7bVrYfbsjYlh+vSQLFq12jw5nHoq\nXHEFnHxy9r6TiGRfthPBFKALsBIYB9zl7hNjplkMHO3uH8aZfyFwsLt/nqCdgk8Ey5bB3XfD8OHQ\nvXtIAAcfXP/lVVbChx9umhymT4ftt4eZM6GRKkBpUV5eTmlpaa7DENlMuhJBwhqBuy81s6HAYmA1\nMKFaEjgCWB4vCVQtAnjFzDYAw9x9eKpBNzRz54bun3Hjwo3fpk4NRwKpatQI9torvE49dePnlZVK\nAiKSvISJwMxKgF5AO0Jf/1gz6+fuj0eT9AWeqGURh7v7MjPbgZAQ5rr75HgTlpWV0b59ewBKSkro\n0qXL93tiVWduNJTh118vZ+ZMmDixlGnT4Gc/K+fhh6FXr8y336hR7r9/IQ2XlpbmVTwaLt7hqvcV\nFRWkUzJdQ6cAPdz9vGi4P3Cou19kZo2Bj4ED3X1pwsbMrgNWuvvtccYVRNfQhg1hz3/IkHDXz9//\nHs48M/kzfkREkpXN6wgWA93MrJmZGdAdmBuNOxaYW1MSMLPmZrZ19L4F8FPgv6kGna/Gjg0Xcd1+\nO1x1VeghcApOAAAMYklEQVQSuuACJYGGLnZvTKQQJVMjmGpmY4HpwLro32HR6N5U6xYys52B4e7e\nE9gJGGdmHrX1mLtPSGP8eePdd+HCC+Gpp+DII3MdjYhI8nSLiTRYswYOOSR0A511Vq6jEZFioXsN\n5ZErroD588PRgC7gEpFs0b2G8sSkSfC3v8GDDyoJFCrVCKTQKRGk4OuvQ1fQsGGwww65jkZEpH7U\nNZSCX/0qHAUML7pL5EQkH2TtymKJb/x4eO21cLaQiEhDpq6hevjsM/j1r8NDX1q2zHU0kmmqEUih\nUyKoI3c4/3zo3x+OOCLX0YiIpE41gjoaORLuuCPcOK5p01xHIyLFTNcR5MCiReG20a++Cvvtl+to\nRKTY6TqCLKushLKy8AwBJYHiohqBFDolgiTdeSesXx9uIyEiUkjUNZSE2bOhtBTeeis9D5QREUkH\ndQ1lydq1cMYZcPPNSgIiUpiUCBIYOBB22QXOPTfXkUiuqEYghU5XFtdiyhQYMSI8HF43lBORQqUa\nQQ2++Qa6dIFbb4Vf/jLX0YiIbE7XEWTYhRfCt9+GC8hERPKRisUZ9NJL4XXXXbmORPKBagRS6FQj\nqObzz8Ptpf/2N9h221xHIyKSeeoaiuEOvXtD27Zw++05DU
VEJCE9jyADnngiXDw2alSuIxERyR7V\nCCIffQSXXRa6hJo1y3U0kk9UI5BCp0RAePbwz38e7iN0wAG5jkZEJLuSqhGY2WXAuUAlMAs4B3gE\n6BBN0gr40t0PjDPvccCdhKQzwt0H19BGTmoEa9bAz34GHTvCfffpwjERaTiydh2BmbUGJgOd3H2t\nmY0BXnD3UTHT3AZ85e43VZu3EfA+0B1YCkwD+rj7vDjtZD0RVFZC376wYQOMGQONG2e1eRGRlGT7\nOoLGQAszawI0J2zUY50GPBFnvq7AB+6+yN3XAaOBXvUNNp3c4Xe/g08+CXUBJQGpiWoEUugSJgJ3\nXwoMBRYDHxP2/CdWjTezI4Dl7v5hnNnbAEtihj+KPsu5m2+GN9+EZ59VcVhEilvC00fNrISwF98O\nWAGMNbN+7v54NElf4h8N1FlZWRnt27cHoKSkhC5dulBaWgps3CtLx/CIEXDPPeXcey9su236l6/h\nwhouLS3Nq3g0XLzDVe8rKipIp2RqBKcAPdz9vGi4P3Cou19kZo0JRwkHRkcO1eftBlzv7sdFw1cB\nHq9gnK0awfjxcMEF8MYb0KFD4ulFRPJVNmsEi4FuZtbMzIxQ+J0bjTsWmBsvCUSmAXuaWTsz2xLo\nA4xPNej6+uc/w+0jxo9XEpDkxe6NiRSiZGoEU4GxwHTgXcCAYdHo3lTrFjKznc3s+WjeDcBFwARg\nNjDa3eeSA7Nnw0knhcLwIYfkIgIRkfxUFPcaWrIEDj8cbrkF+vXLSBMiIlmn21An6fPPoUePcPsI\nJQERkc0VdCL45hvo2RNOPDEkApH6UI1ACl3BJoJ168ItpTt2DF1CIiISX0HWCNzhnHPg00/hmWdg\niy3SslgRkbyi5xHU4o9/hLlz4dVXlQRERBIpuK6hO+8MRwEvvAAtWuQ6GikEqhFIoSuoI4InnoCh\nQ8OFYz/4Qa6jERFpGAqmRvDKK3DGGaE7aN990xiYiEieytrzCLIllUTwwQfhgrGnnoIjjkhzYCIi\neUoXlMUYMAAuvVRJQDJDNQIpdA2+RjBjRriT6IgRuY5ERKRhavBdQyecAMcdBxdfnIGgRETymK4j\nACZPDncVffrpXEciItJwNdgagXu4cOz666Fp01xHI4VMNQIpdA02Ebz8Mnz2WThlVERE6q9B1ggq\nK+Hgg+Gaa+DkkzMcmIhInirq00efegoaNQpPHBMRkdQ0uESwfn24buDPfwZLOQ+KJKYagRS6BpcI\nRo2CH/4QfvrTXEciIlIYGlSNYM0a6NAh3FzusMOyFJiISJ4qyhrBgw/CfvspCYiIpFODSQSrVsGg\nQXDTTbmORIqNagRS6BpMIrjrLjj6aNh//1xHIiJSWJKqEZjZZcC5QCUwCzjb3dea2cXAb4D1wAvu\nflWceSuAFdG869y9aw1t1Fgj+OKLUBuYMgX22iup7yUiUvCydq8hM2sNXAx0ijb+Y4A+ZrYYOBHo\n7O7rzWz7GhZRCZS6+5f1DfLWW8M1A0oCIiLpl2zXUGOghZk1AZoDS4ELgVvcfT2Au/+vhnmtDu1s\nZtkyGD4c/vSn+i5BJDWqEUihS7iBdvelwFBgMfAx8JW7TwQ6AEea2b/N7HUzO7imRQCvmNk0Mzuv\nrgHedBOUlUHbtnWdU0REkpFM11AJ0AtoR+jr/7uZnR7N28rdu5nZIcCTwO5xFnG4uy8zsx0ICWGu\nu0+O11ZZWRnt27cHoKSkhB137MLo0aXMm7dxr6y0tBTQsIazN1xaWppX8Wi4eIer3ldUVJBOCYvF\nZnYK0MPdz4uG+wPdgN2Awe7+RvT5fOBQd/+8lmVdB6x099vjjNusWHzWWbDbbuFW0yIisqlsXlC2\nGOhmZs3MzIDuwBzgGeAnUTAdgC2qJwEza25mW0fvWwA/Bf6bTGCzZ8NLL8Hllyf9XUQyInZvTKQQ\nJewacvepZjYWmA6si/
4dFo3+q5nNAtYAZwKY2c7AcHfvCewEjDMzj9p6zN0nJBPYgAFwxRWwzTZ1\n/UoiIlIXeXmvoalTw+miH3wAW22V48BERPJUQd9r6JprwhGBkoCISOblXSJ47TVYuBDOOSfXkYgE\nqhFIocurRFD1QPqBA2GLLXIdjYhIccirGsGzzzrXXgszZoRHUYqISM2ydq+hbLrmmnCraSUBEZHs\nyatNbsuW0LNnrqMQ2ZRqBFLo8ioRDBqkB9KLiGRbXtUI8iUWEZGGoKCvIxARkexRIhBJQDUCKXRK\nBCIiRU41AhGRBko1AhERSQslApEEVCOQQqdEICJS5FQjEBFpoFQjEBGRtFAiEElANQIpdEoEIiJF\nTjUCEZEGSjUCERFJCyUCkQRUI5BCl1QiMLPLzOy/ZjbTzB4zsy2jzy82s7lmNsvMbqlh3uPMbJ6Z\nvW9mV6YzeJFsmDFjRq5DEMmohI+qNLPWwMVAJ3dfa2ZjgD5mthg4Eejs7uvNbPs48zYC7gW6A0uB\naWb2rLvPS+u3EMmgr776KtchiGRUsl1DjYEWZtYEaE7YqF8I3OLu6wHc/X9x5usKfODui9x9HTAa\n6JV62PkrF90ImWgzHcuszzLqMk+y0yaarli6fnL1PfNx/Wwo62Zd262vhInA3ZcCQ4HFwMfAV+4+\nEegAHGlm/zaz183s4DiztwGWxAx/FH1WsJQIUltGPiaCioqKpNrJd0oEqc1fyIkAd6/1BZQArwLb\nEY4MngZOB2YBd0XTHAIsiDPvycCwmOEzgLtraMf10ksvvfSq2yvRNjyZV8IaAXAMYSP/BYCZjQMO\nI+zpP02IZJqZVZrZD9z985h5PwZ2jRluG322mXScCysiInWXTI1gMdDNzJqZmREKv3OAZ4CfAJhZ\nB2CLakkAYBqwp5m1i8406gOMT1v0IiKSsoRHBO4+1czGAtOBddG/w6LRfzWzWcAa4EwAM9sZGO7u\nPd19g5ldBEwgJJ0R7j43A99DRETqKW9uMSEiIrmhK4tFRIqcEoGISJHL+0RgZs3NbJqZ/SzXsYhU\nMbNOZna/mT1pZr/OdTwiscysl5kNM7MnzOzYhNPne43AzG4AVgJz3P3FXMcjEis6k+4Rdz8z17GI\nVGdmJcAQdz+vtumyckRgZiPM7BMzm1nt81pvSGdmxxBOVf0M0HUGknb1XTejaU4Enge0gyIZkcr6\nGbkWuC9hO9k4IjCzHwOrgFHuvl/0WSPgfWJuSAf0cfd5ZtYfOBDYBlgB7AOsdvdfZjxYKSr1XDcP\nIOxlLYumf97de+bkC0hBS2H9vA24BJjg7q8laieZK4tT5u6TzaxdtY+/vyEdgJlV3ZBunrs/Cjxa\nNaGZnQnEu6mdSErqu26a2VFmdhXQFHghq0FL0Uhh/byYkCi2MbM93X0YtchKIqhBvBvSdY03obuP\nykpEIkHCddPd3wDeyGZQIpFk1s97gHuSXWDenzUkIiKZlctEkPQN6USyTOum5LO0r5/ZTATGpmf+\n6IZ0ki+0bko+y/j6ma3TRx8H/gV0MLPFZna2u28gPAJzAjAbGK0b0km2ad2UfJat9TPvLygTEZHM\nUrFYRKTIKRGIiBQ5JQIRkSKnRCAiUuSUCEREipwSgYhIkVMiEBEpckoEIiJF7v8DxjGz/eemUrkA\nAAAASUVORK5CYII=\n",
      "text/plain": [
       "<matplotlib.figure.Figure at 0x112355b10>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "plt.semilogx(regul_val, accuracy_val)\n",
    "plt.grid(True)\n",
    "plt.title('Test accuracy by regularization (logistic)')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Let's see if the same technique will improve the prediction of the 1-layer neural network:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "batch_size = 128\n",
    "num_hidden_nodes = 1024\n",
    "\n",
    "graph = tf.Graph()\n",
    "with graph.as_default():\n",
    "\n",
    "  # Input data. For the training data, we use a placeholder that will be fed\n",
    "  # at run time with a training minibatch.\n",
    "  tf_train_dataset = tf.placeholder(tf.float32,\n",
    "                                    shape=(batch_size, image_size * image_size))\n",
    "  tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n",
    "  tf_valid_dataset = tf.constant(valid_dataset)\n",
    "  tf_test_dataset = tf.constant(test_dataset)\n",
    "  beta_regul = tf.placeholder(tf.float32)\n",
    "  \n",
    "  # Variables.\n",
    "  weights1 = tf.Variable(\n",
    "    tf.truncated_normal([image_size * image_size, num_hidden_nodes]))\n",
    "  biases1 = tf.Variable(tf.zeros([num_hidden_nodes]))\n",
    "  weights2 = tf.Variable(\n",
    "    tf.truncated_normal([num_hidden_nodes, num_labels]))\n",
    "  biases2 = tf.Variable(tf.zeros([num_labels]))\n",
    "  \n",
    "  # Training computation.\n",
    "  lay1_train = tf.nn.relu(tf.matmul(tf_train_dataset, weights1) + biases1)\n",
    "  logits = tf.matmul(lay1_train, weights2) + biases2\n",
    "  loss = tf.reduce_mean(\n",
    "    tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels)) + \\\n",
    "      beta_regul * (tf.nn.l2_loss(weights1) + tf.nn.l2_loss(weights2))\n",
    "  \n",
    "  # Optimizer.\n",
    "  optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)\n",
    "  \n",
    "  # Predictions for the training, validation, and test data.\n",
    "  train_prediction = tf.nn.softmax(logits)\n",
    "  lay1_valid = tf.nn.relu(tf.matmul(tf_valid_dataset, weights1) + biases1)\n",
    "  valid_prediction = tf.nn.softmax(tf.matmul(lay1_valid, weights2) + biases2)\n",
    "  lay1_test = tf.nn.relu(tf.matmul(tf_test_dataset, weights1) + biases1)\n",
    "  test_prediction = tf.nn.softmax(tf.matmul(lay1_test, weights2) + biases2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "collapsed": false,
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Initialized\n",
      "Minibatch loss at step 0: 590.374023\n",
      "Minibatch accuracy: 12.5%\n",
      "Validation accuracy: 25.4%\n",
      "Minibatch loss at step 500: 199.734955\n",
      "Minibatch accuracy: 80.5%\n",
      "Validation accuracy: 78.8%\n",
      "Minibatch loss at step 1000: 116.521393\n",
      "Minibatch accuracy: 78.9%\n",
      "Validation accuracy: 81.0%\n",
      "Minibatch loss at step 1500: 68.802231\n",
      "Minibatch accuracy: 90.6%\n",
      "Validation accuracy: 82.9%\n",
      "Minibatch loss at step 2000: 41.379978\n",
      "Minibatch accuracy: 89.8%\n",
      "Validation accuracy: 84.4%\n",
      "Minibatch loss at step 2500: 25.250950\n",
      "Minibatch accuracy: 86.7%\n",
      "Validation accuracy: 85.2%\n",
      "Minibatch loss at step 3000: 15.515349\n",
      "Minibatch accuracy: 86.7%\n",
      "Validation accuracy: 86.5%\n",
      "Test accuracy: 93.3%\n"
     ]
    }
   ],
   "source": [
    "num_steps = 3001\n",
    "\n",
    "with tf.Session(graph=graph) as session:\n",
    "  tf.initialize_all_variables().run()\n",
    "  print(\"Initialized\")\n",
    "  for step in range(num_steps):\n",
    "    # Pick an offset within the training data, which has been randomized.\n",
    "    # Note: we could use better randomization across epochs.\n",
    "    offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n",
    "    # Generate a minibatch.\n",
    "    batch_data = train_dataset[offset:(offset + batch_size), :]\n",
    "    batch_labels = train_labels[offset:(offset + batch_size), :]\n",
    "    # Prepare a dictionary telling the session where to feed the minibatch.\n",
    "    # The key of the dictionary is the placeholder node of the graph to be fed,\n",
    "    # and the value is the numpy array to feed to it.\n",
    "    # beta_regul is a placeholder in the graph, so the L2 strength can be set\n",
    "    # per run (here 1e-3) without rebuilding the graph.\n",
    "    feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels, beta_regul : 1e-3}\n",
    "    _, l, predictions = session.run(\n",
    "      [optimizer, loss, train_prediction], feed_dict=feed_dict)\n",
    "    if (step % 500 == 0):\n",
    "      print(\"Minibatch loss at step %d: %f\" % (step, l))\n",
    "      print(\"Minibatch accuracy: %.1f%%\" % accuracy(predictions, batch_labels))\n",
    "      print(\"Validation accuracy: %.1f%%\" % accuracy(\n",
    "        valid_prediction.eval(), valid_labels))\n",
    "  print(\"Test accuracy: %.1f%%\" % accuracy(test_prediction.eval(), test_labels))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Finally something above 90%! I will also plot the final test accuracy as a function of the L2 regularization parameter to find the best value."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "num_steps = 3001\n",
    "regul_val = [pow(10, i) for i in np.arange(-4, -2, 0.1)]\n",
    "accuracy_val = []\n",
    "\n",
    "for regul in regul_val:    \n",
    "  with tf.Session(graph=graph) as session:\n",
    "    tf.initialize_all_variables().run()\n",
    "    for step in range(num_steps):\n",
    "      # Pick an offset within the training data, which has been randomized.\n",
    "      # Note: we could use better randomization across epochs.\n",
    "      offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n",
    "      # Generate a minibatch.\n",
    "      batch_data = train_dataset[offset:(offset + batch_size), :]\n",
    "      batch_labels = train_labels[offset:(offset + batch_size), :]\n",
    "      # Prepare a dictionary telling the session where to feed the minibatch.\n",
    "      # The key of the dictionary is the placeholder node of the graph to be fed,\n",
    "      # and the value is the numpy array to feed to it.\n",
    "      feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels, beta_regul : regul}\n",
    "      _, l, predictions = session.run(\n",
    "        [optimizer, loss, train_prediction], feed_dict=feed_dict)\n",
    "    # NOTE(review): selecting beta on the *test* set leaks test data into model\n",
    "    # selection; prefer accuracy(valid_prediction.eval(), valid_labels) here and\n",
    "    # reserve the test set for a single final measurement.\n",
    "    accuracy_val.append(accuracy(test_prediction.eval(), test_labels))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYIAAAEOCAYAAACEiBAqAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAIABJREFUeJzt3Xuc1nP+//HHS8VK2QkhdKBIYndYEhaDdfjayHmdjfNZ\n5FB+WMd8C+VMzodW0mpzyGHVMtq0kq9QbSllEhK2krRU0+v3x/szXKZrZq6Zua7rcx2e99ttbjOf\n6/ocXtd1feZ6fT7vo7k7IiJSvNaKOwAREYmXEoGISJFTIhARKXJKBCIiRU6JQESkyCkRiIgUOSUC\nyQozW8fMVpvZZnHH0lBm9i8zO74J239sZrumOaa1zew7M9s0nftN2P8QMzurkdseaGaz0x1T3Mxs\nZzN7I+44MkGJIBL9Uy2NfqrMbHnCY8c1Yb9N+hIpMEXZacXdu7j7pKbso+Z55O4r3L21u3/Z9AjX\nONZmwJHAo9FySzMbZWaVUTLvkcJu8vqzTnbh4u7vAlVmtl+MoWWEEkEk+qda393XB+YBf0x47Om4\n48sUM2uWzcNlZKfZfQ0py9W4UnAa8Jy7r4qWHXgDOBZYFFtUdcjAe20kT2bDgXPSfKzYKREkZ9T4\n0jKztczsGjObY2ZfmdkwM1s/eq6lmT1tZv8xs8XR1duvzew2YBfg4ejO4tY1DmTWzMyeNbMvzWyR\nmf3DzLZJeL6lmd1lZp9G+37DzNaKniuLjrUkulo7Nnr8F1ePZna2mY2N/q6+0jnHzD4GpkaP32dm\n883sWzN7O7EoI4rx2ui1f2tmk8xsYzN72MxuqvF6/m5mZ9fx3h5uZp+Y2cLqbc1s3Wi/nRP2s4WZ\nfV/9Htc4xtnR+3SPmS0C+iU8PtPMvjGzFxOv5szsj2Y2K3qPb098j8zsf83swYR1u5rZymTBR8+9\nEX3WC83scTNbL+H5BWZ2qZlNA75NeGz36BxKvPNcFn0WG5vZRmb2cnRufWNmz5nZJtH2a5xHNa9Y\nzayNmQ2Ptp9jZpfXeL/Gmdmd0Tk0u56r2v8B3qxecPf/uvs97v42jbjSj/5v5kaxf2hmB0eP1/u5\nm9nhZvZBFPebZtatrve6xnGr36MzLRTP/cfMhtRYp+Y50y56qvr1z4riPjRargAONLOMXNTExt31\nU+MH+ATYt8Zj/QgnxybA2oTb5kei5y4CRkaPrwX8Dlg3eu5fwHF1HKsZcCKwbrT9vcC/Ep5/BHgV\naEtITntEv7sA3wGHRcfcENgh4ZjHJ+zjbOC16O91gNXAi8D6wDrR4ydGy82AK4FPgWbRc9cA/wds\nGS3/Nlp3T2BuwnHaAcuAkiSvs/q4rwCtgY7AnOo4gYeBaxPWvwJ4ppb37GxgBeHK1aJ9/wmYDnSO\nXsMNwOsJcX1H+IJrBlwO/Jhw7P8FHkzYf1dgRcLyvxLW7QqURfvZGJgI3Jyw7gJgUnSerJPw2O5J\nXsdg4O/Ra9gYOCQ6B1oDo4HhNWI4rsb7WQVsFi2PBJ6JzqPOwNzq9aP360fghOhYFwOf1HFOLgW6\n1/Lc10CPev5/DgRmJSwfDWwc/X1CtP8N6vvcgZ7A50BpFPcZwEfAWrW917Wcc88C6wGdgMXAXtHz\ndZ0z1du2S7LfH4EucX9PpfMn9gBy8YfkiWAusFvC8pbA99Hf5xJundf456HGl3IKx940+gdfG2hO\n+MJb46QDrgOeqmUfqSSCXeuIwYDvga2j5UrgD7Ws+zGwR/T3pcCztaxXfdw9Ex67BHgx+nsvYHbC\ncx8CvWrZ19nAzBqPvc4vvyhbRO9dW+BM4B81Xt9CGpEIksTyJ+CthOUFwJ9qrLNGIgBOBmYBv65l\nvz2Bz+v4TKvfz82ic2UV0DHh+YuAlxPerw8TnmsTnWPrJznuWtF+O9QSV4MTQZLnZwD71/G5/zH6\n+1HgyhrbVgK71PZe13LO7Zjw2PPARS
mcMz+9v0n2+w2wc13vQb79qGgode2Bl6OihUXAewBmtgHh\nqn088KyFIpwBqd46RsUug6Pb+SWEfxIIV/jtCFcqc2uJZ04TXs9nNeK4MrpFXkwoB14H2Ch6evNa\nYgAYRribIPo9rAHHnUf4IsPdxwNrmdmuZvZbQkJ8pY79zK+x3BEYmvD5fEX4p94iOsZP63v4b/68\nnjiTMrN2ZjbSzD6LPq+H+fl9qvZZkk0T97ErcCtwqLtXFx+1MrNHzGxetN+/J9lvbTYlJLfE92Qe\n4XOrllipvDxav1XNHbn7asIVe+tUDmxmW0fFXd+Z2Ve1rHN6VLyzKDq/OhO9tlo+91ejTTsC/6/6\nM4223ajG66rzvY4sTPh7OT+/7rrOmbq0BpakcNy8oUSQus8IdwkbRD9t3H09d1/koQXHte7ejXCF\nczShYg3qL1M9FdgP2NvdS4Bto8eNcMWzivCPU9N8QvFQMt8DLROWkzUx/CkuM/sDcAHQ293bABsA\nP/BzPclntcQA8CRwlJntRPgHeqmW9aq1T/i7A/BFjX2dFP2McPeqOvZT8339FCiv8fm0cvcphPfx\np+NGSTrxy6Tm+9WO2t1KKP7aLvq8zmDNSvBaP/OoTP9Z4HR3n5nwVP8opt9F+z2gxn7rOo++JLqK\nT3isA41MdoR6o23qXQtw99keGlW0dveNaz5vZlsDdwFnVH8uhAuYxNdW2+c+H/hzks/0ucQQGv7y\nfjKf2s+ZpPs1s60I/xtNuQjLOUoEqXsAGGRmWwBEFXy9or/3M7Nu0RfMMsKXd/XJvBDYqo79tiac\nWIvNrBUwoPoJD602ngTujI63lpntER1nGPBHM+sd3VVsZGY7RJu+T/hyXsfMtgXK63ltrQlXQv8x\ns3WAGwl3BNUeAW42sy2j11taXZnn7p8Q7mIeI5TtrqJu/cxsfTPrREg+IxKeGwYcQ0iiT9azn5oe\nAK6xqKI9qjw9InruBaCHmR1koXXJpUBJwrbvA/uY2WZm1oZQTl2b1oTPeJmZdQD6phqgmbUA/gYM\ndfcxSfa7HFhqZhsBV9d4vtbzyN1XEOoUbrbQuKAz0If6785q8zKhHiQx9rXN7FfR4jrReZKKVoT/\nhW/MrLmZncOaFzC1fe4PAhea2e+iGFqZ2SEJcTTVUGo5Z6L3dAlrvud7A2Oju8qCoUSQXLIPeRAw\nFnjdzL4FJgA7Rs9tTih7XEoo4xzj7iOj524HTolaLAxMst9HCGWOXwIfEIqYEvUhXH1Mida7ATB3\nnwP0Bq4iFOVMBraLtrmFUN75FeFkr/mFUPP1vQj8MzrOx9F2Xyc8P5BwpV/92u/nl4niCWB76v/y\n9mg/HwDvEBLHUz896T6XUBn4nYc22ylz9xHA3cDfoqKV94A/RM99CRwXPf81oahoKqHSjyimMcC/\nCZW/o5PEXe3PhEryJcAowtV9bevWfGwrQuufflFLlOoWRBsBtxHKpv9DOAdq3lklO48Sj3UO4Sp7\nHjCOUOdRV7Pnur7IHgd6m1nzhMfmEe6cNiC0nFluZmvcAaxxkHB1PZTQ2OBzQnHM5BrrJP3c3X0i\noa7jgahYaCbhc6yOPZUv45rr/LRc1zkT+TOhuHdR9UUfobJ7aArHzSuWSmIzsz6EW2CAh9z9LjO7\ngfBFtJpwtVLuSTq3mFkloWnXamClu6fSGUXyiJntD9zr7ikVJ9Szr6eA6e5+c9Mjq/UYzQiJt5c3\nsaNXobLQZHWWuz9Y78rpOV7GP/emMrOdgVvdfZ+4Y0m3ehOBmXUHniZcyawiVOCdA3zl7suidS4k\nlJmem2T7uYRyz8Vpjl1ygJmtTbgyrnD3wU3cVxfgXaCbuy9IR3wJ+z6IcLW/gnAXdRKhNVZ9RVmS\nYZn83CU1qRQNdQMmufuPUSXOeOCI6iQQWY9wxZ+MpXgcyTNRK49FhM//vibuaxCh+OD6DH0Z7EVo\nFv
wlsA9wuJJA/LLwuUsKUrkj2BZ4DtiNUKY6Dpjs7n0s9Aw9mVBeuo+7/yfJ9nOj56sI5ZYPpfcl\niIhIU6RaR3AqcD6htcR04Ed375vwfD9CT9rrkmzbzt0XmFlbQmXrBe4+Icl6BVULLyKSDe7e5OEu\nUiqycffH3H1ndy8jXN3PqrHKcMJohcm2XRD9/prQGqPWyuJM9pzL1s+1115bEMdMxz4bs4+GbJPq\nuvWt19Tn8+UnrteRi+dnvpyb9a2TLiklguhqnqjd9OHA8KiCp9ph/NwjNnG7llHbeCwMzHUAMK2p\nQeeysrKygjhmOvbZmH00ZJtU161vvfqer6ysTOk4uS6OczNTx23qPvPl3GzocRsr1aKh8YT2wyuB\nS9y9wsyeJfQ+XE1oY3yOhyKgdoQmpr2iDkijCW13mxPGxknWlh4z83RmOJF0KS8v5/HHH487DJE1\nmBmehqKhlBJBNigRSK6qqKiI7WpapC5KBCIiRS5diUDt+0XqUVFREXcIIhmlRCAiUuRUNCSSYR99\nBMuWwVZbQZs2cUcjhSRdRUPN619FRBpr7lzYYw9o3x7mzIFmzUJC6Nw5/E78ad8eWrSIO2IpRroj\nEKlHY1sNrV4N++4LhxwCl14K7rBoUUgIc+eu+bNgAWy++c+JITFZbL01rL9++l+b5DfdEYjkuPvv\nhxUr4OKLw7IZbLhh+OmRpH/9ihXw6ae/TBTvvBN+z5sX/u5S25x0Ik2gOwKRDJg7F3bdFSZMgK5d\nm76/226Dl16Cf/wD1lITD4mo+ahIjlq9Gk47Dfr3T08SgHBX8f338JDG7pUMUCIQqUdD+xHULBJK\nh+bN4dFH4eqrYf789O1XBJQIRNJq7ly47jp47LHQQiidtt8eLrgAzj03VDyLpIvqCETSpGYroUxY\nsQJ23hn69YMTTsjMMSR/qI5AJMdkokioprXXhkcegb594auvMnccKS5KBCL1SKWOIJNFQjXtsguc\ncgpceGFmjyPFQ4lApIky0UqoPtdfD1OmwHPPZed4UthURyDSRPfeC089Bf/8Z+bvBhKNHw/HHQfT\npmkMo2Kl+QhEckC6O4411Pnnw3//G5qWSvFRZbFIltRWRxBHkVBNAweG3savvRbP8aUwKBGINNJ9\n92W+lVB9WreGBx+Es84KQ12LNEaqk9f3Ac6IFh9y97vM7AagN2Hy+oVAubt/mWTbg4A7CEnnEXcf\nVMsxVDQkeWPu3DBw3FtvxXc3kKi8PCSFu++OOxLJpqzVEZhZd+BpYBdgFfAKcA7wlbsvi9a5ENjO\n3c+tse1awCxgP+ALYDJwrLvPTHIcJQLJC9noONZQixaFnscjR8Lvfx93NJIt2awj6AZMcvcf3b0K\nGA8cUZ0EIusR7gxq6gHMdvd57r4SGEG4ixDJGzXrCHKhSKimDTaAe+6B008PlcciDZFKIpgG7Glm\nbcysJXAw0B7AzG4ys0+B44E/J9l2cyBxiKzPosdE8lI2O4411BFHwG9+E/oYiDREvRPTuPtMMxsE\njAWWAVOAqui5q4GrzawfcCFwXVOCKS8vp1OnTgCUlJRQWlr608xQ1VdlWtZytpfLysqoqKhg9Wq4\n4YYyrrwSFiyoYMGC3Igvcfmee8r4zW9gyy0r6No1/ni0nN7l6r8rKytJpwb3IzCzAcB8dx+a8Fh7\n4GV336HGuj2B69z9oGi5P+DJKoxVRyC57p57YPjw7Hcca6hhw8JENpMnh7GJpHBltR+BmbWNfncA\nDgeGm1nipHmHATOSbDoZ6GJmHc1sbeBY4IWmhSySXRUVFTldJFTTiSeGuY9vuSXuSCRfpDpn8Sgz\n2wBYCZzn7kvN7FEz24ZQSTyP0JIIM2tHaGLay92rzOwC4DV+bj6aLGGI5KzqjmNXXpkbTUXrYwYP\nPAA77RTqDbbbLu6IJNdpiAmReuRLkVBNQ4fC44+Hvg75FLekTmMN
iWRBrnUca4jq/g69e8Mll8Qd\njWSCEoFIhi1bBnvtBbvvXsE995TFHU6jfPwx9OwJkyZB585xRyPppkHnRDKoqgqOPx523BGOPDLu\naBqvS5dQt3HmmZrnWGqnOwKRJC6+GKZOhVdfhRYt4o6maaqqYPfdw9ATO+0Ev/oVrLtu/b9btAgV\nz5K7VDQkkiH33BMmm5k4sXAmfJk9OzQnXb48DEHxww+//J3ssdWrQ1JITBA33hgmw5HcoEQgkgEv\nvQRnnBEqh7faKjxWUVHxUw/PYrJq1S+Tw4wZYZTTWbOgVau4oxNQHYFI2n3wQfii+9vffk4Cxax5\n8/CF37YttG8PBxwAe+8Nd9wRd2SSbrojEAE+/xx22y0MzXDMMXFHk7s+/jhMzTlzZkgQEi8VDYmk\nSXUz0aOPDi1spG7nnx/GMLr99rgjESUCkTSoqoLDDw9Xtw8/nLyVTLHWEdTmyy+he3d47z3o2DHu\naIqb6ghE0uDSS+H77+H++9VUMlWbbhruCv6cbAYSyUu6I5Cide+9oaloITUTzZalS2HrrWHcONhh\nh/rXl8xQ0ZBIE7z8cmgmOmGCWgg11h13wD/+AS++GHckxUtFQyKN9MEHcMopMGpUakkgcXYo+dm5\n54be1//8Z9yRSFMpEUhR+fxzOOSQUCy0225xR5Pf1lkHbrgB+vXTOEb5TkVDUjTUTDT9qqqgtBRu\nuikMdy3ZpToCkQZIpZmoNM5LL8EVV8CHH2oCnGxTHYFIA1x2WeObiaqOoG4HHwwbbghPPhl3JNJY\nSgRS8O69Nwwn/eyzoUespJcZDBoE114bBqeT/JNS0ZCZ9QHOiBYfcve7zOwW4BDgR2AOcKq7L02y\nbSXwLWGS+5Xu3qOWY6hoSNLu5Zfh9NN/OZqoZMZhh8Gee4ZOepIdWasjMLPuwNPALsAq4BXgHGAr\n4HV3X21mAwF39zWq4MxsLvA7d19cz3GUCCStZs0Kk7E8/7xaCGXDv/8NZWXhfS8piTua4pDNOoJu\nwCR3/9Hdq4DxwBHuPs7dV0frvA1sUVusKR5HJK0GDICLLmp6ElAdQWq22w569QoT4Eh+SeULehqw\np5m1MbOWwMFA+xrrnEa4U0jGgbFmNtnMzmx8qCKpmzcv9Hg9//y4Iyku118PDzwAX3wRdyTSEM3r\nW8HdZ5rZIGAssAyYAlRVP29mVxHK/ofXsos93H2BmbUlJIQZ7j4h2Yrl5eV06tQJgJKSEkpLS38a\n9bH6qkzLWk5luW/fCvbfH9q0afr+ysrKYn89+bR82mlwzjkV9O2bG/EU0nL135WVlaRTg/sRmNkA\nYL67DzWzcuBMYF93/zGFba8FvnP3IUmeUx2BpMU334QB0aZPh802izua4rNoEXTtGirot9km7mgK\nW1b7EURX85hZB+BwYLiZHQRcDhxaWxIws5Zm1ir6ez3gAEJRk0jG3H03HHVU+pJA4tWY1G+DDaBv\nX7j66rgjkVTVWzQUGWVmGwArgfPcfamZ3Q2sTSjuAXjb3c8zs3aEJqa9gE2A0Wbm0bGecvfX0v8y\nRIJly+C++8LVqMSnT59wVzZ5MuyyS9zRSH00xIQUlNtvD/ML/PWvcUciDzwAI0eGOQs0pEdmaKwh\nkRpWrIDOnWH0aNh557ijkZUrYfvtQ1HdAQfEHU1h0lhDIjU89RRsu236k4DqCBqnRYvQl6N/f1i9\nuv71JT5KBFIQVq8O49307x93JJLoyCOhefNQRCS5S0VDUhBGj4abb4Z33lF5dK55/XU466wwBMXa\nGvQvrVQ0JBJxh4EDw92AkkDu2XffUHfz8MNxRyK1USKQvFdRAUuWhNEvM7P/iszsuIgMHAg33hia\n90ruUSKQvDdwYJg3V7Nj5a4dd4R99gnNeyX3qI5A8tp778Ghh8KcOWEydcldc+bArruGz6xDh7ij\nKQyqIxAhtBTq21dJIB907gzX
XBPqDObPjzsaSaREIHlr9uzQIuXMDA9urjqC9OnTB847LxQTKRnk\njlTHGhLJObfdBueeC61bxx2JNETfvuH3PvvAG29A+5qzm0jWqY5A8tKCBdC9O3z0EbRtG3c00hhD\nhoQBApUMGi9ddQS6I5C8dMcdcOKJSgL5THcGuUOJQPLOkiWhc9J772XneBUVFT/NFCXppWSQG5QI\nJO/cfz/88Y/QsWPckUg6KBnET3UEklf++1/Ycsswxv3228cdjaST6gwaTnUEUpQefxx69FASKES6\nM4iP+hFI3li1Cm69NftDTasfQfb07at+BnHQHYHkjb/+FbbYAnbfPe5IJJN0Z5B9Kd0RmFkfM5sa\n/VwUPXaLmc0ws/fNbJSZrV/LtgeZ2Uwzm2Vm/dIZvBSPxKGms00thrJPdwbZVW8iMLPuwOnAzkAp\n0MvMtgJeA7q7eykwG7gyybZrAfcABwLdgePMbNv0hS/F4tVXQzL4n/+JOxLJFiWD7EnljqAbMMnd\nf3T3KmA8cIS7j3P36plI3wa2SLJtD2C2u89z95XACKB3OgKX4hLnxDOqI4iPkkF2pJIIpgF7mlkb\nM2sJHAzULLU7DXglybabA4kf32fRYyIpmzgRPv0Ujjkm7kgkDkoGmVdvZbG7zzSzQcBYYBkwBaiq\nft7MrgJWuvvwpgZTXl5Op06dACgpKaG0tPSn8tnqqzItF9/yoEHQu3cFEybEc/yysrKcej+KcXmn\nnSo44ADYZ58y3ngD5szJrfiytVz9d2VlJenU4A5lZjYAmO/uQ82sHDgT2Nfdf0yybk/gOnc/KFru\nD7i7D0qyrjqUyRqmT4f99oNPPoF11407GonbkCFw553hp3dvzVGd1YlpzKxt9LsDcDgw3MwOAi4H\nDk2WBCKTgS5m1tHM1gaOBV5oatBSPG65BS66KN4kkHg1JvHq2xceegiuvjpMcPP++3FHVBhS7VA2\nysymAc8D57n7UuBuoBUw1szeM7P7AMysnZmNAYgqly8gtDCaDoxw9xnpfhFSmObNgzFjQvmwSLUD\nDggJ4E9/ggMPhNNPD8OSS+NprCHJWX36hCkob7kl7kgkVy1ZAgMGwKOPhruFvn2LqwgxXUVDSgSS\nkz79FEpLYdo02GyzuKORXDdnDlxxBbz7bmhqfOyxxVF/oMnrpWC5hyko+/bNjSSgOoLc17kzjBoF\nTz4ZpjDdfXd4++24o8ofSgSSc555JtwRXHFF3JFIvtl7b5g8Gc4+G448Eo4/PpxLUjcVDUlO+c9/\nwhDTo0dDz55xRyP5bNmyUL90773hDrN/f2jVKu6o0ktFQ1KQLrsMjj5aSUCarlUruOGG0MKoshK6\ndg2VylVV9W5adHRHIDlj3Dg47bTQiax167ij+VmF5iwuCO+8A5dcAsuXwwMPhAmO8p3uCKSgLF8e\nynXvuy+3koAUjh49YMKE0Ajh8MPhu+/ijih36I5AckK/fqED2YgRcUcixaC8HDbZBAatMdhNflE/\nAikYU6aEHqJTp4Z/TpFM+/LL0CjhrbdC3UG+UtGQFIRVq+CMM8KVWa4mAfUjKDybbgpXXgkXXxz6\nrRQ7JQKJ1Z13QklJuFUXyaYLLwyticaMiTuS+KloSGIzd26owHv7bejSJe5opBiNHQvnnBNaqv3q\nV3FH03AqGpK85h7+AS+/XElA4rP//vCb34R5DoqZEoHE4i9/ga+/Dk35cp3qCArbkCHhp5inwVQi\nkKz7+uvQg/jhh6FFi7ijkWK35ZZhzovLL487kviojkCy7sQTQ6uN226LOxKRYPly6NYtjF66995x\nR5O6dNUR1Dt5vUg6vfoqTJwY+gyI5IqWLWHw4NCS6L33oHmRfTOqaEiyZtmyUEE8dCist17c0aRO\ndQTF4cgjYaONwvlZbJQIJGv+/GfYa68w56xIrjGDu+4KI5Z+803c0WRXSnUEZtYHOCNafMjd7z
Kz\no4DrgG7ALu7+Xi3bVgLfAquBle6edMw/1REUtsmT4ZBDwtSTG20UdzQitevTB374IYxQmuuyNtaQ\nmXUHngZ2AVYBrwDnAC0IX+4PAJfVkQjmAr9z98X1HEeJoECtXAk77xxmHDvhhLijEanbkiWw7bbw\n8suw005xR1O3bHYo6wZMcvcf3b0KGA8c4e4fuftsoL4gLMXjSIEaPBjatQvTBuYj1REUl5ISGDAg\nVBwXy7VpKl/Q04A9zayNmbUEDgbaN+AYDow1s8lmdmZjgpT8NXt2aCY6dGgogxXJB6eeCitWhI6P\nxaDeRlLuPtPMBgFjgWXAFKAhk73t4e4LzKwtISHMcPcJyVYsLy+nU6dOAJSUlFBaWvrTzFDVV2Va\nzp9ld7jxxjKuugoqKyuorMyt+FJdLisry6l4tJz55fHjKzj1VOjfv4zDDoP/+7/ciK/678rKStKp\nwR3KzGwAMN/dh0bLbwCX1lZHUGPba4Hv3H2NkT1UR1B4Hn0U7r8/DCrXrFnc0Yg0XHk5bLwx3HJL\n3JEkl9VB56KrecysA3A4MLzmKrVs19LMWkV/rwccQChqkgL35ZfQv38YRiLfk0Di1ZgUl4ED4bHH\n4KOP4o4ks1KtxB1lZtOA54Hz3H2pmR1mZvOBnsAYM3sFwMzamVn1CN+bABPMbArwNvCiu7+W5tcg\nOcY9jN1y+unw29/GHY1I4xXLBDYaa0jSbvBgePrpMFF4Po7xLpJoxYpwQTNoEBx6aNzR/JLmLJac\n9Prroa/ApEnQoUPc0YikR65OYKOJaSTnzJ8fksBTTxVWElAdgVRPYDN4cNyRZIYSgaTFDz/AEUfA\npZfCvvvGHY1I+g0ZArffXpgT2KhoSJrMHc44A777Dp55Rh3HpHBde21oQTRiRNyRBCoakpzx0EOh\nr8CjjyoJSGHr1w/+9S8otNJCJQJpkrffhquvhtGjoVWruKPJDNURSLXqCWwuughWrYo7mvRRIpBG\nW7gQjj46dBrbZpu4oxHJjiOPhLZtC2sCG9URSKOsXBlaUuy1V5jIQ6SYTJ0K++0HM2bAhhvGF4f6\nEUis+vYNlWYvvghr6b5SitD554dz/+6744tBlcUSm6efhhdeCEP0FkMSUB2BJHPDDaGV3PTpcUfS\ndEXwbyzp9OGHoaLsb3+DNm3ijkYkPhtuGBpKXHJJ/o9DpKIhSdnixbDLLuFKKF9nGxNJp5UrwzhE\nAwfGMw6R6ggkq1avhl69oGvX0LtSRIK//x0uuACmTYN11snusVVHIFl1/fXw/fe5O0FHJqmOQOpy\n4IFhsvuopYOVAAAPeklEQVS77oo7ksard6pKkRdfDL2G330XWrSIOxqR3DN4MOy+O5x8MmyySdzR\nNJyKhqROs2fDHnuEZLDrrnFHI5K7Lr881KM9/HD2jqk6Asm4ZcugZ8/QSuiss+KORiS3ffttKCIa\nMwZ+97vsHFN1BJJR7mGqyZ494cwz444mXqojkFT8+tdw443Qp0/+NSdVIpCkhgyBuXPhnns0oqhI\nqk49FZYvh5Ej446kYVJKBGbWx8ymRj8XRY8dZWbTzKzKzHaqY9uDzGymmc0ys37pClwyZ/RouO02\nGDUqt6bli0tZWVncIUieaNYM7rwTrrgiJIR8UW8iMLPuwOnAzkAp0MvMtgKmAocDb9ax7VrAPcCB\nQHfgODPbNg1xSwYsXBg6il12WUgGhTTdpEi27LlnKFK99da4I0ldKncE3YBJ7v6ju1cB44Ej3P0j\nd58N1FVw0AOY7e7z3H0lMALo3eSoJa3cQ/PQHXYIX/5Tp4YTWQLVEUhD3XJLGIwuX6a1TCURTAP2\nNLM2ZtYSOBhon+L+NwcS34rPosckR8yaFeYYHjoUxo4NXeVbtow7KpH81rEjnHdemNEsH9Tboczd\nZ5rZIGAssAyYAlRlIpjy8nI6deoEQElJCaWlpT+Vz1ZflW
k5Pctjx1YwYgQ8/3wZ11wD229fweLF\nALkRXy4tl5WV5VQ8Ws6P5d12g8ceK+Ott2DlyvTsv/rvyspK0qnB/QjMbAAw392HRstvAJe6+3tJ\n1u0JXOfuB0XL/QF390FJ1lU/giyZODE0Cd1yS7jvPtUFiGTK8OGhBd4772RmyPas9iMws7bR7w6E\nCuLhNVepZdPJQBcz62hmawPHAi80MlZpom+/DberRx0F110XegsrCdQv8WpMpCGOOw7WXhueeCLu\nSOqWao4aZWbTgOeB89x9qZkdZmbzgZ7AGDN7BcDM2pnZGICocvkC4DVgOjDC3Wek/VVIvUaPhu7d\noaoqTKRx9NHqHyCSaWahOelVV8HSpXFHUzsNMVHgPvsMLrwQZs6EBx8MTdtEJLtOPTUMRjdwYHr3\nqyEmpE5VVXDvvbDjjmHijPffVxIQicvNN4fB6ObMiTuS5JQICtDUqfD738OIETB+fKgPyPaEGYVE\ndQTSVO3ahY6al10WdyTJKREUmFdeCf0CTj0V3nwTunWLOyIRAbj44jDn97hxcUeyJtURFJAvvgjD\n344cqWIgkVz03HNhwvv334fmaZgWTHUE8gtVVXDSSXDuuUoCIrmqd+9Qafzgg3FH8ktKBAVi4MCQ\nDK66Ku5ICo/qCCRdzOCOO8Ic4IsWxR3Nz5QICsDEiWGAq7/8JQyDKyK5a4cdfu7UmStUR5DnFi8O\nTUTvvhsOOSTuaEQkFd98A9ttB+++27Te/ZqzWHAPVxZbbBF6L4pI/vjiC9hss6btQ5XFwgMPwCef\nhLHPJXNURyCZ0NQkkE5paMAkcZg6Fa65Bt56S53FRKRpVDSUh5Yvh513DpNenHJK3NGISFxUR1DE\nzjorJINhwzSCqEgxUx1BkfrrX+GNN+D++5UEskV1BFLoVEeQRz75BM4/P4wn1Lp13NGISKFQ0VCe\nWLkyDB1xzDHQt2/c0YhILlAdQZG58sowcuGLL2Zm7lMRyT+qIygiY8eGiuHHH1cSiIPqCKTQqY4g\nxy1cCOXlIRG0bRt3NCJSiFIqGjKzPsAZ0eJD7n6XmbUBngE6ApXAMe7+bZJtK4FvgdXASnfvUcsx\nVDRUw+rVcPDBYY6BAQPijkZEck3WiobMrDtwOrAzUAr0MrPOQH9gnLt3BV4HrqxlF6uBMnffsbYk\nIMkNGQLffZdboxSKSOFJpcS5GzDJ3X909ypgPHAEcCjwRLTOE8BhtWxvKR5HErzzThhDaPhwaNEi\n7miKm+oIpNCl8gU9DdjTzNqYWUvgYKA9sIm7LwRw9y+BjWvZ3oGxZjbZzM5MR9CFbulSOO640Gms\nY8e4oxGRQldvZbG7zzSzQcBYYBkwBahKtmotu9jD3ReYWVtCQpjh7hOSrVheXk6nTp0AKCkpobS0\nlLKyMuDnq7JCX9577zLOPhu6d69gww0Bciu+YlwuKyvLqXi0XLzL1X9XVlaSTg3uR2BmA4D5QB9C\n2f9CM9sUeMPdu9Wz7bXAd+4+JMlzqiwGHn0Ubr89FA2tu27c0YhILstqP4Loah4z6wAcDgwHXgDK\no1VOAZ5Psl1LM2sV/b0ecAChqEmSWLoULrsMRoxQEsgliVdjIoUo1X4Eo8xsA2AlcJ67L42Ki0aa\n2WnAPOAYADNrR2hi2gvYBBhtZh4d6yl3fy3tr6JAjBoFe+0F3bvHHYmIFBMNMZFD9t0XLrgAjjgi\n7khEJB9orKECM29e6Dj2+eeacUxEUqOxhgrMU0/B0UcrCeQi1RFIoVMiyAHuYSyhk0+OOxIRKUYq\nGsoBkyfD8cfDrFmadUxEUqeioQLy5JNw0klKAiISDyWCmK1YAc88AyeeGHckUhvVEUihUyKI2auv\nQteusNVWcUciIsVKdQQxO+ooOPBAOFPD8YlIA6kfQQFYvBg6dQp9CEpK4o5GRPKNKosLwMiR4W5A\nSSC3qY5ACp0SQYyefF
J9B0QkfioaismcObD77vDZZ5qBTEQaR0VDeW7YMDj2WCUBEYmfEkEMNKRE\nflEdgRQ6JYIYTJwYBpfbaae4IxERUR1BLM4+G7bcEvr3jzsSEcln6keQp374ATbfHN5/H9q3jzsa\nEclnqizOU2PGQGmpkkA+UR2BFLpUJ6/vY2ZTo5+LosfamNlrZvaRmf3dzH5dy7YHmdlMM5tlZv3S\nGXw+Ut8BEck19RYNmVl34GlgF2AV8ApwLnAW8B93vyX6gm/j7v1rbLsWMAvYD/gCmAwc6+4zkxyn\n4IuGvv4att4a5s+H1q3jjkZE8l02i4a6AZPc/Ud3rwLGA0cAhwJPROs8ARyWZNsewGx3n+fuK4ER\nQO+mBp2vRoyAXr2UBEQkt6SSCKYBe0ZFQS2Bg4H2wCbuvhDA3b8ENk6y7ebA/ITlz6LHipKKhfKT\n6gik0DWvbwV3n2lmg4CxwDJgClCVbNU0x1ZQZsyAL76A/faLOxIRkV+qNxEAuPtjwGMAZjaAcJW/\n0Mw2cfeFZrYp8FWSTT8HOiQsbxE9llR5eTmdOnUCoKSkhNLSUsrKyoCfr8rydfmmmyr4/e+hWbPc\niEfLqS+XlZXlVDxaLt7l6r8rKytJp5T6EZhZW3f/2sw6AK8CPYGrgEXuPqiOyuJmwEeEyuIFwDvA\nce4+I8kxCrayePXqMO/ASy/BDjvEHY2IFIps9yMYZWbTgOeB89x9KTAI2N/Mqr/oB0aBtTOzMQBR\n5fIFwGvAdGBEsiRQ6N58EzbcUEkgXyVejYkUolSLhvZK8tgi4A9JHl8A9EpYfhXo2oQY896TT8JJ\nJ8UdhYhIchpiIsOWLw9DSsyYAZtuGnc0IlJINMREnnjuOejZU0lARHKXEkGGqe9A/lMdgRQ6JYIM\nWrAAJk2C3kXbl1pE8oHqCDJo8GCYPh0efTTuSESkEKmOIA+oWEhE8oESQYZ88AEsWQJ7rdHwVvKN\n6gik0KXUjyDfrVwZvpQXLw4/ixb9/HfiT5cucP758OukMys0zLBhcOKJsJZSrYjkuJyqI+jXr/Gx\nrFyZ/Mt98WL473+hpATatKn7Z/x4eOUVuOii8NPYhLBqVZiB7I03YNttG/2SRETqlK46gpy6Iygp\nafy2zZtDt27Jv+Bbt07tyvyMM2DWLLjppnB30NiEMG4cdOigJCAi+SGn7ghyJRb4OSE05g7hhBNg\nt93gggsyG6NkR0VFxU+jQIrkErUayrBttgmtft56C2bPDncIN94I335b93ZLl4ZRRo89Njtxiog0\nlRJBPRqaEEaNgrIy2GijrIYpGaS7ASl0SgQpSjUhDBumkUZFJL8oETRQXQnh009D/4Feverfj+QP\n9SOQQqdE0EjJEsIpp8Axx8A668QdnYhI6tRqKE1mzYIhQ0JLoe23jzsaESkG6Wo1pEQgIpKn1HxU\nJEtURyCFLqVEYGaXmNk0M/vQzJ4ys7XN7LdmNtHMPjCz582sVS3bVkbrTDGzd9Ibvkjmvf/++3GH\nIJJR9SYCM9sMuBDYyd1/QxiW4jjgIeAKd/8tMBq4opZdrAbK3H1Hd++RnrBFsmfJkiVxhyCSUakW\nDTUD1jOz5sC6wOfA1u4+IXp+HHBkLdtaA46T9+IoRsjEMdOxz8bsoyHbpLpufesVS9FPXK8zF8/P\nfDk3G3rcxqr3C9rdvwAGA58SEsC37j4OmG5mh0arHQNsUdsugLFmNtnMzkxDzDlNiaBp+8jFRFBZ\nWZnScXKdEkHTti/kRFBvqyEzKwFGAUcD3wLPAn8F3gXuBjYAXgAucve2SbZv5+4LzKwtMBa4IOFO\nInE9NRkSEWmgbA1D/QdgrrsvAjCzvwG7u/tw4MDosa2BP9YS5ILo99dmNhroAayRCNLxYkREpOFS\nKbv/FOhpZr8yMwP2A2ZEV/iY2VrA1cDQmhuaWcvq1kRmth5wADAtXcGLiEjTpVJH8A6h
OGgK8AGh\n8vdB4Dgz+wj4N/C5uz8OoSjIzMZEm28CTDCzKcDbwIvu/lraX4WIiDRazvQsFhGReBRNs04REUlO\niUBEpMjlfCKIKpwnm9nBccciUs3MtjWz+81spJmdE3c8IonMrLeZPWhmT5vZ/vWun+t1BGZ2PfAd\n8G93fznueEQSRS3pnnD3k+OORaSmqB/Yre5eZ2ferNwRmNkjZrbQzD6s8fhBZjbTzGaZWb8k2/2B\n0Crpa0JrJZG0auy5Ga1zCDAG0AWKZERTzs/I1cC99R4nG3cEZvZ7YBnwZDRwXXX/g1mEfglfAJOB\nY919ppmdBOwErE/ozdwdWO7uh2c8WCkqjTw3dyRcZS2I1h/j7pqgVNKuCefnbcBFwGvu/np9x0ml\nZ3GTufsEM+tY4+EewGx3nwdgZiOA3sBMdx8GDKte0cxOBr7JRqxSXBp7bprZ3mbWH1gHeCmrQUvR\naML5eSEhUaxvZl3c/cG6jpOVRFCLzYH5CcufEV7gGtz9yaxEJBLUe266+5vAm9kMSiSSyvl5N2Es\nuJTkfKshERHJrDgTwedAh4TlLaLHROKmc1NyWdrPz2wmAuOXLX8mA13MrKOZrQ0cSxjOWiTbdG5K\nLsv4+Zmt5qPDgYnANmb2qZmd6u5VhCkwXwOmAyPcfUY24hGppnNTclm2zs+c71AmIiKZpcpiEZEi\np0QgIlLklAhERIqcEoGISJFTIhARKXJKBCIiRU6JQESkyCkRiIgUuf8PzwVYtRM1QyIAAAAASUVO\nRK5CYII=\n",
      "text/plain": [
       "<matplotlib.figure.Figure at 0x12d6175d0>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# Plot test accuracy vs. regularization strength on a log-scale x-axis.\n",
    "plt.semilogx(regul_val, accuracy_val)\n",
    "plt.grid(True)\n",
    "plt.title('Test accuracy by regularization (1-layer net)')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "na8xX2yHZzNF"
   },
   "source": [
    "---\n",
    "Problem 2\n",
    "---------\n",
    "Let's demonstrate an extreme case of overfitting. Restrict your training data to just a few batches. What happens?\n",
    "\n",
    "---"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "batch_size = 128\n",
    "num_hidden_nodes = 1024\n",
    "\n",
    "graph = tf.Graph()\n",
    "with graph.as_default():\n",
    "\n",
    "  # Input data. For the training data, we use a placeholder that will be fed\n",
    "  # at run time with a training minibatch.\n",
    "  tf_train_dataset = tf.placeholder(tf.float32,\n",
    "                                    shape=(batch_size, image_size * image_size))\n",
    "  tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n",
    "  tf_valid_dataset = tf.constant(valid_dataset)\n",
    "  tf_test_dataset = tf.constant(test_dataset)\n",
    "  # NOTE: beta_regul is declared (so the same feed_dict shape can be reused) but\n",
    "  # intentionally not used in the loss below — this cell demonstrates\n",
    "  # overfitting *without* regularization.\n",
    "  beta_regul = tf.placeholder(tf.float32)\n",
    "  \n",
    "  # Variables.\n",
    "  weights1 = tf.Variable(\n",
    "    tf.truncated_normal([image_size * image_size, num_hidden_nodes]))\n",
    "  biases1 = tf.Variable(tf.zeros([num_hidden_nodes]))\n",
    "  weights2 = tf.Variable(\n",
    "    tf.truncated_normal([num_hidden_nodes, num_labels]))\n",
    "  biases2 = tf.Variable(tf.zeros([num_labels]))\n",
    "  \n",
    "  # Training computation.\n",
    "  lay1_train = tf.nn.relu(tf.matmul(tf_train_dataset, weights1) + biases1)\n",
    "  logits = tf.matmul(lay1_train, weights2) + biases2\n",
    "  loss = tf.reduce_mean(\n",
    "    tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))\n",
    "  \n",
    "  # Optimizer.\n",
    "  optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)\n",
    "  \n",
    "  # Predictions for the training, validation, and test data.\n",
    "  train_prediction = tf.nn.softmax(logits)\n",
    "  lay1_valid = tf.nn.relu(tf.matmul(tf_valid_dataset, weights1) + biases1)\n",
    "  valid_prediction = tf.nn.softmax(tf.matmul(lay1_valid, weights2) + biases2)\n",
    "  lay1_test = tf.nn.relu(tf.matmul(tf_test_dataset, weights1) + biases1)\n",
    "  test_prediction = tf.nn.softmax(tf.matmul(lay1_test, weights2) + biases2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Initialized\n",
      "Minibatch loss at step 0: 370.972351\n",
      "Minibatch accuracy: 8.6%\n",
      "Validation accuracy: 25.9%\n",
      "Minibatch loss at step 2: 952.214539\n",
      "Minibatch accuracy: 36.7%\n",
      "Validation accuracy: 40.9%\n",
      "Minibatch loss at step 4: 344.697205\n",
      "Minibatch accuracy: 62.5%\n",
      "Validation accuracy: 53.6%\n",
      "Minibatch loss at step 6: 10.702987\n",
      "Minibatch accuracy: 95.3%\n",
      "Validation accuracy: 58.9%\n",
      "Minibatch loss at step 8: 5.507030\n",
      "Minibatch accuracy: 96.9%\n",
      "Validation accuracy: 58.8%\n",
      "Minibatch loss at step 10: 6.428697\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 12: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 14: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 16: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 18: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 20: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 22: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 24: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 26: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 28: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 30: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 32: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 34: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 36: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 38: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 40: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 42: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 44: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 46: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 48: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 50: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 52: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 54: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 56: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 58: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 60: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 62: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 64: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 66: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 68: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 70: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 72: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 74: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 76: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 78: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 80: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 82: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 84: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 86: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 88: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 90: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 92: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 94: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 96: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 98: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Minibatch loss at step 100: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 59.7%\n",
      "Test accuracy: 66.8%\n"
     ]
    }
   ],
   "source": [
    "num_steps = 101\n",
    "# Restrict training to a handful of batches to force extreme overfitting.\n",
    "num_batches = 3\n",
    "\n",
    "with tf.Session(graph=graph) as session:\n",
    "  tf.initialize_all_variables().run()\n",
    "  print(\"Initialized\")\n",
    "  for step in range(num_steps):\n",
    "    # Pick an offset within the training data, which has been randomized.\n",
    "    # Note: we could use better randomization across epochs.\n",
    "    #offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n",
    "    # NOTE(review): this cycles the offset over 0..num_batches-1 in *samples*, so the\n",
    "    # three batches overlap by batch_size-1 rows; (step % num_batches) * batch_size\n",
    "    # would give disjoint batches — confirm intent before changing (outputs below\n",
    "    # were produced with this version).\n",
    "    offset = step % num_batches\n",
    "    # Generate a minibatch.\n",
    "    batch_data = train_dataset[offset:(offset + batch_size), :]\n",
    "    batch_labels = train_labels[offset:(offset + batch_size), :]\n",
    "    # Prepare a dictionary telling the session where to feed the minibatch.\n",
    "    # The key of the dictionary is the placeholder node of the graph to be fed,\n",
    "    # and the value is the numpy array to feed to it.\n",
    "    feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels, beta_regul : 1e-3}\n",
    "    _, l, predictions = session.run(\n",
    "      [optimizer, loss, train_prediction], feed_dict=feed_dict)\n",
    "    if (step % 2 == 0):\n",
    "      print(\"Minibatch loss at step %d: %f\" % (step, l))\n",
    "      print(\"Minibatch accuracy: %.1f%%\" % accuracy(predictions, batch_labels))\n",
    "      print(\"Validation accuracy: %.1f%%\" % accuracy(\n",
    "        valid_prediction.eval(), valid_labels))\n",
    "  print(\"Test accuracy: %.1f%%\" % accuracy(test_prediction.eval(), test_labels))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Since there are far too many parameters and no regularization, the accuracy on the training batches reaches 100%. The generalization capability is poor, as shown by the validation and test accuracy."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "ww3SCBUdlkRc"
   },
   "source": [
    "---\n",
    "Problem 3\n",
    "---------\n",
    "Introduce Dropout on the hidden layer of the neural network. Remember: Dropout should only be introduced during training, not evaluation, otherwise your evaluation results would be stochastic as well. TensorFlow provides `nn.dropout()` for that, but you have to make sure it's only inserted during training.\n",
    "\n",
    "What happens to our extreme overfitting case?\n",
    "\n",
    "---"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "batch_size = 128\n",
    "num_hidden_nodes = 1024\n",
    "\n",
    "graph = tf.Graph()\n",
    "with graph.as_default():\n",
    "\n",
    "  # Input data. For the training data, we use a placeholder that will be fed\n",
    "  # at run time with a training minibatch.\n",
    "  tf_train_dataset = tf.placeholder(tf.float32,\n",
    "                                    shape=(batch_size, image_size * image_size))\n",
    "  tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n",
    "  tf_valid_dataset = tf.constant(valid_dataset)\n",
    "  tf_test_dataset = tf.constant(test_dataset)\n",
    "  \n",
    "  # Variables.\n",
    "  weights1 = tf.Variable(\n",
    "    tf.truncated_normal([image_size * image_size, num_hidden_nodes]))\n",
    "  biases1 = tf.Variable(tf.zeros([num_hidden_nodes]))\n",
    "  weights2 = tf.Variable(\n",
    "    tf.truncated_normal([num_hidden_nodes, num_labels]))\n",
    "  biases2 = tf.Variable(tf.zeros([num_labels]))\n",
    "  \n",
    "  # Training computation.\n",
    "  lay1_train = tf.nn.relu(tf.matmul(tf_train_dataset, weights1) + biases1)\n",
    "  drop1 = tf.nn.dropout(lay1_train, 0.5)\n",
    "  logits = tf.matmul(drop1, weights2) + biases2\n",
    "  loss = tf.reduce_mean(\n",
    "    tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))\n",
    "    \n",
    "  # Optimizer.\n",
    "  optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)\n",
    "  \n",
    "  # Predictions for the training, validation, and test data.\n",
    "  train_prediction = tf.nn.softmax(logits)\n",
    "  lay1_valid = tf.nn.relu(tf.matmul(tf_valid_dataset, weights1) + biases1)\n",
    "  valid_prediction = tf.nn.softmax(tf.matmul(lay1_valid, weights2) + biases2)\n",
    "  lay1_test = tf.nn.relu(tf.matmul(tf_test_dataset, weights1) + biases1)\n",
    "  test_prediction = tf.nn.softmax(tf.matmul(lay1_test, weights2) + biases2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Initialized\n",
      "Minibatch loss at step 0: 522.686890\n",
      "Minibatch accuracy: 10.9%\n",
      "Validation accuracy: 29.1%\n",
      "Minibatch loss at step 2: 814.265259\n",
      "Minibatch accuracy: 43.8%\n",
      "Validation accuracy: 28.4%\n",
      "Minibatch loss at step 4: 300.448486\n",
      "Minibatch accuracy: 59.4%\n",
      "Validation accuracy: 54.8%\n",
      "Minibatch loss at step 6: 24.725126\n",
      "Minibatch accuracy: 91.4%\n",
      "Validation accuracy: 65.4%\n",
      "Minibatch loss at step 8: 2.053320\n",
      "Minibatch accuracy: 95.3%\n",
      "Validation accuracy: 66.7%\n",
      "Minibatch loss at step 10: 26.185101\n",
      "Minibatch accuracy: 90.6%\n",
      "Validation accuracy: 62.2%\n",
      "Minibatch loss at step 12: 74.086914\n",
      "Minibatch accuracy: 89.8%\n",
      "Validation accuracy: 66.1%\n",
      "Minibatch loss at step 14: 16.961481\n",
      "Minibatch accuracy: 93.8%\n",
      "Validation accuracy: 67.2%\n",
      "Minibatch loss at step 16: 0.000043\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 68.5%\n",
      "Minibatch loss at step 18: 1.931412\n",
      "Minibatch accuracy: 98.4%\n",
      "Validation accuracy: 68.1%\n",
      "Minibatch loss at step 20: 3.458273\n",
      "Minibatch accuracy: 96.9%\n",
      "Validation accuracy: 67.9%\n",
      "Minibatch loss at step 22: 0.269873\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 67.5%\n",
      "Minibatch loss at step 24: 6.727062\n",
      "Minibatch accuracy: 98.4%\n",
      "Validation accuracy: 67.4%\n",
      "Minibatch loss at step 26: 1.342917\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 67.0%\n",
      "Minibatch loss at step 28: 3.533568\n",
      "Minibatch accuracy: 98.4%\n",
      "Validation accuracy: 66.1%\n",
      "Minibatch loss at step 30: 2.286844\n",
      "Minibatch accuracy: 98.4%\n",
      "Validation accuracy: 66.2%\n",
      "Minibatch loss at step 32: 0.303651\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 67.0%\n",
      "Minibatch loss at step 34: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 67.0%\n",
      "Minibatch loss at step 36: 0.939636\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 66.4%\n",
      "Minibatch loss at step 38: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 66.4%\n",
      "Minibatch loss at step 40: 0.764459\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 65.7%\n",
      "Minibatch loss at step 42: 0.769536\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 67.0%\n",
      "Minibatch loss at step 44: 2.977767\n",
      "Minibatch accuracy: 98.4%\n",
      "Validation accuracy: 68.4%\n",
      "Minibatch loss at step 46: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 69.5%\n",
      "Minibatch loss at step 48: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 69.6%\n",
      "Minibatch loss at step 50: 3.058991\n",
      "Minibatch accuracy: 98.4%\n",
      "Validation accuracy: 68.9%\n",
      "Minibatch loss at step 52: 0.909829\n",
      "Minibatch accuracy: 98.4%\n",
      "Validation accuracy: 68.7%\n",
      "Minibatch loss at step 54: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 68.7%\n",
      "Minibatch loss at step 56: 1.273322\n",
      "Minibatch accuracy: 98.4%\n",
      "Validation accuracy: 68.3%\n",
      "Minibatch loss at step 58: 2.320458\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 68.5%\n",
      "Minibatch loss at step 60: 0.501477\n",
      "Minibatch accuracy: 98.4%\n",
      "Validation accuracy: 68.6%\n",
      "Minibatch loss at step 62: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 68.7%\n",
      "Minibatch loss at step 64: 0.263712\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 68.9%\n",
      "Minibatch loss at step 66: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 68.9%\n",
      "Minibatch loss at step 68: 1.202995\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 69.0%\n",
      "Minibatch loss at step 70: 1.206359\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 69.0%\n",
      "Minibatch loss at step 72: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 69.0%\n",
      "Minibatch loss at step 74: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 69.0%\n",
      "Minibatch loss at step 76: 1.197501\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 67.9%\n",
      "Minibatch loss at step 78: 1.862640\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 67.9%\n",
      "Minibatch loss at step 80: 0.206966\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 67.4%\n",
      "Minibatch loss at step 82: 0.051613\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 67.3%\n",
      "Minibatch loss at step 84: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 67.4%\n",
      "Minibatch loss at step 86: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 67.5%\n",
      "Minibatch loss at step 88: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 66.7%\n",
      "Minibatch loss at step 90: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 66.7%\n",
      "Minibatch loss at step 92: 0.589971\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 67.1%\n",
      "Minibatch loss at step 94: 1.581323\n",
      "Minibatch accuracy: 98.4%\n",
      "Validation accuracy: 66.6%\n",
      "Minibatch loss at step 96: 2.395708\n",
      "Minibatch accuracy: 98.4%\n",
      "Validation accuracy: 67.9%\n",
      "Minibatch loss at step 98: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 67.9%\n",
      "Minibatch loss at step 100: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 66.7%\n",
      "Test accuracy: 73.7%\n"
     ]
    }
   ],
   "source": [
    "num_steps = 101\n",
    "num_batches = 3\n",
    "\n",
    "with tf.Session(graph=graph) as session:\n",
    "  tf.initialize_all_variables().run()\n",
    "  print(\"Initialized\")\n",
    "  for step in range(num_steps):\n",
    "    # Pick an offset within the training data, which has been randomized.\n",
    "    # Note: we could use better randomization across epochs.\n",
    "    #offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n",
    "    offset = step % num_batches\n",
    "    # Generate a minibatch.\n",
    "    batch_data = train_dataset[offset:(offset + batch_size), :]\n",
    "    batch_labels = train_labels[offset:(offset + batch_size), :]\n",
    "    # Prepare a dictionary telling the session where to feed the minibatch.\n",
    "    # The key of the dictionary is the placeholder node of the graph to be fed,\n",
    "    # and the value is the numpy array to feed to it.\n",
    "    feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}\n",
    "    _, l, predictions = session.run(\n",
    "      [optimizer, loss, train_prediction], feed_dict=feed_dict)\n",
    "    if (step % 2 == 0):\n",
    "      print(\"Minibatch loss at step %d: %f\" % (step, l))\n",
    "      print(\"Minibatch accuracy: %.1f%%\" % accuracy(predictions, batch_labels))\n",
    "      print(\"Validation accuracy: %.1f%%\" % accuracy(\n",
    "        valid_prediction.eval(), valid_labels))\n",
    "  print(\"Test accuracy: %.1f%%\" % accuracy(test_prediction.eval(), test_labels))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The first conclusion is that 100% accuracy on the minibatches is more difficult to achieve or to maintain. As a result, the test accuracy is improved by 6%; the final net is more capable of generalization."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "-b1hTz3VWZjw"
   },
   "source": [
    "---\n",
    "Problem 4\n",
    "---------\n",
    "\n",
    "Try to get the best performance you can using a multi-layer model! The best reported test accuracy using a deep network is [97.1%](http://yaroslavvb.blogspot.com/2011/09/notmnist-dataset.html?showComment=1391023266211#c8758720086795711595).\n",
    "\n",
    "One avenue you can explore is to add multiple layers.\n",
    "\n",
    "Another one is to use learning rate decay:\n",
    "\n",
    "    global_step = tf.Variable(0)  # count the number of steps taken.\n",
    "    learning_rate = tf.train.exponential_decay(0.5, step, ...)\n",
    "    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)\n",
    " \n",
    " ---\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Let's do a first try with 2 layers. Note how the parameters are initialized, compared to the previous cases."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "batch_size = 128\n",
    "num_hidden_nodes1 = 1024\n",
    "num_hidden_nodes2 = 100\n",
    "beta_regul = 1e-3\n",
    "\n",
    "graph = tf.Graph()\n",
    "with graph.as_default():\n",
    "\n",
    "  # Input data. For the training data, we use a placeholder that will be fed\n",
    "  # at run time with a training minibatch.\n",
    "  tf_train_dataset = tf.placeholder(tf.float32,\n",
    "                                    shape=(batch_size, image_size * image_size))\n",
    "  tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n",
    "  tf_valid_dataset = tf.constant(valid_dataset)\n",
    "  tf_test_dataset = tf.constant(test_dataset)\n",
    "  global_step = tf.Variable(0)\n",
    "\n",
    "  # Variables.\n",
    "  weights1 = tf.Variable(\n",
    "    tf.truncated_normal(\n",
    "        [image_size * image_size, num_hidden_nodes1],\n",
    "        stddev=np.sqrt(2.0 / (image_size * image_size)))\n",
    "    )\n",
    "  biases1 = tf.Variable(tf.zeros([num_hidden_nodes1]))\n",
    "  weights2 = tf.Variable(\n",
    "    tf.truncated_normal([num_hidden_nodes1, num_hidden_nodes2], stddev=np.sqrt(2.0 / num_hidden_nodes1)))\n",
    "  biases2 = tf.Variable(tf.zeros([num_hidden_nodes2]))\n",
    "  weights3 = tf.Variable(\n",
    "    tf.truncated_normal([num_hidden_nodes2, num_labels], stddev=np.sqrt(2.0 / num_hidden_nodes2)))\n",
    "  biases3 = tf.Variable(tf.zeros([num_labels]))\n",
    "  \n",
    "  # Training computation.\n",
    "  lay1_train = tf.nn.relu(tf.matmul(tf_train_dataset, weights1) + biases1)\n",
    "  lay2_train = tf.nn.relu(tf.matmul(lay1_train, weights2) + biases2)\n",
    "  logits = tf.matmul(lay2_train, weights3) + biases3\n",
    "  loss = tf.reduce_mean(\n",
    "    tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels)) + \\\n",
    "      beta_regul * (tf.nn.l2_loss(weights1) + tf.nn.l2_loss(weights2) + tf.nn.l2_loss(weights3))\n",
    "  \n",
    "  # Optimizer.\n",
    "  learning_rate = tf.train.exponential_decay(0.5, global_step, 1000, 0.65, staircase=True)\n",
    "  optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)\n",
    "  \n",
    "  # Predictions for the training, validation, and test data.\n",
    "  train_prediction = tf.nn.softmax(logits)\n",
    "  lay1_valid = tf.nn.relu(tf.matmul(tf_valid_dataset, weights1) + biases1)\n",
    "  lay2_valid = tf.nn.relu(tf.matmul(lay1_valid, weights2) + biases2)\n",
    "  valid_prediction = tf.nn.softmax(tf.matmul(lay2_valid, weights3) + biases3)\n",
    "  lay1_test = tf.nn.relu(tf.matmul(tf_test_dataset, weights1) + biases1)\n",
    "  lay2_test = tf.nn.relu(tf.matmul(lay1_test, weights2) + biases2)\n",
    "  test_prediction = tf.nn.softmax(tf.matmul(lay2_test, weights3) + biases3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Initialized\n",
      "Minibatch loss at step 0: 3.272147\n",
      "Minibatch accuracy: 10.9%\n",
      "Validation accuracy: 34.4%\n",
      "Minibatch loss at step 500: 0.930104\n",
      "Minibatch accuracy: 90.6%\n",
      "Validation accuracy: 85.6%\n",
      "Minibatch loss at step 1000: 0.904542\n",
      "Minibatch accuracy: 84.4%\n",
      "Validation accuracy: 86.6%\n",
      "Minibatch loss at step 1500: 0.575127\n",
      "Minibatch accuracy: 93.0%\n",
      "Validation accuracy: 88.0%\n",
      "Minibatch loss at step 2000: 0.520965\n",
      "Minibatch accuracy: 94.5%\n",
      "Validation accuracy: 88.2%\n",
      "Minibatch loss at step 2500: 0.531228\n",
      "Minibatch accuracy: 90.6%\n",
      "Validation accuracy: 88.6%\n",
      "Minibatch loss at step 3000: 0.565390\n",
      "Minibatch accuracy: 88.3%\n",
      "Validation accuracy: 88.7%\n",
      "Minibatch loss at step 3500: 0.573201\n",
      "Minibatch accuracy: 89.1%\n",
      "Validation accuracy: 89.2%\n",
      "Minibatch loss at step 4000: 0.445847\n",
      "Minibatch accuracy: 92.2%\n",
      "Validation accuracy: 89.0%\n",
      "Minibatch loss at step 4500: 0.444020\n",
      "Minibatch accuracy: 90.6%\n",
      "Validation accuracy: 89.4%\n",
      "Minibatch loss at step 5000: 0.498980\n",
      "Minibatch accuracy: 90.6%\n",
      "Validation accuracy: 89.5%\n",
      "Minibatch loss at step 5500: 0.493428\n",
      "Minibatch accuracy: 89.8%\n",
      "Validation accuracy: 89.6%\n",
      "Minibatch loss at step 6000: 0.563357\n",
      "Minibatch accuracy: 88.3%\n",
      "Validation accuracy: 89.8%\n",
      "Minibatch loss at step 6500: 0.390322\n",
      "Minibatch accuracy: 93.0%\n",
      "Validation accuracy: 89.8%\n",
      "Minibatch loss at step 7000: 0.506404\n",
      "Minibatch accuracy: 87.5%\n",
      "Validation accuracy: 90.0%\n",
      "Minibatch loss at step 7500: 0.472213\n",
      "Minibatch accuracy: 89.1%\n",
      "Validation accuracy: 90.0%\n",
      "Minibatch loss at step 8000: 0.571431\n",
      "Minibatch accuracy: 85.9%\n",
      "Validation accuracy: 90.0%\n",
      "Minibatch loss at step 8500: 0.409382\n",
      "Minibatch accuracy: 92.2%\n",
      "Validation accuracy: 90.1%\n",
      "Minibatch loss at step 9000: 0.470708\n",
      "Minibatch accuracy: 89.8%\n",
      "Validation accuracy: 90.0%\n",
      "Test accuracy: 95.8%\n"
     ]
    }
   ],
   "source": [
    "num_steps = 9001\n",
    "\n",
    "with tf.Session(graph=graph) as session:\n",
    "  tf.initialize_all_variables().run()\n",
    "  print(\"Initialized\")\n",
    "  for step in range(num_steps):\n",
    "    # Pick an offset within the training data, which has been randomized.\n",
    "    # Note: we could use better randomization across epochs.\n",
    "    offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n",
    "    # Generate a minibatch.\n",
    "    batch_data = train_dataset[offset:(offset + batch_size), :]\n",
    "    batch_labels = train_labels[offset:(offset + batch_size), :]\n",
    "    # Prepare a dictionary telling the session where to feed the minibatch.\n",
    "    # The key of the dictionary is the placeholder node of the graph to be fed,\n",
    "    # and the value is the numpy array to feed to it.\n",
    "    feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}\n",
    "    _, l, predictions = session.run(\n",
    "      [optimizer, loss, train_prediction], feed_dict=feed_dict)\n",
    "    if (step % 500 == 0):\n",
    "      print(\"Minibatch loss at step %d: %f\" % (step, l))\n",
    "      print(\"Minibatch accuracy: %.1f%%\" % accuracy(predictions, batch_labels))\n",
    "      print(\"Validation accuracy: %.1f%%\" % accuracy(\n",
    "        valid_prediction.eval(), valid_labels))\n",
    "  print(\"Test accuracy: %.1f%%\" % accuracy(test_prediction.eval(), test_labels))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "This is getting really good. Let's try one layer deeper. (Note: `keep_prob` is defined in the next cell but dropout is not actually applied in this network; dropout is only used in the larger model further below.)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "batch_size = 128\n",
    "num_hidden_nodes1 = 1024\n",
    "num_hidden_nodes2 = 256\n",
    "num_hidden_nodes3 = 128\n",
    "keep_prob = 0.5\n",
    "\n",
    "graph = tf.Graph()\n",
    "with graph.as_default():\n",
    "\n",
    "  # Input data. For the training data, we use a placeholder that will be fed\n",
    "  # at run time with a training minibatch.\n",
    "  tf_train_dataset = tf.placeholder(tf.float32,\n",
    "                                    shape=(batch_size, image_size * image_size))\n",
    "  tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n",
    "  tf_valid_dataset = tf.constant(valid_dataset)\n",
    "  tf_test_dataset = tf.constant(test_dataset)\n",
    "  global_step = tf.Variable(0)\n",
    "\n",
    "  # Variables.\n",
    "  weights1 = tf.Variable(\n",
    "    tf.truncated_normal(\n",
    "        [image_size * image_size, num_hidden_nodes1],\n",
    "        stddev=np.sqrt(2.0 / (image_size * image_size)))\n",
    "    )\n",
    "  biases1 = tf.Variable(tf.zeros([num_hidden_nodes1]))\n",
    "  weights2 = tf.Variable(\n",
    "    tf.truncated_normal([num_hidden_nodes1, num_hidden_nodes2], stddev=np.sqrt(2.0 / num_hidden_nodes1)))\n",
    "  biases2 = tf.Variable(tf.zeros([num_hidden_nodes2]))\n",
    "  weights3 = tf.Variable(\n",
    "    tf.truncated_normal([num_hidden_nodes2, num_hidden_nodes3], stddev=np.sqrt(2.0 / num_hidden_nodes2)))\n",
    "  biases3 = tf.Variable(tf.zeros([num_hidden_nodes3]))\n",
    "  weights4 = tf.Variable(\n",
    "    tf.truncated_normal([num_hidden_nodes3, num_labels], stddev=np.sqrt(2.0 / num_hidden_nodes3)))\n",
    "  biases4 = tf.Variable(tf.zeros([num_labels]))\n",
    "  \n",
    "  # Training computation.\n",
    "  lay1_train = tf.nn.relu(tf.matmul(tf_train_dataset, weights1) + biases1)\n",
    "  lay2_train = tf.nn.relu(tf.matmul(lay1_train, weights2) + biases2)\n",
    "  lay3_train = tf.nn.relu(tf.matmul(lay2_train, weights3) + biases3)\n",
    "  logits = tf.matmul(lay3_train, weights4) + biases4\n",
    "  loss = tf.reduce_mean(\n",
    "    tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))\n",
    "  \n",
    "  # Optimizer.\n",
    "  learning_rate = tf.train.exponential_decay(0.5, global_step, 4000, 0.65, staircase=True)\n",
    "  optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)\n",
    "  \n",
    "  # Predictions for the training, validation, and test data.\n",
    "  train_prediction = tf.nn.softmax(logits)\n",
    "  lay1_valid = tf.nn.relu(tf.matmul(tf_valid_dataset, weights1) + biases1)\n",
    "  lay2_valid = tf.nn.relu(tf.matmul(lay1_valid, weights2) + biases2)\n",
    "  lay3_valid = tf.nn.relu(tf.matmul(lay2_valid, weights3) + biases3)\n",
    "  valid_prediction = tf.nn.softmax(tf.matmul(lay3_valid, weights4) + biases4)\n",
    "  lay1_test = tf.nn.relu(tf.matmul(tf_test_dataset, weights1) + biases1)\n",
    "  lay2_test = tf.nn.relu(tf.matmul(lay1_test, weights2) + biases2)\n",
    "  lay3_test = tf.nn.relu(tf.matmul(lay2_test, weights3) + biases3)\n",
    "  test_prediction = tf.nn.softmax(tf.matmul(lay3_test, weights4) + biases4)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Initialized\n",
      "Minibatch loss at step 0: 2.427155\n",
      "Minibatch accuracy: 7.0%\n",
      "Validation accuracy: 27.4%\n",
      "Minibatch loss at step 500: 0.363047\n",
      "Minibatch accuracy: 89.8%\n",
      "Validation accuracy: 85.8%\n",
      "Minibatch loss at step 1000: 0.466222\n",
      "Minibatch accuracy: 85.9%\n",
      "Validation accuracy: 86.2%\n",
      "Minibatch loss at step 1500: 0.249981\n",
      "Minibatch accuracy: 92.2%\n",
      "Validation accuracy: 87.8%\n",
      "Minibatch loss at step 2000: 0.246187\n",
      "Minibatch accuracy: 94.5%\n",
      "Validation accuracy: 88.7%\n",
      "Minibatch loss at step 2500: 0.279155\n",
      "Minibatch accuracy: 91.4%\n",
      "Validation accuracy: 88.4%\n",
      "Minibatch loss at step 3000: 0.340918\n",
      "Minibatch accuracy: 86.7%\n",
      "Validation accuracy: 89.0%\n",
      "Minibatch loss at step 3500: 0.344907\n",
      "Minibatch accuracy: 88.3%\n",
      "Validation accuracy: 89.1%\n",
      "Minibatch loss at step 4000: 0.252765\n",
      "Minibatch accuracy: 93.0%\n",
      "Validation accuracy: 89.3%\n",
      "Minibatch loss at step 4500: 0.248396\n",
      "Minibatch accuracy: 91.4%\n",
      "Validation accuracy: 89.6%\n",
      "Minibatch loss at step 5000: 0.309714\n",
      "Minibatch accuracy: 92.2%\n",
      "Validation accuracy: 89.9%\n",
      "Minibatch loss at step 5500: 0.205931\n",
      "Minibatch accuracy: 93.0%\n",
      "Validation accuracy: 89.8%\n",
      "Minibatch loss at step 6000: 0.344032\n",
      "Minibatch accuracy: 90.6%\n",
      "Validation accuracy: 90.0%\n",
      "Minibatch loss at step 6500: 0.167668\n",
      "Minibatch accuracy: 94.5%\n",
      "Validation accuracy: 90.0%\n",
      "Minibatch loss at step 7000: 0.291468\n",
      "Minibatch accuracy: 90.6%\n",
      "Validation accuracy: 90.2%\n",
      "Minibatch loss at step 7500: 0.183530\n",
      "Minibatch accuracy: 93.0%\n",
      "Validation accuracy: 90.3%\n",
      "Minibatch loss at step 8000: 0.275425\n",
      "Minibatch accuracy: 93.8%\n",
      "Validation accuracy: 90.4%\n",
      "Minibatch loss at step 8500: 0.143154\n",
      "Minibatch accuracy: 95.3%\n",
      "Validation accuracy: 90.5%\n",
      "Minibatch loss at step 9000: 0.174426\n",
      "Minibatch accuracy: 95.3%\n",
      "Validation accuracy: 90.2%\n",
      "Minibatch loss at step 9500: 0.191256\n",
      "Minibatch accuracy: 95.3%\n",
      "Validation accuracy: 90.6%\n",
      "Minibatch loss at step 10000: 0.177660\n",
      "Minibatch accuracy: 93.0%\n",
      "Validation accuracy: 90.5%\n",
      "Minibatch loss at step 10500: 0.156403\n",
      "Minibatch accuracy: 95.3%\n",
      "Validation accuracy: 90.5%\n",
      "Minibatch loss at step 11000: 0.076319\n",
      "Minibatch accuracy: 97.7%\n",
      "Validation accuracy: 90.5%\n",
      "Minibatch loss at step 11500: 0.141267\n",
      "Minibatch accuracy: 96.1%\n",
      "Validation accuracy: 90.6%\n",
      "Minibatch loss at step 12000: 0.126884\n",
      "Minibatch accuracy: 96.1%\n",
      "Validation accuracy: 90.7%\n",
      "Minibatch loss at step 12500: 0.100883\n",
      "Minibatch accuracy: 98.4%\n",
      "Validation accuracy: 90.5%\n",
      "Minibatch loss at step 13000: 0.167142\n",
      "Minibatch accuracy: 94.5%\n",
      "Validation accuracy: 90.7%\n",
      "Minibatch loss at step 13500: 0.077268\n",
      "Minibatch accuracy: 97.7%\n",
      "Validation accuracy: 90.8%\n",
      "Minibatch loss at step 14000: 0.110395\n",
      "Minibatch accuracy: 96.9%\n",
      "Validation accuracy: 90.8%\n",
      "Minibatch loss at step 14500: 0.094239\n",
      "Minibatch accuracy: 95.3%\n",
      "Validation accuracy: 90.9%\n",
      "Minibatch loss at step 15000: 0.073499\n",
      "Minibatch accuracy: 98.4%\n",
      "Validation accuracy: 90.7%\n",
      "Minibatch loss at step 15500: 0.083872\n",
      "Minibatch accuracy: 98.4%\n",
      "Validation accuracy: 90.9%\n",
      "Minibatch loss at step 16000: 0.030450\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 90.8%\n",
      "Minibatch loss at step 16500: 0.058453\n",
      "Minibatch accuracy: 98.4%\n",
      "Validation accuracy: 90.9%\n",
      "Minibatch loss at step 17000: 0.026212\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 90.9%\n",
      "Minibatch loss at step 17500: 0.013765\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 90.9%\n",
      "Minibatch loss at step 18000: 0.056984\n",
      "Minibatch accuracy: 98.4%\n",
      "Validation accuracy: 90.9%\n",
      "Test accuracy: 96.3%\n"
     ]
    }
   ],
   "source": [
    "num_steps = 18001\n",
    "\n",
    "with tf.Session(graph=graph) as session:\n",
    "  tf.initialize_all_variables().run()\n",
    "  print(\"Initialized\")\n",
    "  for step in range(num_steps):\n",
    "    # Pick an offset within the training data, which has been randomized.\n",
    "    # Note: we could use better randomization across epochs.\n",
    "    offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n",
    "    # Generate a minibatch.\n",
    "    batch_data = train_dataset[offset:(offset + batch_size), :]\n",
    "    batch_labels = train_labels[offset:(offset + batch_size), :]\n",
    "    # Prepare a dictionary telling the session where to feed the minibatch.\n",
    "    # The key of the dictionary is the placeholder node of the graph to be fed,\n",
    "    # and the value is the numpy array to feed to it.\n",
    "    feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}\n",
    "    _, l, predictions = session.run(\n",
    "      [optimizer, loss, train_prediction], feed_dict=feed_dict)\n",
    "    if (step % 500 == 0):\n",
    "      print(\"Minibatch loss at step %d: %f\" % (step, l))\n",
    "      print(\"Minibatch accuracy: %.1f%%\" % accuracy(predictions, batch_labels))\n",
    "      print(\"Validation accuracy: %.1f%%\" % accuracy(\n",
    "        valid_prediction.eval(), valid_labels))\n",
    "  print(\"Test accuracy: %.1f%%\" % accuracy(test_prediction.eval(), test_labels))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Huge! That's my best score on this dataset. I have also tried more parameters, but it does not help:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "batch_size = 128\n",
    "num_hidden_nodes1 = 1024\n",
    "num_hidden_nodes2 = 512\n",
    "num_hidden_nodes3 = 256\n",
    "keep_prob = 0.5\n",
    "\n",
    "graph = tf.Graph()\n",
    "with graph.as_default():\n",
    "\n",
    "  # Input data. For the training data, we use a placeholder that will be fed\n",
    "  # at run time with a training minibatch.\n",
    "  tf_train_dataset = tf.placeholder(tf.float32,\n",
    "                                    shape=(batch_size, image_size * image_size))\n",
    "  tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n",
    "  tf_valid_dataset = tf.constant(valid_dataset)\n",
    "  tf_test_dataset = tf.constant(test_dataset)\n",
    "  global_step = tf.Variable(0)\n",
    "\n",
    "  # Variables.\n",
    "  weights1 = tf.Variable(\n",
    "    tf.truncated_normal(\n",
    "        [image_size * image_size, num_hidden_nodes1],\n",
    "        stddev=np.sqrt(2.0 / (image_size * image_size)))\n",
    "    )\n",
    "  biases1 = tf.Variable(tf.zeros([num_hidden_nodes1]))\n",
    "  weights2 = tf.Variable(\n",
    "    tf.truncated_normal([num_hidden_nodes1, num_hidden_nodes2], stddev=np.sqrt(2.0 / num_hidden_nodes1)))\n",
    "  biases2 = tf.Variable(tf.zeros([num_hidden_nodes2]))\n",
    "  weights3 = tf.Variable(\n",
    "    tf.truncated_normal([num_hidden_nodes2, num_hidden_nodes3], stddev=np.sqrt(2.0 / num_hidden_nodes2)))\n",
    "  biases3 = tf.Variable(tf.zeros([num_hidden_nodes3]))\n",
    "  weights4 = tf.Variable(\n",
    "    tf.truncated_normal([num_hidden_nodes3, num_labels], stddev=np.sqrt(2.0 / num_hidden_nodes3)))\n",
    "  biases4 = tf.Variable(tf.zeros([num_labels]))\n",
    "  \n",
    "  # Training computation.\n",
    "  lay1_train = tf.nn.relu(tf.matmul(tf_train_dataset, weights1) + biases1)\n",
    "  drop1 = tf.nn.dropout(lay1_train, 0.5)\n",
    "  lay2_train = tf.nn.relu(tf.matmul(drop1, weights2) + biases2)\n",
    "  drop2 = tf.nn.dropout(lay2_train, 0.5)\n",
    "  lay3_train = tf.nn.relu(tf.matmul(drop2, weights3) + biases3)\n",
    "  drop3 = tf.nn.dropout(lay3_train, 0.5)\n",
    "  logits = tf.matmul(drop3, weights4) + biases4\n",
    "  loss = tf.reduce_mean(\n",
    "    tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))\n",
    "    \n",
    "  # Optimizer.\n",
    "  learning_rate = tf.train.exponential_decay(0.5, global_step, 5000, 0.80, staircase=True)\n",
    "  optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)\n",
    "  \n",
    "  # Predictions for the training, validation, and test data.\n",
    "  train_prediction = tf.nn.softmax(logits)\n",
    "  lay1_valid = tf.nn.relu(tf.matmul(tf_valid_dataset, weights1) + biases1)\n",
    "  lay2_valid = tf.nn.relu(tf.matmul(lay1_valid, weights2) + biases2)\n",
    "  lay3_valid = tf.nn.relu(tf.matmul(lay2_valid, weights3) + biases3)\n",
    "  valid_prediction = tf.nn.softmax(tf.matmul(lay3_valid, weights4) + biases4)\n",
    "  lay1_test = tf.nn.relu(tf.matmul(tf_test_dataset, weights1) + biases1)\n",
    "  lay2_test = tf.nn.relu(tf.matmul(lay1_test, weights2) + biases2)\n",
    "  lay3_test = tf.nn.relu(tf.matmul(lay2_test, weights3) + biases3)\n",
    "  test_prediction = tf.nn.softmax(tf.matmul(lay3_test, weights4) + biases4)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Initialized\n",
      "Minibatch loss at step 0: 2.644048\n",
      "Minibatch accuracy: 10.2%\n",
      "Validation accuracy: 22.9%\n",
      "Minibatch loss at step 500: 0.505960\n",
      "Minibatch accuracy: 85.2%\n",
      "Validation accuracy: 84.5%\n",
      "Minibatch loss at step 1000: 0.571871\n",
      "Minibatch accuracy: 80.5%\n",
      "Validation accuracy: 85.0%\n",
      "Minibatch loss at step 1500: 0.519562\n",
      "Minibatch accuracy: 85.2%\n",
      "Validation accuracy: 85.4%\n",
      "Minibatch loss at step 2000: 0.388242\n",
      "Minibatch accuracy: 89.8%\n",
      "Validation accuracy: 86.4%\n",
      "Minibatch loss at step 2500: 0.469020\n",
      "Minibatch accuracy: 82.8%\n",
      "Validation accuracy: 86.6%\n",
      "Minibatch loss at step 3000: 0.533019\n",
      "Minibatch accuracy: 83.6%\n",
      "Validation accuracy: 86.6%\n",
      "Minibatch loss at step 3500: 0.550292\n",
      "Minibatch accuracy: 84.4%\n",
      "Validation accuracy: 87.2%\n",
      "Minibatch loss at step 4000: 0.479638\n",
      "Minibatch accuracy: 86.7%\n",
      "Validation accuracy: 87.4%\n",
      "Minibatch loss at step 4500: 0.430816\n",
      "Minibatch accuracy: 88.3%\n",
      "Validation accuracy: 87.2%\n",
      "Minibatch loss at step 5000: 0.413097\n",
      "Minibatch accuracy: 88.3%\n",
      "Validation accuracy: 87.7%\n",
      "Minibatch loss at step 5500: 0.483560\n",
      "Minibatch accuracy: 82.8%\n",
      "Validation accuracy: 87.8%\n",
      "Minibatch loss at step 6000: 0.562747\n",
      "Minibatch accuracy: 82.0%\n",
      "Validation accuracy: 88.3%\n",
      "Minibatch loss at step 6500: 0.346888\n",
      "Minibatch accuracy: 90.6%\n",
      "Validation accuracy: 88.3%\n",
      "Minibatch loss at step 7000: 0.523011\n",
      "Minibatch accuracy: 81.2%\n",
      "Validation accuracy: 88.3%\n",
      "Minibatch loss at step 7500: 0.518974\n",
      "Minibatch accuracy: 84.4%\n",
      "Validation accuracy: 88.6%\n",
      "Minibatch loss at step 8000: 0.692198\n",
      "Minibatch accuracy: 80.5%\n",
      "Validation accuracy: 88.8%\n",
      "Minibatch loss at step 8500: 0.438252\n",
      "Minibatch accuracy: 89.8%\n",
      "Validation accuracy: 88.6%\n",
      "Minibatch loss at step 9000: 0.436238\n",
      "Minibatch accuracy: 86.7%\n",
      "Validation accuracy: 88.8%\n",
      "Minibatch loss at step 9500: 0.430096\n",
      "Minibatch accuracy: 85.9%\n",
      "Validation accuracy: 88.9%\n",
      "Minibatch loss at step 10000: 0.506851\n",
      "Minibatch accuracy: 85.9%\n",
      "Validation accuracy: 88.8%\n",
      "Minibatch loss at step 10500: 0.352449\n",
      "Minibatch accuracy: 91.4%\n",
      "Validation accuracy: 89.2%\n",
      "Minibatch loss at step 11000: 0.386867\n",
      "Minibatch accuracy: 91.4%\n",
      "Validation accuracy: 89.3%\n",
      "Minibatch loss at step 11500: 0.369807\n",
      "Minibatch accuracy: 86.7%\n",
      "Validation accuracy: 89.3%\n",
      "Minibatch loss at step 12000: 0.622503\n",
      "Minibatch accuracy: 85.2%\n",
      "Validation accuracy: 89.4%\n",
      "Minibatch loss at step 12500: 0.330038\n",
      "Minibatch accuracy: 90.6%\n",
      "Validation accuracy: 89.4%\n",
      "Minibatch loss at step 13000: 0.437459\n",
      "Minibatch accuracy: 87.5%\n",
      "Validation accuracy: 89.4%\n",
      "Minibatch loss at step 13500: 0.383894\n",
      "Minibatch accuracy: 89.1%\n",
      "Validation accuracy: 89.4%\n",
      "Minibatch loss at step 14000: 0.422878\n",
      "Minibatch accuracy: 84.4%\n",
      "Validation accuracy: 89.6%\n",
      "Minibatch loss at step 14500: 0.470360\n",
      "Minibatch accuracy: 85.9%\n",
      "Validation accuracy: 89.7%\n",
      "Minibatch loss at step 15000: 0.400381\n",
      "Minibatch accuracy: 89.1%\n",
      "Validation accuracy: 89.6%\n",
      "Minibatch loss at step 15500: 0.422781\n",
      "Minibatch accuracy: 86.7%\n",
      "Validation accuracy: 89.6%\n",
      "Minibatch loss at step 16000: 0.276475\n",
      "Minibatch accuracy: 91.4%\n",
      "Validation accuracy: 89.8%\n",
      "Minibatch loss at step 16500: 0.233879\n",
      "Minibatch accuracy: 93.8%\n",
      "Validation accuracy: 89.9%\n",
      "Minibatch loss at step 17000: 0.289002\n",
      "Minibatch accuracy: 92.2%\n",
      "Validation accuracy: 89.7%\n",
      "Minibatch loss at step 17500: 0.200542\n",
      "Minibatch accuracy: 93.8%\n",
      "Validation accuracy: 90.0%\n",
      "Minibatch loss at step 18000: 0.277440\n",
      "Minibatch accuracy: 93.0%\n",
      "Validation accuracy: 89.9%\n",
      "Minibatch loss at step 18500: 0.352895\n",
      "Minibatch accuracy: 86.7%\n",
      "Validation accuracy: 89.9%\n",
      "Minibatch loss at step 19000: 0.293568\n",
      "Minibatch accuracy: 90.6%\n",
      "Validation accuracy: 90.1%\n",
      "Minibatch loss at step 19500: 0.369922\n",
      "Minibatch accuracy: 89.8%\n",
      "Validation accuracy: 89.9%\n",
      "Minibatch loss at step 20000: 0.426287\n",
      "Minibatch accuracy: 85.2%\n",
      "Validation accuracy: 90.2%\n",
      "Test accuracy: 95.6%\n"
     ]
    }
   ],
   "source": [
    "num_steps = 20001  # 20000 optimizer steps + step 0 (so the final report lands on a round step count)\n",
    "\n",
    "with tf.Session(graph=graph) as session:\n",
    "  # NOTE: tf.initialize_all_variables() is deprecated;\n",
    "  # tf.global_variables_initializer() is the documented replacement\n",
    "  # (available since TF 0.12) and initializes the same variable set.\n",
    "  tf.global_variables_initializer().run()\n",
    "  print(\"Initialized\")\n",
    "  for step in range(num_steps):\n",
    "    # Pick an offset within the training data, which has been randomized.\n",
    "    # The modulo makes the offset wrap around, so epochs reuse the same\n",
    "    # minibatch boundaries. Note: we could use better randomization across epochs.\n",
    "    offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n",
    "    # Generate a minibatch.\n",
    "    batch_data = train_dataset[offset:(offset + batch_size), :]\n",
    "    batch_labels = train_labels[offset:(offset + batch_size), :]\n",
    "    # Prepare a dictionary telling the session where to feed the minibatch.\n",
    "    # The key of the dictionary is the placeholder node of the graph to be fed,\n",
    "    # and the value is the numpy array to feed to it.\n",
    "    feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}\n",
    "    _, l, predictions = session.run(\n",
    "      [optimizer, loss, train_prediction], feed_dict=feed_dict)\n",
    "    if (step % 500 == 0):\n",
    "      # Report minibatch loss/accuracy plus full validation accuracy every 500 steps.\n",
    "      print(\"Minibatch loss at step %d: %f\" % (step, l))\n",
    "      print(\"Minibatch accuracy: %.1f%%\" % accuracy(predictions, batch_labels))\n",
    "      print(\"Validation accuracy: %.1f%%\" % accuracy(\n",
    "        valid_prediction.eval(), valid_labels))\n",
    "  print(\"Test accuracy: %.1f%%\" % accuracy(test_prediction.eval(), test_labels))"
   ]
  }
 ],
 "metadata": {
  "colab": {
   "default_view": {},
   "name": "3_regularization.ipynb",
   "provenance": [],
   "version": "0.3.2",
   "views": {}
  },
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}
