{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "kR-4eNdK6lYS"
   },
   "source": [
    "Deep Learning\n",
    "=============\n",
    "\n",
    "Assignment 3\n",
    "------------\n",
    "\n",
    "Previously in `2_fullyconnected.ipynb`, you trained a logistic regression and a neural network model.\n",
    "\n",
    "The goal of this assignment is to explore regularization techniques."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "Largely based on:\n",
    "https://github.com/rndbrtrnd/udacity-deep-learning/blob/master/3_regularization.ipynb"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "cellView": "both",
    "colab": {
     "autoexec": {
      "startup": false,
      "wait_interval": 0
     }
    },
    "colab_type": "code",
    "id": "JLpLa8Jt7Vu4"
   },
   "outputs": [],
    "source": [
     "# These are all the modules we'll be using later. Make sure you can import them\n",
     "# before proceeding further.\n",
     "from __future__ import print_function\n",
     "\n",
     "import numpy as np\n",
     "import tensorflow as tf  # NOTE: this notebook uses TF1-style graph/placeholder/Session APIs throughout\n",
     "import matplotlib.pyplot as plt\n",
     "%matplotlib inline\n",
     "\n",
     "# six.moves.cPickle selects the fastest available pickle implementation on Py2/Py3\n",
     "from six.moves import cPickle as pickle"
    ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "1HrCK6e17WzV"
   },
   "source": [
    "First reload the data we generated in `1_notmnist.ipynb`."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "cellView": "both",
    "colab": {
     "autoexec": {
      "startup": false,
      "wait_interval": 0
     },
     "output_extras": [
      {
       "item_id": 1
      }
     ]
    },
    "colab_type": "code",
    "executionInfo": {
     "elapsed": 11777,
     "status": "ok",
     "timestamp": 1449849322348,
     "user": {
      "color": "",
      "displayName": "",
      "isAnonymous": false,
      "isMe": true,
      "permissionId": "",
      "photoUrl": "",
      "sessionId": "0",
      "userId": ""
     },
     "user_tz": 480
    },
    "id": "y3-cj1bpmuxc",
    "outputId": "e03576f1-ebbe-4838-c388-f1777bcc9873"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training set (200000, 28, 28) (200000,)\n",
      "Validation set (10000, 28, 28) (10000,)\n",
      "Test set (10000, 28, 28) (10000,)\n"
     ]
    }
   ],
    "source": [
     "# Reload the pickled notMNIST splits produced by 1_notmnist.ipynb.\n",
     "# NOTE(review): hardcoded absolute Windows path -- adjust to your local copy of notMNIST.pickle.\n",
     "pickle_file = r'D:\\GitHub\\Data\\notMNIST\\notMNIST.pickle'\n",
     "\n",
     "# pickle.load is acceptable here only because the file was generated locally by a trusted notebook.\n",
     "with open(pickle_file, 'rb') as f:\n",
     "    save = pickle.load(f)\n",
     "    train_dataset = save['train_dataset']\n",
     "    train_labels = save['train_labels']\n",
     "    valid_dataset = save['valid_dataset']\n",
     "    valid_labels = save['valid_labels']\n",
     "    test_dataset = save['test_dataset']\n",
     "    test_labels = save['test_labels']\n",
     "    del save  # hint to help gc free up memory\n",
     "    \n",
     "    print('Training set', train_dataset.shape, train_labels.shape)\n",
     "    print('Validation set', valid_dataset.shape, valid_labels.shape)\n",
     "    print('Test set', test_dataset.shape, test_labels.shape)"
    ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "L7aHrm6nGDMB"
   },
   "source": [
    "Reformat into a shape that's more adapted to the models we're going to train:\n",
    "- data as a flat matrix,\n",
    "- labels as float 1-hot encodings."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "cellView": "both",
    "colab": {
     "autoexec": {
      "startup": false,
      "wait_interval": 0
     },
     "output_extras": [
      {
       "item_id": 1
      }
     ]
    },
    "colab_type": "code",
    "executionInfo": {
     "elapsed": 11728,
     "status": "ok",
     "timestamp": 1449849322356,
     "user": {
      "color": "",
      "displayName": "",
      "isAnonymous": false,
      "isMe": true,
      "permissionId": "",
      "photoUrl": "",
      "sessionId": "0",
      "userId": ""
     },
     "user_tz": 480
    },
    "id": "IRSyYiIIGIzS",
    "outputId": "3f8996ee-3574-4f44-c953-5c8a04636582"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training set (200000, 784) (200000, 10)\n",
      "Validation set (10000, 784) (10000, 10)\n",
      "Test set (10000, 784) (10000, 10)\n"
     ]
    }
   ],
    "source": [
     "image_size = 28\n",
     "num_labels = 10\n",
     "\n",
     "def reformat(dataset, labels):\n",
     "    \"\"\"Flatten each 28x28 image to a 784-d float32 vector and one-hot encode the labels.\"\"\"\n",
     "    dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32)\n",
     "    \n",
     "    # Map 1 to [0.0, 1.0, 0.0 ...], 2 to [0.0, 0.0, 1.0 ...]\n",
     "    # Broadcasting arange(num_labels) against labels[:, None] yields a boolean one-hot matrix.\n",
     "    labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)\n",
     "    return dataset, labels\n",
     "train_dataset, train_labels = reformat(train_dataset, train_labels)\n",
     "valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)\n",
     "test_dataset, test_labels = reformat(test_dataset, test_labels)\n",
     "print('Training set', train_dataset.shape, train_labels.shape)\n",
     "print('Validation set', valid_dataset.shape, valid_labels.shape)\n",
     "print('Test set', test_dataset.shape, test_labels.shape)"
    ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "cellView": "both",
    "colab": {
     "autoexec": {
      "startup": false,
      "wait_interval": 0
     }
    },
    "colab_type": "code",
    "id": "RajPLaL_ZW6w"
   },
   "outputs": [],
    "source": [
     "def accuracy(predictions, labels):\n",
     "    \"\"\"Percentage of rows where argmax(predictions) matches argmax of the one-hot labels.\"\"\"\n",
     "    return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))/ predictions.shape[0])"
    ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "sgLbUAQ1CW-1"
   },
   "source": [
    "---\n",
    "Problem 1\n",
    "---------\n",
    "\n",
    "Introduce and tune L2 regularization for both logistic and neural network models. Remember that L2 amounts to adding a penalty on the norm of the weights to the loss. In TensorFlow, you can compute the L2 loss for a tensor `t` using `nn.l2_loss(t)`. The right amount of regularization should improve your validation / test accuracy.\n",
    "\n",
    "---"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
    "source": [
     "# Logistic model\n",
     "\n",
     "batch_size = 128\n",
     "\n",
     "graph = tf.Graph()\n",
     "with graph.as_default():\n",
     "\n",
     "    # Input data. For the training data, we use a placeholder that will be fed\n",
     "    # at run time with a training minibatch.\n",
     "    tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_size * image_size))\n",
     "    tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n",
     "    tf_valid_dataset = tf.constant(valid_dataset)\n",
     "    tf_test_dataset = tf.constant(test_dataset)\n",
     "    # The L2 strength is a placeholder so different values can be fed without rebuilding the graph.\n",
     "    beta_regul = tf.placeholder(tf.float32)\n",
     "\n",
     "    # Variables.\n",
     "    weights = tf.Variable(tf.truncated_normal([image_size * image_size, num_labels]))\n",
     "    biases = tf.Variable(tf.zeros([num_labels]))\n",
     "\n",
     "    # Training computation.\n",
     "    # Cross-entropy loss plus an L2 penalty on the weights (biases are left unregularized).\n",
     "    logits = tf.matmul(tf_train_dataset, weights) + biases\n",
     "    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=tf_train_labels, logits=logits)) + beta_regul * tf.nn.l2_loss(weights)\n",
     "\n",
     "    # Optimizer.\n",
     "    optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)\n",
     "\n",
     "    # Predictions for the training, validation, and test data.\n",
     "    train_prediction = tf.nn.softmax(logits)\n",
     "    valid_prediction = tf.nn.softmax(tf.matmul(tf_valid_dataset, weights) + biases)\n",
     "    test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)"
    ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Initialized\n",
      "Minibatch loss at step 0: 21.881399\n",
      "Minibatch accuracy: 7.0%\n",
      "Validation accuracy: 10.7%\n",
      "Minibatch loss at step 500: 3.045356\n",
      "Minibatch accuracy: 77.3%\n",
      "Validation accuracy: 75.4%\n",
      "Minibatch loss at step 1000: 1.385960\n",
      "Minibatch accuracy: 85.2%\n",
      "Validation accuracy: 77.8%\n",
      "Minibatch loss at step 1500: 1.449429\n",
      "Minibatch accuracy: 77.3%\n",
      "Validation accuracy: 79.7%\n",
      "Minibatch loss at step 2000: 0.777711\n",
      "Minibatch accuracy: 89.1%\n",
      "Validation accuracy: 80.6%\n",
      "Minibatch loss at step 2500: 0.882378\n",
      "Minibatch accuracy: 75.0%\n",
      "Validation accuracy: 80.8%\n",
      "Minibatch loss at step 3000: 0.719494\n",
      "Minibatch accuracy: 83.6%\n",
      "Validation accuracy: 81.8%\n",
      "Test accuracy: 88.5%\n"
     ]
    }
   ],
    "source": [
     "num_steps = 3001\n",
     "\n",
     "with tf.Session(graph=graph) as session:\n",
     "    tf.global_variables_initializer().run()\n",
     "    print(\"Initialized\")\n",
     "    for step in range(num_steps):\n",
     "        # Pick an offset within the training data, which has been randomized.\n",
     "        # Note: we could use better randomization across epochs.\n",
     "        offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n",
     "        \n",
     "        # Generate a minibatch.\n",
     "        batch_data = train_dataset[offset:(offset + batch_size), :]\n",
     "        batch_labels = train_labels[offset:(offset + batch_size), :]\n",
     "        \n",
     "        # Prepare a dictionary telling the session where to feed the minibatch.\n",
     "        # The key of the dictionary is the placeholder node of the graph to be fed,\n",
     "        # and the value is the numpy array to feed to it.\n",
     "        # The L2 strength is fixed at 1e-3 for this run; the next cell sweeps it.\n",
     "        feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels, beta_regul : 1e-3}\n",
     "        _, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict)\n",
     "        if (step % 500 == 0):\n",
     "            print(\"Minibatch loss at step %d: %f\" % (step, l))\n",
     "            print(\"Minibatch accuracy: %.1f%%\" % accuracy(predictions, batch_labels))\n",
     "            print(\"Validation accuracy: %.1f%%\" % accuracy(valid_prediction.eval(), valid_labels))\n",
     "    print(\"Test accuracy: %.1f%%\" % accuracy(test_prediction.eval(), test_labels))"
    ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
    "source": [
     "# Sweep the L2 regularization strength and record the resulting accuracy for each value\n",
     "# (plotted in the next cell). A fresh session re-initializes the variables for every run.\n",
     "# NOTE(review): accuracy is measured on the TEST set here; selecting a hyperparameter\n",
     "# should normally be done on the validation set -- confirm this is intentional.\n",
     "\n",
     "num_steps = 3001\n",
     "regul_val = [pow(10, i) for i in np.arange(-4, -2, 0.1)]  # 20 values, 1e-4 .. ~1e-2 on a log grid\n",
     "accuracy_val = []\n",
     "\n",
     "for regul in regul_val:\n",
     "    with tf.Session(graph=graph) as session:\n",
     "        tf.global_variables_initializer().run()\n",
     "        for step in range(num_steps):\n",
     "            # Pick an offset within the training data, which has been randomized.\n",
     "            # Note: we could use better randomization across epochs.\n",
     "            offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n",
     "\n",
     "            # Generate a minibatch.\n",
     "            batch_data = train_dataset[offset:(offset + batch_size), :]\n",
     "            batch_labels = train_labels[offset:(offset + batch_size), :]\n",
     "\n",
     "            # Prepare a dictionary telling the session where to feed the minibatch.\n",
     "            # The key of the dictionary is the placeholder node of the graph to be fed,\n",
     "            # and the value is the numpy array to feed to it.\n",
     "            feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels, beta_regul : regul}\n",
     "            _, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict)\n",
     "        accuracy_val.append(accuracy(test_prediction.eval(), test_labels))"
    ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXoAAAEMCAYAAADK231MAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjAsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+17YcXAAAgAElEQVR4nO3deXxU5bnA8d+TnWwsSUjYQ9iRVaIgCETADRfc96q1iNjFVu/1XtvacuvtYlttbd0odalUBQUUd8UtLLLIrmwCCUkIYQlLCFlJMu/945xwhzAhk2SSMzN5vp/PfJI5533Pec7MO8+8854z74gxBqWUUsErxOkAlFJKtSxN9EopFeQ00SulVJDTRK+UUkFOE71SSgU5TfRKKRXkNNErvyciUSJiRKS707E0loisFpE7mlE/S0Qu8HFMkSJSIiJdfbldt+3/VURm2v9fJiK7fbDNJscsIr8RkWe8KPeciNzdpAD9nCZ6H7AbYO3NJSLlbvdvb8Z2m5UkVOAzxvQxxqxqzjbqtiNjTKUxJtYYU9D8CM/YVzfgBuAlX27X25g9vbEYY2YZY37sxW7+BPyPiIQ2J1Z/pIneB+wGGGuMiQXygKvclr3mdHwtRUTCnI6hufz1GPw1Li/cAyw2xpx0OpDGMsbkAHuByx0Oxec00bcCEQkVkV+JSLaIHBaR10Skg70uRkTmi8hRESkSkTUi0lFEngTOA16wPxk86WG7YSKySEQO2nW/FJEBbutjROTvIrJXRI6LyNLaBCIiGXZP77iI5InIbfby03p/IjJTRD6z/68dQrlfRLKALfby50UkX0SKReRrERlTJ8ZZ9rEXi8haEUkRkRdF5Hd1jufT2o/89bhGRHJEpFBEfieWaHu7/dy2011Eymof4zr7mCkiX4jIsyJyDHjEXn6fiHxnPw8f2D3T2jpXiMgu+zF+yv0xEpHHReQFt7IDRaTaU/D2ukx7H4Ui8oqIxLmtPyAi/ykiW4Fit2UX2m3I/ZNjqf1cpIhIkoh8ZG/zqIi8IyJd7PpntCOpMxQmIp1E5HW7/h4R+S8REbfH63O7HRWJNZQ05SzP0eXA0vpWishQEVlub+sbEbncbV1n+ziK7cf4cQ9trzbmaSKyQ0RO2O37ARFJAN4G0twepwQPz5HHtm/LBK44y/EFJmOM3nx4A3KAKXWWPQIsB7oCUcC/gJftdT8FFgLtgDCsF2WMvW41cMdZ9hUG3AXE2tt9Hljttv5FYAmQAoQC4+2/fYES4Hp7G0nAcE/7BGYCn9n/RwEG+ADoALSzl98JdATCgV9i9YrC7XW/Ajba+wwBRtp1JwB7ALHLdQXKgE4ejrN2v5/YdXsD2bVxYg0T/Mat/H8DC+p5zGYC1cC99mPRDrgF2A70t4/ht8CXdvkU+7G60l73X0CV274fB15w2/5AoNrt/mq3sgOBSUCEvd3VwONuZQ8Aa+3Hop3bsgs9HMdfgM/sY0gGptnH0h54B5jvKYY6j2d3+/6bwAK7HfW1n5fb3R6vKvs5DgUeBHLO0iZPAEPd7l8G7Hbbby7wH/Zjean92Pa21y8G5trHMQzYz5ltrzbmI8D59v8JwMi6+3OL4dRzxFnavr3+NmCl03nE53nJ6QCC7YbnRL8HGOd2vzdWUhPgh1g9oCEetnXWRO+hfArgsl8U4fYLdICHcr8B5tWzDW8S/dizxCD2sQ2w7+cCl9ZTLhsYb9//T+CterZZu98Mt2UPAR/Y/090f3ED3wJX17OtmcDOOsu+rE1s9v3axy4ZmIGd9O11IcAhmpDoPcRyC7DK7f4B4LY6Zc5I9FhJdzce3hTt9WOA/Wd5Tk8lTSASqAHS3Nb/FPjY7fHa4rauk123g4f9htrrUt2WuSf6i+32IG7r38bqCEXZbbeX27onPLS92kR/EPg+EFcnhoYSfb1t315/FbDN29dcoNx06KaF2R+BewAf2h9Xi7B6
uCFYPZEXsRL9Qnv44/fi5ckge1jkidphEWAHVgJNALpg9ViyPVTtAWQ147D21onj5/awx3HgGNaLMtE+9m6e9mWsV9VcoHaY6A7g343Yby5WzxdgGRAqIheIyAisY//I2/iBXsBst+enEKvX393ex6nyxhgXsK+BOD0Ska4iskBE9tnP1wtAYgOx1d3G+cCTwDXGmKP2sjgReckehijG+hRXd7v1ScFqi3luy3KxnrdaB9z+L7P/xtbdkDGmBqtHH1d3na0rkGc/93X3lYLVdvPd1p3tsbgGq1eeZw/FpZ+lrLuG2n4cUOTltgKGJvoWZjfqfcAkY0wHt1uUMeawsa4m+LUxZiDWcMaNWD09sHowZ/N94BLgIqyP7APt5YL1sbcaSPNQby/Qp55tlgLRbvdTPB1W7T8icjHwE+BarGGVTkA5Vq+t9tjr29dc4AYRGYX1AvygnnK1erj93xMogDPeNL6HNWxRdZbt1H1c9wJ313l+2hlj1mM9jqcu6xSREE5Pgt48XrX+bJcfYoyJB6ZjPVdni+0Ue9z9LWC6MWaL26pH7BjPs7d7SZ3tnq0dHcDqSfd0W9aTJr6ZAd9gDYF5UlBnP+77OoAVp/tj24N6GGNWGWOuxPrUtQSYV7uqgfjO1vYBBgGbG9hGwNFE3zpmA4+LSA84ddLpKvv/KSIy2E4gxVjJucaudxDPibpWHFCBNV4ZgzW2DICd6OYCfxORZPtk3oX2p4W5wJUicq29PElEhtlVN2El3ygRGQjc3cCxxWENcxRijT0/htWjr/UC8HsRSRPLSLFPkhpjsoFtwMvAG6bhKzX+W0Tai0gq8GPgDbd1c4GbgFvt/xtjNvCo2CeyxToZfr297l1gtIhMFetE9kNY5yNqbQIuEpFuItIR6/xAfeKwxoeLRaSnvS2viEgE1jDHP4wx73jYbhlQJCKJwKN11tfbjowxlfZ2fy/Wyfs+WEM3r3obWx0fYg2lebIcCBGRn9mfRi/GelNaYIypAN4DfmO3vSFY4+VnsOO8RUTisdreCU5/zXQWkTM+cdjO1vaxYz/bp8HA5PTYUbDd8DxGH4qVAHZhNcrdwCx73V328lKsXs2TQIi9bqJd9hjwJw/7ao/VCy7BOg9wN6ePY8YAz2L1pIqwxqLD7HWTsE78FWN9fL7VXp4MfGHHuQzrzcPjOKm9LBxryKUYq2f2M9zGle31j9mPywlgDZDsVn+6vc0LzvKY1u73x/Z2DmONu4bUKbcC+K6B5+fUOYc6y38A1F7tkgvMdlt3tf08FAFPARuAG+11IcA/gePAd8B91H8ydgTWG0MJsN5uE+7nFjyNxx8ALsT6tGbsuu63zli94hX2/R1Y533cYzitHdV9HrGG+ubbj2su8HP+/yT5aY+XpzZQJ96uWMNAEfb908bMgeF2rMexzqVc4bYuBeuE+wn7cXuS/z8P435eIQarF3/Mfr7WAKPtcoL1JnXEfr46ceZ5lPrafi/7fpjTecTXt9onUylHiMglwHPGmL4+2NbrWCfSfttg4abvIwwr+V5lmvlFpmAlIn/BOuE9u5nb+RsQZYy5zzeRNbi/Z4H1xhifftnLH2iiV46xhyPeApYZY/7UzG31xeppDzLGNHV8ub5tXw6sBCqxLh+9C+hrAvBLQf7MHq4xWMN5F2B9Wr3VGPOxo4EFAR2jV46wr445hjW+/Gwzt/UnrCuZHvN1krfVXvN/CJgMXKtJvkW0xxqnL8UafvmtJnnf0B69UkoFOe3RK6VUkNNEr5RSQc7vZshLTEw0qampTa5fWlpKTEyM7wJSqhG0/SmnrF+//rAxJsnTOr9L9Kmpqaxbt67J9TMzM8nIyPBdQEo1grY/5RQRya1vnQ7dKKVUkNNEr5RSQU4TvVJKBTlN9EopFeQ00SulVJDTRK+UUkFOE71SQSL3SCmHTlQ4HYbyQ353Hb1SqvGyCku4+ukVADwydRC3n9+TkJC6P16l2irt0SsV4MpOVvPDVzcQ
ERbC8B4d+NXiLdz6z9XsOVzqdGjKT2iiVyqAGWN49O0t7Dx0gqduGclr00fzx+uHsm1/MZc9tYw5y7KornE5HaZymCZ6pQLY/LV7eWvjPh6Y1I+J/ZMQEW4+ryefPTSR8f2S+P2HO7j++ZV8d+CE06EqB2miVypAbdl3nFnvbmV8v0QemNzvtHXJ8VH8885RPH3rSPKPlXPl08t56rOdnKzW3n1bpIleqQB0vKyK+19bT0JMBE/dPIJQDydeRYSrhnfl04cmMnVoF576bBdXP7OCzXuLHIhYOUkTvVIBxuUy/MeCTewvquCZ284lITbyrOU7xUTwt1tG8sKd6RSVVXHtc1/xhw+3U1FV00oRK6dpolcqwPxjWTafbT/EL68YxKheHb2uN2VwMksemsDN5/XgH8uyueypZazJPtKCkSp/oYleqQCyKusIf/5kB1cM68LdY1MbXT8+Kpw/XDeM16ePxmXg5jmreXTxt5RUVvs+WOU3vEr0IvKgiGwVkS0iMk9EokRksohsEJFNIrJCRPp6qJcqIuV2mU0iMtv3h6BU23CouIKfzNtIamIMf7x+GCJN/0LU2L6JfPyz8fzgwt68tiaPS/6ylMzvDvkwWuVPGkz0ItINeABIN8YMAUKBW4DngduNMSOA14FH69lEljFmhH2b6aO4lWpTqmtc/HjeRkoqq3j+9lHERjb/S+3REWH86srBLLp/LNGRYdz98lru+/c6PVkbhLwdugkD2olIGBANFAAGiLfXt7eXKaVawBNLdvL1nqP8/tqhDEiJ8+m2z+3ZkQ8euJAHp/RnVdYRpj37Fbf9czXLdhZijPHpvpQzxJsnUkR+CvwOKAeWGGNuF5HxwGJ7WTEwxhhTXKdeKrAV2GmXedQYs9zD9mcAMwCSk5NHzZ8/v8kHVFJSQmxsbJPrK9UcLdH+Nh6q5m8bKsnoEcbd55z9CpvmKq82ZO6t5pOcKooqDb3iQ5jaO5z05FCPl3Aq/3HRRRetN8ake1rXYKIXkY7AIuBmoAhYACwErgP+aIxZIyIPAwOMMdPr1I0EYo0xR0RkFNYbwzl13xDcpaenG/1xcBWofN3+8o6UccXTy+mVEM3CmWOJCg/12bbPprK6hnc2FjB7WRbZhaX07BTNvRPSuHFU91aLQTWOiNSb6L0ZupkC7DHGFBpjqoC3gHHAcGPMGrvMG8DYuhWNMZXGmCP2/+uBLKB/E45BqTanoqqG+19bjwDP3z6qVRNsZFgoN53Xg88enMg/vjeKTjER/GrxFi784xc888UujpdVtVosqvm8SfR5wBgRiRbrNP9kYBvQXkRqk/bFwPa6FUUkSURC7f/TgH5Atk8iVyrI/ea9bWwtKOYvN42gR6doR2IICREuPSeFt384lvkzxjCkW3ueWLKTsY9/zm/f38b+4+WOxKUap8FT9/bQzEJgA1ANbATmAPnAIhFxAceAewBE5GqsK3R+DUwAHhORaqAGmGmMOdoiR6JUEFm0Pp95X+dxf0YfpgxOdjocRIQxaQmMSUtgW0Ex/1iWxcsrc3hlVQ7XjOjGfRPT6NvZtyeJle94dTK2NekYvQpkvmh/Ow4Uc82zXzGiRwde/cFowkL983uNe4+W8cLybN5Yt5eKKhcXD05m6tAUOsdFkRQXSVJsJO3bhesPoLSSs43R6y9MKeVHTlRUcf+rG4iPCufvt4702yQP0KNTNL+ZNoQHJvfjlVW5vLIyh0+3HTytTFiIkBgbaSV+O/knxkWQFBtJUu0bgn2LiQht1pfAVP000SvlJ4wx/Peib8g7Wsbr00fTOS7K6ZC8khAbyUMX9+eHGX3YV1TO4ROVFJZUUnjC7VZSyaETFWwtOM7hkpPUuM4cSYgKD6FDuwiiI0OJiQgjOiKUmEj7b0TY/y/3sD7avt+9Yzs6REc48Cj4N030SvmBGpdh9tIsPvz2AD+/fCCj0xKcDqnRosJD6ZMUS5+ks3+PwOUyHCs7eerN4LD991BxJcUVVZSe
rKH8ZA2lldUUnqik9GQ1ZZU11t+TNR7fJNz17RzLeakdGdWrE+eldqRnp+g2/0lBE71SDqqoquHtjfuYsyybPYdLueycFGZMSHM6rBYVEiIkxEaSEBvJwJTG1TXGUFntosx+Iyivsv6WnayhpLKa3YdKWJdzlPe/2c+8r/cCkBQXSXqvjqSnWol/UJd4wv14SKwlaKJXygHFFVW8tjqPl77aQ+GJSoZ2a8+zt53LZUNS2nzv82xEhKjwUKLCQ+kUc+YQzaXnWH9dLsOuQyWszTnK+txjrM05ykdbDgDQLjyUkT07nEr+I3t2IC4qvDUPo9VpoleqFR0qruDFr/bw+uo8TlRWM75fIk/dPIKxfRI0wftQSIgwICWOASlx3DGmFwAHjlewLvco63KOsS73KM98uRuXgRCBgSnxnJfakQn9k5jQPynoevya6JVqBXsOlzJnWRaL1u+j2uXi8qFdmDmhD0O7t3c6tDYjpX0UVw7rypXDugJQUlnNprwi1uYcZV3uURasz+eVVbkkxkYwbUQ3rj+3O4O7xjew1cCgiV6pFrR5bxGzl2bx8dYDhIeGcGN6d+4dn0ZqYozTobV5sZFhXNgvkQv7JQJQVeNi6XeFLNqQz9xVOby4Yg+DusRzw6juTBvRlcQGfrLRn2miV8rHjDEs33WY2UuzWJl1hLioMO6f2Ie7x6UGzCWTbVF4aAhTBiczZXAyx0pP8t43BSxcn8//vr+NP3y4nYwBnblhVDcmDUwmIiywhnY00SvlI9U1Llbvr+bPT69ga0ExneMi+cXUgdx6fs+gP9kXbDrGRHDnBanceUEquw6eYOGGfN7esI/Pth+kQ3Q404Z35fpR3RnarX1AnFvRRK9UMx0rPckb6/by6upc8o9VkpYUxh+vH8o1I7sRGaZT+ga6fslx/PzyQTx8yQBW7D7MwvX5zFu7l1dW5dI/OZbrz+3OtSO70Tnefz+taaJXqom+yS9i7qpc3ttcQGW1i/N7d+KaXi4eummizu8ShMJCQ8gY0JmMAZ05Xl7F+98UsGh9Pn/4aAd//HgH4/slcecFvZg0sLPf9fI10SvVCBVVNXz47X7mrspl094ioiNCuX5Ud+68oBcDU+LJzMzUJN8GtG8Xzu2je3H76F5kFZbw1oZ83tqwjx+8so7RvTvx6BWD/eqKKk30SnlhX1E5r63OZf7avRwtPUlaYgyzrhrM9aO6E6/j721an6RYHr50ID+b0p/5X+fx1892cdUzK7huZDf+89IBdO3QzukQNdErVR9jDF/tPsIrq3L4fLs1K+PkQcnceUEvxvVJ1J67Ok14aAjfuyCVaSO78dyXWbz01R4++HY/945PY2ZGH2IjnUu3muiVqqO4ooq31uczd3Uu2YWldIqJYObEPtw2uifdOzrzS08qcMRHhfPI5QO5fXRP/vTJdzzz5W7mr93LQxf356b07o5MPa2JXinb7kMlvPzVHt7euI+ykzWM6NGBv9w0nKlDu+gPYqtG69EpmqdvHck941L57Qfb+cXb3/LKyhx+ccUgJvZPatVYNNErBXy+/SA/fG0DBrh6eFfuvKAXw7p3cDosFQRG9uzIwpkX8NGWAzz+0Q7ueulrJvRP4pdTBzEgpXV+flETvWrzFm/cx38s2Mw5XeN54a50/faq8jkRYerQLkwe1Jl/r8rl75/v4vK/LePm83rw4MX9W7zNBdb3eJXysbmrcvjZG5s4L7UjrwXQrzqpwBQZFsr08Wksffgi7hqbyoJ1+Vz050ye/nwX5SdrWmy/muhVm2SM4enPd/Hrd7YyZVAy//r++TpNgWo1HWMimHXVOXz60EQu7JfIk5/uZNKTmSxan48xZ/8FrabQRK/aHJfL8NsPtvPkpzu5bmQ3Zt9xrp5sVY7onRjDP76XzhszxpAUF8nC9fktsh8do1dtSnWNi0fe+paF6/O5e2wqv75ysF4Prxw3Oi2BxT8cx/HyqhaZPkETvWozKqpq+On8jXyy9SA/
ndyPn03p53dzkqi2KyRE6Ojh5xF9QRO9ahNKKqu579/r+Gr3EX595WDuubC30yEp1Wo00augd6z0JHf/ay1b9h3niRuHc8Oo7k6HpFSr0kSvgtrB4gq+9+Iacg6X8dzt53LpOSlOh6RUq9NEr4JW7pFS7nhxDUdLTvKv75/H2L6JToeklCO8urxSRB4Uka0iskVE5olIlIhMFpENIrJJRFaISN966v5cRHaLyHcicqlvw1fKsx0Hirlh9ipOVFTz2r1jNMmrNq3BRC8i3YAHgHRjzBAgFLgFeB643RgzAngdeNRD3cF22XOAy4DnREQvWFYtakPeMW7+x2pCBBbcdwEjeuicNapt8/YLU2FAOxEJA6KBAsAA8fb69vayuqYB840xlcaYPcBu4PzmhaxU/ZbvKuT2f66hQ3Q4C2eOpV9y60wapZQ/a3CM3hizT0SeAPKAcmCJMWaJiEwHPhSRcqAYGOOhejdgtdv9fHuZUj738Zb9/GTeRvokxTL3B+frvDVK2RpM9CLSEatn3hsoAhaIyB3AdcBUY8waEXkY+AswvW51D5s8YyIHEZkBzABITk4mMzOzMcdwmpKSkmbVV4GpqNLFfy0tp2dcCD85p4Zt61ezzYE4tP0pf+TNVTdTgD3GmEIAEXkLGAcMN8asscu8AXzsoW4+0MPtfnc8DPEYY+YAcwDS09NNRkaGt/GfITMzk+bUV4Hp9x9up9pk8+K9E0hNjHEsDm1/yh95M0afB4wRkWixvi8+GdgGtBeR/naZi4HtHuq+C9wiIpEi0hvoB3ztg7iVOuVo6UleXZ3L1cO7OprklfJX3ozRrxGRhcAGoBrYiNX7zgcWiYgLOAbcAyAiV2NdofNrY8xWEXkT642hGviRMablJl1WbdKLK7Ipr6rhx5M8XuGrVJvn1RemjDGzgFl1Fr9t3+qWfRerJ197/3fA75oRo1L1Ol5WxSsrc5k6pAt9O+sVNkp5ovPRq4D28so9lFRWa29eqbPQRK8C1omKKl5asYeLByczqEt8wxWUaqM00auANXdVLsUV1TwwqZ/ToSjl1zTRq4BUdrKaF1fsIWNAEkO7t3c6HKX8miZ6FZBeW53H0dKT/ER780o1SBO9CjgVVTXMWZ7NuL4JjOrV0elwlPJ7muhVwHlj7V4KT1Rqb14pL2miVwGlsrqG2UuzOC+1I6N7d3I6HKUCgiZ6FVAWrd/H/uMV/GRSP6wZOZRSDdFErwJGVY2L5zJ3M7xHB8b301+MUspbmuhVwFi8cR/5x8p5YFJf7c0r1Qia6FVAqHEZnsvM4pyu8Uwa2NnpcJQKKJroVUB4/5sC9hwu5Sfam1eq0TTRK7/nchme+WI3A5LjuGRwitPhKBVwNNErv/fx1gPsOlTCjyb1JSREe/NKNZYmeuXXjDE8/cVu0pJiuGJoF6fDUSogaaJXfu2z7YfYvr+YH2X0JVR780o1iSZ65bes3vwuenaKZtqIrk6Ho1TA0kSv/NbSnYV8k3+cH2b0ISxUm6pSTaWvHuWXasfmu3Vox3Xndnc6HKUCmiZ65ZdWZR1hfe4xZk5MIyJMm6lSzaGvIOWX/v7FLjrHRXJjeg+nQ1Eq4GmiV35nbc5RVmcf5b6JfYgKD3U6HKUCniZ65Xf+/vkuEmIiuO38nk6HolRQ0ESv/MqmvUUs33WY6ePTaBehvXmlfEETvfIrT3++iw7R4Xzvgl5Oh6JU0NBEr/zGln3H+XzHIe4Z15vYyDCnw1EqaGiiV37h0IkK/vjxDuIiw7hrbKrT4SgVVLTbpBxTUVXD59sPsWhDPkt3FlLjMvxi6kDatwt3OjSlgopXiV5EHgSmAwb4Fvg+8CkQZxfpDHxtjLnGQ90auw5AnjHm6uYGrQKXMYZNe4tYtCGfdzcVUFxRTUp8FDMmpHH9ud3p2znW6RCVCjoNJnoR6QY8AAw2xpSLyJvALcaY8W5lFgHv1LOJcmPMCJ9EqwLWgeMVvLUxn0Xr88kqLCUyLITL
hqRww6jujO2TqDNTKtWCvB26CQPaiUgVEA0U1K4QkThgElYvX6lTyk/WsGTbARauz2fF7sMYA+elduTe8WlMHdaF+CgdolGqNTSY6I0x+0TkCSAPKAeWGGOWuBW5FvjcGFNczyaiRGQdUA08boxZXLeAiMwAZgAkJyeTmZnZuKNwU1JS0qz6qnmMMewqcrFiXzVrD1RTXg0JUcJVaeGM6xpGcsxJKMtmw+psp0NtEdr+lD/yZuimIzAN6A0UAQtE5A5jzKt2kVuBF86yiZ7GmAIRSQO+EJFvjTFZ7gWMMXOAOQDp6ekmIyOj8Udiy8zMpDn1VdO9sjKHl77aQ+6RCqIjQpk6rDvXj+rGmN4JbeYnALX9KX/kzdDNFGCPMaYQQETeAsYCr4pIAnA+Vq/eI2NMgf03W0QygZFAVn3lVWB6/5sCZr27lfReHfnJpH5cPiSFGL0WXim/4M0rMQ8YIyLRWEM3k4F19robgfeNMRWeKtqfBsqMMZUikgiMA/7U/LCVPzlYXMEv397C8B4dmDdjDOH6IyFK+ZUGX5HGmDXAQmAD1mWSIdjDLMAtwDz38iKSLiK1QzmDgHUishn4EmuMfpuPYld+wBjDwwu/obK6hr/eNFyTvFJ+yKvP1saYWcAsD8szPCxbh3XNPcaYlcDQ5oWo/Nmra/JYtrOQx6adQ1qSXgOvlD/S7pdqsuzCEn73wTYm9E/ie2N0EjKl/JUmetUk1TUuHnxzM5Fhofz5hmGItI2rapQKRHpZhGqS5zKz2Ly3iGduG0lyfJTT4SilzkJ79KrRvskv4m+f72LaiK5cOayr0+EopRqgiV41SkVVDQ++sYmk2Egeu3qI0+EopbygQzeqUR7/aAdZhaW8+oPRtI/WuWqUCgTao1deW76rkH+tzOHusalc2C/R6XCUUl7SRK+8crysiocXfEOfpBgeuXyg0+EopRpBE73yyq/f3cLhkkr+evMIosJDnQ5HKdUImuhVg97bXMA7mwp4YHI/hnXv4HQ4SqlG0kSvzurA8QoeXbyFET068MOMPk6Ho5RqAk30ql7WhGWbOVnt4i83DSdMJyxTKiDpK1fV69+rc1m+6zC/uGKQTlimVADTRK88yios4fcfbmdi/yTuGN3T6bDG6awAAA/ASURBVHCUUs2giV6doarGxUNvbCIqXCcsUyoY6Ddj1Rme/XI3m/OP8+xt59JZJyxTKuBpj16dZvPeIp7+YjfXjuzGFcO6OB2OUsoHNNGrU8pP1vDgm5voHBfJ/1x9jtPhKKV8RIdu2jiXy3DwRAU5h8t4Y20e2YWlvDZ9NO3b6YRlSgULTfRtQHWNi4KiCnKOlJJ7tIzcw6XkHCkj90gpeUfLqKx2nSp738Q0xvXVCcuUCiaa6IPIoRMVbNl3nJzDVhLPOVJG3tEy9h4to9plTpWLDAshNSGG1MQYMgYk0SshhtSEGHolRNOjU7SDR6CUagma6IPE4ZJKJj+5lBMV1QDERobRKyGawV3iuXxICqkJMfRMiCY1IYbOcZGEhOglk0q1FZrog8TclTmcqKjmpbvTGd69A51iIvT6d6UUoIk+KJSdrGbu6lymDEpm0sBkp8NRSvkZvbwyCCxYl09RWRX3TUxzOhSllB/SRB/gqmtcvLAim5E9O5Deq6PT4Sil/JAm+gD38dYD7D1azn0T0nRMXinlkSb6AGaMYc6ybFITorl4cIrT4Sil/JRXiV5EHhSRrSKyRUTmiUiUiCwXkU32rUBEFtdT9y4R2WXf7vJt+G3b6uyjfJN/nOnj0wjVyyWVUvVo8KobEekGPAAMNsaUi8ibwC3GmPFuZRYB73io2wmYBaQDBlgvIu8aY4756gDasjnLskiIieCGUd2dDkUp5ce8HboJA9qJSBgQDRTUrhCROGAS4KlHfynwqTHmqJ3cPwUua17ICmDnwRN8+V0hd16QSlR4qNPhKKX8WIM9emPMPhF5AsgDyoElxpglbkWuBT43xhR7qN4N2Ot2
P99edhoRmQHMAEhOTiYzM9PrA6irpKSkWfUDxQvfVhIRAmmufDIz9zkdjrK1lfanAos3QzcdgWlAb6AIWCAidxhjXrWL3Aq8UF91D8vMGQuMmQPMAUhPTzcZGRkNR16PzMxMmlM/EBw4XsGaT7/g1tG9uOqSIU6Ho9y0hfanAo83QzdTgD3GmEJjTBXwFjAWQEQSgPOBD+qpmw/0cLvfHbdhH9U0L6/cQ43LMP1C/YKUUqph3iT6PGCMiESLdaH2ZGC7ve5G4H1jTEU9dT8BLhGRjvYng0vsZaqJTlRU8frqPC4f0oWeCTrTpFKqYQ0memPMGmAhsAH41q4zx159CzDPvbyIpIvIC3bdo8D/Amvt22P2MtVE87/ey4nKamZM0N68Uso7Xk1qZoyZhXWZZN3lGR6WrQOmu91/CXip6SGqWlU1Ll76ag+je3dieI8OToejlAoQ+s3YAPLe5gL2H6/QycuUUo2iiT5A1E530K9zLBn9OzsdjlIqgGiiDxDLdh1mx4ET3DshTX8dSinVKJroA8ScZVl0jotk2oiuToeilAowmugDwJZ9x/lq9xG+P643kWE63YFSqnE00QeAOcuyiYkI5bbRPZ0ORSkVgDTR+7n8Y2V88O1+bj2/J+3bhTsdjlIqAGmi93MvrtiDAPdc2NvpUJRSAUoTvR87XlbFG2v3ctXwrnTt0M7pcJRSAUoTvR97dU0uZSdruHe8fkFKKdV0muj9VEVVDS9/lcP4fokM7hrvdDhKqQCmid5PLd64j8Mlldw3oY/ToSilApwmej/kchnmLM9mcJd4xvVNcDocpVSA00Tvhz7fcYjswlLum5iG9RMASinVdJro/dCcZVl069COqUO7OB2KUioIaKL3MxvyjrE25xj3XNib8FB9epRSzaeZxM/MWZpNfFQYt5zXo+HCSinlBU30fiTncCmfbDvAHWN6ERPp1Y9/KaVUgzTR+5EXVmQTHhLC3WNTnQ5FKRVENNH7icMllSxYl8+1I7vROT7K6XCUUkFEE72feGVlDidrXNw7Qac7UEr5liZ6P1BSWc0rK3O4ZHAyfTvHOh2OUirIaKL3A/O/zqO4opqZE3W6A6WU72mid9jJahcvLN/DmLROjOzZ0elwlFJBSBO9wxZv2seB4gruz+jrdChKqSClid5BLpdh9tIsBneJZ0K/RKfDUUoFKU30Dvp0+0GyC0uZmdFHJy9TSrUYTfQOMcbwfGYWPTq1Y+qQFKfDUUoFMa8SvYg8KCJbRWSLiMwTkSix/E5EdorIdhF5oJ66NSKyyb6969vwA9eaPUfZtLeIGRP6EKaTlymlWlCDE6qISDfgAWCwMaZcRN4EbgEE6AEMNMa4RKRzPZsoN8aM8FnEQeL5zCwSYyO4cVR3p0NRSgU5b7uSYUA7EQkDooEC4H7gMWOMC8AYc6hlQgw+2wqKWbqzkO+P601UeKjT4SilglyDid4Ysw94AsgD9gPHjTFLgD7AzSKyTkQ+EpF+9Wwiyi6zWkSu8VnkAWz20ixiIkK5Y3Qvp0NRSrUB3gzddASmAb2BImCBiNwBRAIVxph0EbkOeAkY72ETPY0xBSKSBnwhIt8aY7Lq7GMGMAMgOTmZzMzMJh9QSUlJs+q3tENlLt7bXM6lqeFs/Porp8NRPubv7U+1Td5Mej4F2GOMKQQQkbeAsUA+sMgu8zbwsqfKxpgC+2+2iGQCI4GsOmXmAHMA0tPTTUZGRmOP45TMzEyaU7+l/WrxFsJC8/ifWyeQ0l5nqQw2/t7+VNvkzRh9HjBGRKLFuth7MrAdWAxMsstMBHbWrSgiHUUk0v4/ERgHbPNF4IHocEklb67by3Uju2uSV0q1mgZ79MaYNSKyENgAVAMbsXrf7YDXRORBoASYDiAi6cBMY8x0YBDwDxFxYb2pPG6MabOJ/l9fWVMRz5ioUxErpVqPV79XZ4yZBcyqs7gSuMJD2XXYSd8YsxIY2swYg0JJZTVzV+Vw6eAU+iTpVMRK
qdaj39RpJaemIs7QqYiVUq1LE30rqJ2K+IK0BEb06OB0OEqpNkYTfSuonYpYe/NKKSdoom9hOhWxUsppmuhbmE5FrJRymib6FlQ7FXHPTtE6FbFSyjGa6FtQ7VTE905I06mIlVKO0ezTgnQqYqWUP9BE30J0KmKllL/QRN9CZi/NIjYyjDvG6FTESilnaaJvAXlHynj/mwJuG92T9u3CnQ5HKdXGaaJvAf9cnk1YSAg/uLC306EopZQmel+rnYr42pHdSI7XqYiVUs7TRO9jOhWxUsrfaKL3IZ2KWCnljzTR+9C8NToVsVLK/2ii95FdB08wZ3m2TkWslPI7muibyeUyvLRiD1c8vQKXy/DzqQOdDkkppU7j1U8JKs8OHK/g4YWbWb7rMJMHdubx64eRFBfpdFhKKXUaTfRN9N7mAh5dvIWT1S5+f+1Qbj2/h05DrJTyS5roG+l4eRWz3tnC4k0FjOjRgb/ePILeiTFOh6WUUvXSRN8Iq7KO8B9vbuLgiUoenNKfH13UR6cfVkr5PU30XqisruHJJTv55/JsUhNiWHT/WL2yRikVMDTRN2DHgWJ+Nn8TOw6c4I4xPfnF1EFER+jDppQKHJqx6uFyGV5csYc/f/Id8e3Cefnu87hoYGenw1JKqUbTRO/BvqJy/vPNzazKPsIlg5P5w3VDSYjVyyaVUoFJE30d72zax6OLt+ByGf50/TBuTO+ul00qpQJam0/0pZXV7DhQzLb9J1j6XSGfbT/IqF4d+etNI+iZEO10eEop1WxtJtEbYzhQXMH2/cVsKyhm2/5itu8/Qc6RUoyxynSIDufhSwcwc2IfQkO0F6+UCg5eJXoReRCYDhjgW+D7QCXwW+BGoAZ43hjzdw917wIete/+1hjzig/iPquqGhdZhSVWQi8oZvsB6++xsqpTZXolRDMoJZ5rR3ZjcJd4BnWNp2v7KB2mUUoFnQYTvYh0Ax4ABhtjykXkTeAWQIAewEBjjEtEzrgkRUQ6AbOAdKw3ifUi8q4x5pgvDwKsX3b640c7+HpXOfs//YSTNS4AIsNCGJASx6XnpDC4azyDusQzMCWOuCj9LVelVNvg7dBNGNBORKqAaKAAqzd/mzHGBWCMOeSh3qXAp8aYowAi8ilwGTCvuYHXFRMRRubOQpIjhMvG9WJw13gGd4mnd2KMfntVKdWmNZjojTH7ROQJIA8oB5YYY5aIyDzgZhG5FigEHjDG7KpTvRuw1+1+vr3sNCIyA5gBkJycTGZmZlOOhT+PC6OkpILY6INQdJB9RbCvSVtSqmlKSkqa3H6VaineDN10BKYBvYEiYIGI3AFEAhXGmHQRuQ54CRhft7qHTZozFhgzB5gDkJ6ebjIyMhpzDKfJzMykOfWVag5tf8ofeTOmMQXYY4wpNMZUAW8BY7F654vsMm8DwzzUzccax6/VHWvYRymlVCvxJtHnAWNEJFqsS1ImA9uBxcAku8xEYKeHup8Al4hIR/uTwSX2MqWUUq3EmzH6NSKyENgAVAMbsYZZ2gGv2ZdelmBdfomIpAMzjTHTjTFHReR/gbX25h6rPTGrlFKqdXh11Y0xZhbWZZLuKoErPJRdh5307fsvYY3fK6WUcoBed6iUUkFOE71SSgU5TfRKKRXkxJgzLmt3lIgUArlnKdIeOH6W9YnAYZ8G1boaOj5/319zt9fY+o0p703Z5pbR9ufs/lq7/TWmjq/K1be+lzEmyWMNY0xA3YA5Daxf53SMLXl8/r6/5m6vsfUbU96bss0to+3P2f21dvtrTB1flWtKjIE4dPOe0wG0sNY+Pl/vr7nba2z9xpT3pqyvygQqbX8tV8dX5Rodo98N3TSXiKwzxqQ7HYdqm7T9KX8UiD36hsxxOgDVpmn7U34n6Hr0SimlTheMPXqllFJuNNErpVSQ00SvlFJBrk0lehGJEZH1InKl07GotkdEBonIbBFZKCL3Ox2PajsCItGLyEsickhEttRZfpmIfCciu0XkES82
9d/Amy0TpQpmvmiDxpjtxpiZwE2AXoKpWk1AXHUjIhOw5ryfa4wZYi8Lxfqxk4uxfslqLXArEAr8oc4m7sH6BaxEIAo4bIx5v3WiV8HAF23QGHNIRK4GHgGeMca83lrxq7bNq/nonWaMWSYiqXUWnw/sNsZkA4jIfGCaMeYPwBlDMyJyERADDAbKReRDY4yrRQNXQcMXbdDezrvAuyLyAaCJXrWKgEj09egG7HW7nw+Mrq+wMeaXACJyN1aPXpO8aq5GtUERyQCuAyKBD1s0MqXcBHKiFw/LGhyHMsb8y/ehqDaqUW3QGJMJZLZUMErVJyBOxtYjH+jhdr87UOBQLKpt0jaoAkIgJ/q1QD8R6S0iEcAtwLsOx6TaFm2DKiAERKIXkXnAKmCAiOSLyA+MMdXAj4FPgO3Am8aYrU7GqYKXtkEVyALi8kqllFJNFxA9eqWUUk2niV4ppYKcJnqllApymuiVUirIaaJXSqkgp4leKaWCnCZ6pZQKcprolVIqyGmiV0qpIPd/oL6Y/VlKDQQAAAAASUVORK5CYII=\n",
      "text/plain": [
       "<Figure size 432x288 with 1 Axes>"
      ]
     },
     "metadata": {
      "needs_background": "light"
     },
     "output_type": "display_data"
    }
   ],
    "source": [
     "# Test accuracy as a function of the L2 strength (log-scale x-axis), from the sweep above.\n",
     "plt.semilogx(regul_val, accuracy_val)\n",
     "plt.grid(True)\n",
     "plt.title('Test accuracy by regularization (logistic)')\n",
     "plt.show()"
    ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
    "source": [
     "# See whether the same L2 technique improves the 1-hidden-layer neural network.\n",
     "\n",
     "batch_size = 128\n",
     "num_hidden_nodes = 1024\n",
     "\n",
     "graph = tf.Graph()\n",
     "with graph.as_default():\n",
     "    # Input data. For the training data, we use a placeholder that will be fed\n",
     "    # at run time with a training minibatch.\n",
     "    tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_size * image_size))\n",
     "    tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n",
     "    tf_valid_dataset = tf.constant(valid_dataset)\n",
     "    tf_test_dataset = tf.constant(test_dataset)\n",
     "    # Fed at run time so the L2 strength can be varied without rebuilding the graph.\n",
     "    beta_regul = tf.placeholder(tf.float32)\n",
     "\n",
     "    # Variables: input->hidden (weights1/biases1) and hidden->output (weights2/biases2).\n",
     "    weights1 = tf.Variable(tf.truncated_normal([image_size * image_size, num_hidden_nodes]))\n",
     "    biases1 = tf.Variable(tf.zeros([num_hidden_nodes]))\n",
     "    weights2= tf.Variable(tf.truncated_normal([num_hidden_nodes, num_labels]))\n",
     "    biases2= tf.Variable(tf.zeros([num_labels]))\n",
     "\n",
     "    # Training computation.\n",
     "    lay1_train = tf.nn.relu(tf.matmul(tf_train_dataset, weights1) + biases1)\n",
     "    logits = tf.matmul(lay1_train, weights2) + biases2\n",
     "    # Cross-entropy plus L2 on both weight matrices (biases unregularized).\n",
     "    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=tf_train_labels, logits=logits)) + beta_regul * (tf.nn.l2_loss(weights1) + tf.nn.l2_loss(weights2))\n",
     "\n",
     "    # Optimizer.\n",
     "    optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)\n",
     "\n",
     "    # Predictions for the training, validation, and test data.\n",
     "    train_prediction = tf.nn.softmax(logits)\n",
     "    lay1_valid = tf.nn.relu(tf.matmul(tf_valid_dataset, weights1) + biases1)\n",
     "    valid_prediction = tf.nn.softmax(tf.matmul(lay1_valid, weights2) + biases2)\n",
     "    lay1_test = tf.nn.relu(tf.matmul(tf_test_dataset, weights1) + biases1)\n",
     "    test_prediction = tf.nn.softmax(tf.matmul(lay1_test, weights2) + biases2)"
    ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Initialized\n",
      "Minibatch loss at step 0: 745.727783\n",
      "Minibatch accuracy: 9.4%\n",
      "Validation accuracy: 33.9%\n",
      "Minibatch loss at step 500: 198.241684\n",
      "Minibatch accuracy: 75.8%\n",
      "Validation accuracy: 76.4%\n",
      "Minibatch loss at step 1000: 113.561928\n",
      "Minibatch accuracy: 89.1%\n",
      "Validation accuracy: 81.8%\n",
      "Minibatch loss at step 1500: 72.227776\n",
      "Minibatch accuracy: 75.8%\n",
      "Validation accuracy: 78.2%\n",
      "Minibatch loss at step 2000: 41.344685\n",
      "Minibatch accuracy: 89.8%\n",
      "Validation accuracy: 84.5%\n",
      "Minibatch loss at step 2500: 25.303534\n",
      "Minibatch accuracy: 83.6%\n",
      "Validation accuracy: 85.7%\n",
      "Minibatch loss at step 3000: 15.510210\n",
      "Minibatch accuracy: 86.7%\n",
      "Validation accuracy: 86.5%\n",
      "Test accuracy: 93.1%\n"
     ]
    }
   ],
   "source": [
    "num_steps = 3001\n",
    "\n",
    "with tf.Session(graph=graph) as session:\n",
    "    tf.global_variables_initializer().run()\n",
    "    print(\"Initialized\")\n",
    "    for step in range(num_steps):\n",
    "        # Pick an offset within the training data, which has been randomized.\n",
    "        # Note: we could use better randomization across epochs.\n",
    "        offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n",
    "        \n",
    "        # Generate a minibatch.\n",
    "        batch_data = train_dataset[offset:(offset + batch_size), :]\n",
    "        batch_labels = train_labels[offset:(offset + batch_size), :]\n",
    "        \n",
    "        # Prepare a dictionary telling the session where to feed the minibatch.\n",
    "        # The key of the dictionary is the placeholder node of the graph to be fed,\n",
    "        # and the value is the numpy array to feed to it.\n",
    "        feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels, beta_regul : 1e-3}\n",
    "        _, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict)\n",
    "        if (step % 500 == 0):\n",
    "            print(\"Minibatch loss at step %d: %f\" % (step, l))\n",
    "            print(\"Minibatch accuracy: %.1f%%\" % accuracy(predictions, batch_labels))\n",
    "            print(\"Validation accuracy: %.1f%%\" % accuracy(valid_prediction.eval(), valid_labels))\n",
    "    print(\"Test accuracy: %.1f%%\" % accuracy(test_prediction.eval(), test_labels))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Plot the accuracy by the meta parameter value\n",
    "\n",
    "num_steps = 3001\n",
    "regul_val = [pow(10, i) for i in np.arange(-4, -2, 0.1)]\n",
    "accuracy_val = []\n",
    "\n",
    "for regul in regul_val:\n",
    "    with tf.Session(graph=graph) as session:\n",
    "        tf.global_variables_initializer().run()\n",
    "        for step in range(num_steps):\n",
    "            # Pick an offset within the training data, which has been randomized.\n",
    "            # Note: we could use better randomization across epochs.\n",
    "            offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n",
    "\n",
    "            # Generate a minibatch.\n",
    "            batch_data = train_dataset[offset:(offset + batch_size), :]\n",
    "            batch_labels = train_labels[offset:(offset + batch_size), :]\n",
    "\n",
    "            # Prepare a dictionary telling the session where to feed the minibatch.\n",
    "            # The key of the dictionary is the placeholder node of the graph to be fed,\n",
    "            # and the value is the numpy array to feed to it.\n",
    "            feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels, beta_regul : regul}\n",
    "            _, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict)\n",
    "        accuracy_val.append(accuracy(test_prediction.eval(), test_labels))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXAAAAEMCAYAAADd+e2FAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjAsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+17YcXAAAgAElEQVR4nO3dd3xV9f3H8dcnCdkhkBBCBntDSFgiomAQpdaBDLWualtHbbWuWmfV1qKlQ/2JddSf9qe2Cg6g4KiCYBBFREQSQtgzCSGEQBKy1/f3xz2xIWTcjJtzb+7n+XjkkeTcMz7n3u9933O/Z4kxBqWUUp7Hx+4ClFJKtY0GuFJKeSgNcKWU8lAa4Eop5aE0wJVSykNpgCullIfSAFcdSkQCRcSISLzdtbSWiGwQkevaMf1eETmrg2sKEJFiEYntyPnWm/8zInJrG6e9UET2dHRNdhORSSKSYncdzvC6ALfeDHU/tSJSVu//a9sx33a9+ZXnM8YMNsZ81Z55NGxHxpgKY0yoMeZw+ys8bVlxwOXAP6z/Q0RkiYgctD6EJ3f0Mt1NYxscxpiNQK2IXGBjaU7xugC33gyhxphQ4BBwab1hb9pdn6uIiJ/dNbSXu66Du9blhJ8B/zbGVFr/G2AtcA1wwraqmtGJz/WbwM87aVltZ4zx2h/gAHB+g2G+wCPAPuAYjheyh/VYCLAYOA4UAF8DPYGngBqgHCgGnmpkWX7AEiDXmvYzYHi9x0OAhUAmUIjjjeRnPZYMbLCGHwKusYZvAK6rN49bgU+tvwNxvCF/AewFdljDXwSygCJgIzC5QY2PWeteBHwD9AFeBZ5osD6rgFsbWc+65d5uPb95wBOAAMHWfIfWGz8eKK17jhvM61ZgDfA8jkD5rTX858BO63X4EIirN83FwG7rOf6f+s8RsAB4pd64I4Dqev/XH3cEkGItIw94HQirN+4R4F5gG1Bab9g5ONpQcb2fEus56QNEAf+x5nkcWA7EWNOf1o7qPZ/x1jgRwFvW9PuB+wCp93ytxtGOCqzX/fyGz2u9dVgPXN7EY8fqt40mxrkQ2FPv/0etmk4C6cDF1vAWX3dgDpBm1b0OGNXcc91Em7vZWucTwDMNxmm0zeB4DxjrNSoGZlvDB1vr4Wt3TjX7GthdgK0r33iAP2A1oFirYbwG/J/12J3Ae0AQjrA7AwixHjslTBtZlh9wAxBqzfdFYEO9x18FVlpvcl9gqvV7iNWw5lnziAKSGlsmjQf4h0APIMgafj2OD51uwMM4PjC6WY89AnxnLdMHGGdNO816Y9YFRaz15otoZD3rlvuJNe1AHB8IdcH4D+D39ca/H3i3iefsVqDaemP6Ws/7VcB2YJi1DvOBz6zx+1jP1SXWY/cBVbQ9wM8D/K35bgAW1Bv3CI4PuNh6z+0R4JxG1uNp4FNrHaKBy6x1CccR4Isbq6HB81kX4O8A71rtaIj1ulxb7/mqsl5jX+Bu4EAzbfIkMKaJx9oS4D8CYqy282Nr/r1aet2ByUAOMMGq+xZgF//dgDntuW6izS0FulttrgBIth5vrs2c8vw2mG8lMMzunGr2NbC7AFtXvvEA3w+cXe//gTjCSoBf4tgyTmhkXs0GeCPj9wFqrQbUzXrjDW9kvN8Di5qYhzMBPqWZGsRat+HW/weBHzQx3j5gqvX/vcDSJuZZt9zkesPuAT60/j63wZt+KzCriXndCuxqMOwzrMCy/q977qKtN/5n9R7zAY7ShgBvpJargK/q/X8E65tQg2HnNBh2PbCHRj7srMcnAznNvKbfBwwQgGMLfVC9x+8EPq73fKXXeyzCmraxbze+1mMDmqir1QHeyOM76tpTc6878H/Aww2mPQic2dRz3USbm1hv2ArgLifaTHMBng9Mau45sPvH6/rAmyMiAvQFPhKRAhEpwLFF6gNE4thKXgu8JyJZIvKkiPg6OW8/EfmriOwT
kSIcjVus+cbg2Lre18ikfXF8LWyrzAZ1PCgiO0WkEMdXzUCgl7XucY0tyzha8xtA3c6164B/tmK5B3FsPQF8DviKyFkiMhbHuv/H2fqB/sBL9V6fPBxb6fHWMr4f3xhTC2S3UGejRCRWRN4VkWzr9XoF6NVCbQ3nMQlHN8hsY8xxa1iYiPxDRA5Z813ZyHyb0gdHWzxUb9hBHK9bnSP1/i61foc2nJExpgbHFnKYMwsWkWH1dvYfa2KcG0Ukrd5rM4T/rltzr3t/4KG66axpoxqsV7PPtaXhutetd3NtpjlhOLbk3ZYGeD1WUGUD5xljetT7CTTGHDOOIwIeNcaMwNGtcAWOLTNwfIo356fATGA6jq/OI6zhguPrYzUwqJHpMnH0xzWmBEf/Yp0+ja1W3R/WXvVf4ehv7IFjC60MR9dI3bo3taw3gMtFZAKOD5UPmxivTt96f/cDDsNpHwY/xtF9UNXMfBo+r5nATxq8PkHGmG9xPI/fvylFxIdTQ8CZ56vOX6zxE4wx3YGbcLxWzdX2PRGJwfGV/iZjTHq9hx6wajzDmu/MBvNtrh0dwfGtrV+9Yf1o44cUjj7nYc6MaIzZZf67s/+0DxwRGQY8h+NbUIQxpgeObx5iTd/c654JPNrgNQ02xiytX0Ib17Fu/k21mUbnKyKDgQoa36hyGxrgp3sJWCAifQFEpLeIXGr9fb6IjLKCoQhH6NZY0+XSeADXCcOxcyofxw7L+XUPWA35DeBZEYkWEV8ROcfaun8DuERE5ljDo0Qk0Zp0C45QDRSREcBPWli3MBxfHfNw9O0+jmMLvM4rwJMiMkgcxolID6vGfUAGjq+7b5v/HrnQlPtFJFxEBuDYofl2vcfeAK4Errb+bo2XgN+KyHAAEekpIvOsx1YAZ4rIRdbRCvfg6O+vswWYLiJxItITRz9sU8Jw9KcXiUg/a15OERF/YBnwd2PM8kbmWwoUiEgv4LcNHm+yHRljKqz5Pmkd8jcYRxfKv5ytrYGPcHRt1K89QETq2oR/vb9bEorjwyUP8LGOLR/SYJymXveXgV+JyESr3YWKyCwRCaZjNNlmrOe0kNOf83OBVcaY6g6qwSU0wE/3Zxw7nNaIyEkce+rHW4/F4djpVLeX/SMcO5UAngGuF5ETIvLnRub7Ko7GfQRH/98XDR6/A0f3xXc4Qv4POLaM9+LY6fUQji6PTcDoerX6WfN9mZbfyO/j+Cq7l/8eZZNX7/EFOLas1+D4gHoJR79rndeBMbTcfYI1n1Sr3nfr12at007gpHEcc+s0Y8wi4G/AUqsLYgtwgfVYDo5wWGitWzyO57qiXk0f4Pgg2gD8u5lFPYrjiJJCHKG5pBVlDgLOxPEhVv+8g97AX3F0K+TjaAMfNZi2pXZUd2jbQRyv0ys4jpRqi9eA2dYHTp2DOL6VReLoLiwTkea+qQBgjNmMo71swvFNaKD1d/1xGn3djTFf4mj/f8fRZbELx6GM7dnqrr/cJtuM5VHgXauLZZY17Fprfdxa3VEFSrVIRGYCLxhjGm5ZtWVebwEZxpj5LY7c9mX44fjAvNS08wSbrkpEnsaxo7hTwqozXvf2EpGJOA4FPrfFkW2mAa6cYm2lLQU+N8Y0tmXYmnkNATYDI40xbe2/bWreP8TxrakCx2GSNwBDnOjyUS7mytfdW2kXimqRddTACRz9t8+3c15/xtFN9LiL3sR1x6wfBWYAczS87dcJr7tX0i1wpZTyULoFrpRSHkoDXCmlPFSnXkWtV69eZsCAAW2atqSkhJCQkI4tSCknaftTdvr222+PGWOiGg7v1AAfMGAAmzZtannERqSkpJCcnNyxBSnlJG1/yk4icrCx4dqFopRSHkoDXCmlPJQGuFJKeSgNcKWU8lAa4Eop5aE0wJVSykN56t20lfIKJ8urOJhfSnlVDRMHRNhdjnIzGuBK2aygtJID+aUczC/hwDHrd34Jh46Xcqz4v9fheu7qcVyaFNvMnJS30QBX
qhPkF1ew/1gJB/JLOZRf8t/Azi+lsOzUO8rFhAfSPzKY80dG0z8yhAGRwbz0+T4eWZ7OmYMi6B3m7E1yVFenAa6Ui73x1QEeW7GNugt/+gjE9QxiQGQIlybFMCAy5Pug7hsRTGC30++TPTQ6jIsWruPhZem8/OMJOO5BrbydBrhSLrRhXz6/fz+DqUOj+OmUAfSPDCa+ZzD+fq07fmBI71B+M3M4T3y0neVbDjN7XFzLE6kuTwNcKRfJKSzj9rc20z8imL9dM47ugd3aNb+fnTOQj7cd4dHl6Zw1OJLo7tqV4u30MEKlXKCiuoZb/7WZssoaXr5+QrvDG8DXR/jL5YlU1tTy4NKt6M1YlAa4Ui7wuxXbSM0s4KkrkxjSO6zD5jsoKpT7fjCCNTuO8t63WR02X+WZNMCV6mCLNh5i0cZMfpk8mAsTYjp8/j+ZMoBJAyN4/P0McgrLOnz+ynM4FeAicqeIpIvINhG5yxr2BxFJE5EtIrJSRPQAVeX1vjt0gseWb2PasCh+PXO4S5bh4yP89fIkaozh/iXaleLNWgxwEUkAbgYmAUnAJSIyFPiLMSbRGDMW+AB41KWVKuXm8k5W8It/bSY6PICFV43F18d1h/r1iwzmwR+O4PNdeSz+JtNly1HuzZkt8JHABmNMqTGmGlgLzDHGFNUbJwTQzQDltapqarntzc0UlFXy0nUT6BHs7/JlXntmf6YMjmT+BxlknSh1+fKU+3EmwNOBaSISKSLBwEVAXwAReUJEMoFr0S1w5cWe/Gg7Gw8cZ8HcREbHhnfKMn18hD/NSwTgvvfSqK3VbShvI870n4nIjcBtQDGQAZQZY+6u9/iDQKAx5rFGpr0FuAUgOjp6wuLFi9tUaHFxMaGhoW2aVqn2aq79rT9czctpFVzQ349rRwZ0cmWQklnFa9squX6UP+f1a//hisr9TJ8+/VtjzMSGw50K8FMmEHkSyDLGvFBvWH/gQ2NMQnPTTpw40ehNjZUnaqr9bTtcyLwX15MU34N/3XQm3Xw7/8AuYwzX/2Mj3x48wcd3TqNfZHCn16BcS0QaDXBnj0Lpbf3uB8wFFlk7MuvMAnZ0RKFKeYoTJZX8/J/f0iPIn79dM96W8AYQcXSl+Ipw73up2pXiRZxtcUtEJAN4H7jNGHMCWGAdWpgGzATudFWRSrmbmlrDHYu/42hRBS/9eAJRYZ3fdVJfbI8gHrl0FBv3H+f1rw7YWovqPE5dC8UYM7WRYfM6vhylPMNTK3eybvcxFswdw9i+PewuB4ArJsTzn605/OnjHSQP783AXiF2l6RcTM/EVKqVPk7P4YWUvVw9qR9XTepndznfExEWzEvE39eH37ybSo12pXR5GuBKtcLu3JP8+p1Uxvbtwe9mjbK7nNNEdw/k95eNZtPBE/zji/12l6NcTANcKScVlVfx839+S5C/Ly9eN54Av9NvvOAOZo+N44JR0fxl5U72HC22uxzlQhrgSjmh1hjueTuVQ8dLef6a8cSEB9ldUpNEhCfmJBDs78uv302luqbW7pKUi2iAK+WED/ZV8en2XB6+eCRnDoq0u5wW9Q4L5PHLEkjNLOB/12lXSlelAa5UC9buymPZ7irmjIvjJ1MG2F2O0y5NjOGHCX14ZtUudh45aXc5ygU0wJVqRm5ROXe/vYW4UOHJOWM86mbCIsL82QmEBfpxzztb2Jun/eFdjQa4Uk2orqnljkXfUV5Vwy/HBhLk7547LZsTGRrAH+eOYceRk8x4ai2XPf8lr68/wPGSSrtLUx1AA1ypJixcs4ev9x9n/uwEYkM9960yc3QfvnrgPH578Uiqqmt5bMU2Jj3xKTe9vomPtuZQXlVjd4mqjfSu9Eo1Yv2eYzy3ZjeXT4hn7vh4UlL22F1Su/TuHshNUwdx09RB7DhSxLLN2Sz7LptPt+fSPdCPixNjmTs+jon9e3pUN5G30wBXqoG8kxXc+fYWBvUK4fHLRttdTocb0ac7D17UnfsuHMH6
vcdYtjmbf3+XzaKNh+gbEcSccfHMHRfHAD0V3+1pgCtVT22t4Z53tlBUVsU/b5xEsH/XfYv4+ghTh0YxdWgUf5hdzSfbjrB0czbPrdnNwtW7Gd+vB3PGx3NpYkyn3GFItV7XbZ1KtcGLa/eybvcx/jh3DCP6dLe7nE4TEuDH3PGO7qIjheUs35LN0s3ZPPLvdB5/fxszR/Xhj/PG0D1QbxjhTjTAlbJs3H+cp1buZFZSLFed0dfucmzTJzyQn587mFumDSIjp4ilm7N5bf0B/P18eOZHY+0uT9WjAa4UcLykkjsWfUe/iGCemJOgO/JwHEc+Ojac0bHhhAb48ezq3cwY2ZtLEmPtLk1ZPPfYKKU6iDGGe99N5XhJJX+7Zjxh2k1wmtvPG0JS3x48vCydI4XldpejLBrgyuu9+sV+1uw4ysMXjyQhrnPuKO9puvn68D8/GktldS33vqu3bXMXGuDKq3136AQL/rODH4yO5vqz+ttdjlsb2CuE314yki/2HOO19QfsLkehAa68WGFpFbe/9R19wgP587wk7fd2wjWT+jFjRG8WfLyDXbl6gSy7aYArr2SM4f4laeQWlfPc1eMID9Z+b2fU3bYtLMCPuxZvobJarzVuJw1w5ZX+ueEgH287wv0XjmBcv552l+NRosICWDAvkYycIp5etcvucryaBrjyOunZhcz/YDvnjejNjecMtLscj3TBqGiuOqMvf/98L1/vy7e7HK+lAa68ysnyKm5/azMRIf789YokfHy037utHrlkFP0igrnnnVSKyqvsLscraYArr2GM4aFl6Rw6XsrCq8cREaLX92iPkAA/nr5yLDmFZfxuxTa7y/FKGuDKa7z9TSbvpx7mnguGMWlghN3ldAkT+vfk9ulDWLo5m4+25thdjtfRAFdeYeeRkzy2YhtTh/bil8lD7C6nS/nVjKEkxofz0LKt5BbpWZqdSQNcdXmlldXc9tZmwgK78fSVY7Xfu4N183Vc5Kq8qobfvJeGMXqWZmfRAFddWt7JCq595Wv25hXz7FVjiQoLsLukLmlwVCgPXzSSz3fl8cZXB+0ux2togKsua8eRImY//yXbc4p44ZrxnD2kl90ldWnXTe5P8vAonvxoO3uO6lmanUEDXHVJa3bkMu+F9VTX1vLuz6fwwzExdpfU5YkIf56XSLC/L3e9rWdpdgYNcNWlGGN4Zd0+bnp9EwOjQlh+2zmMidcrDHaW3t0D+ePcRNKzi3h2tZ6l6Woa4KrLqKqp5aFlW5n/4XZmjurDOz8/iz7hgXaX5XUuTOjDFRPieTFlL5sOHLe7nC5NA1x1CQWlldzwj40s2pjJbdMH88K147v0DYnd3WOzRhPXM4i739nCST1L02U0wJXH25dXzJwX1rPpwAmevjKJ3/xghB4qaLPQAD+euXIs2SfKePz9DLvL6bI0wJVHW7/3GHNeWE9hWRVv3nwmc8fH212SskwcEMEvkgfz7rdZfJyuZ2m6gga48liLNh7i+lc30jssgH//8mzOGKCnx7ubO2cMIyGuOw8u3cpuvQFEh3MqwEXkThFJF5FtInKXNewvIrJDRNJEZJmI9HBtqUo51NQa5n+QwYNLt3L2kF4s+eUU+kUG212WaoS/nw/PXjUOXx8fZj//Jasycu0uqUtpMcBFJAG4GZgEJAGXiMhQYBWQYIxJBHYBD7qyUKUAiiuqueWNTbzyxX5+MmUAr94wke56F3m3NjgqlPd/dTaDokK5+Y1NPLd6t55u30Gc2QIfCWwwxpQaY6qBtcAcY8xK63+ADYB2PiqXyjpRyuUvridlVx5/mJ3A72aNxs9XewE9QUx4EO/eehazx8by1Kpd3PbWZkorq1ueUDXLmdafDkwTkUgRCQYuAvo2GOdnwH86ujil6mw+dILZz39JdkEZr/30DH48We8g72kCu/nyzI/G8tBFI/g4/QhzX1hP5vFSu8vyaOLMVxkRuRG4DSgGMoAyY8zd1mMPAxOBuaaRmYnI
LcAtANHR0RMWL17cpkKLi4sJDQ1t07TKs+0+UcOfviknIlC4a3wgsaGdv9Wt7a9jpeVV82JqBb4Ct40NZGSkr90lubXp06d/a4yZ2HC4UwF+ygQiTwJZxpgXROQG4FZghjGmxY/SiRMnmk2bNrVqeXVSUlJITk5u07TKs9399hY+23mUNb9Otu0uOtr+Ot6+vGJufmMTB/JLeezSUfx4cn9E9Pj9xohIowHu7FEova3f/YC5wCIRuRC4H5jlTHgr1RZVNbWs3p7LjBHRegu0LmZQVCj/vu1skodF8ejybTy4dKteAKuVnP0uukREMoD3gduMMSeAvwFhwCoR2SIiL7mqSOW9vtl/nKLyamaOjra7FOUCYYHdePn6idw2fTCLv8nkmv/dQN7JCrvL8hhOXSzCGDO1kWF6Xyrlciszcgnw82HqUL2Wd1fl6yP85gcjGNGnO795L5VZf/uCv/94AonxempJS/QYLOW2jDGsyshl6tBeemEqL3BpUixLfjEFHxGueOkr/v1dtt0luT0NcOW2MnKKyC4oY+aoPnaXojrJ6NhwVtx+Nkl9e3DX21v440fbqanVk36aogGu3NaqjFxE4LyRve0uRXWiyNAA3rzpTK6b3I+/f76Pn732DYVleknaxmiAK7e1clsuE/r1pFeo3ojY23Tz9WH+7DE8MSeBL/ccY/bzX7Jxv94coiENcOWWsk6UkpFTpEefeLlrz+zPWzdPpqKqhiv//hV3Lv6OI4XldpflNjTAlVuqu2rdBdr/7fUmDYxg9a+TueO8Ifwn/QjnPZXCiyl7qaiusbs022mAK7e0KiOXIb1DGdgrxO5SlBsI8vflnpnD+fTuczl7SC/+9PEOLvyfdXy246jdpdlKA1y5ncLSKr7ef5yZo7T7RJ2qX2Qw/3v9RF7/2SQE+Olr33Dja99w4FiJ3aXZQgNcuZ01O3OpqTVcoAGumnDusCg+vmsaD100gg378pn5zOf85ZMdXneJWg1w5XZWZeTSOyyAJD0TTzXD38+HW6YN5rN7k7kkMYbnP9vLeX9dy4rUw15zwwgNcOVWyqtqWLszj/NHReud5ZVTencP5OkfjeW9W88iMtSfOxZ9x1Uvb2B7TpHdpbmcBrhyK1/tzaekska7T1SrTRwQwYrbz+HJOWPYlXuSixeu47Hl6RSUVtpdmstogCu3sjIjlxB/X6YMjrS7FOWBfH2Ea87sx2f3JnPd5P78c8NBpv81hUUbD3XJbhUNcOU2amsNn27PJXl4bwL89A4tqu16BPvz+GUJfPCrqQyNDuPBpVt58+tDdpfV4TTAldvYklVA3skK7T5RHWZUbHfevmUy5wzpxYL/7CCnsMzukjqUBrhyG6sycvHzEaYP14tXqY4jIjw5Zww1tYaHl6V3qa4UDXDlNlZuO8KZgyIID+5mdymqi+kXGcy9PxjOmh1HWZF62O5yOowGuHILe/OK2ZtXotf+Vi7zkykDGNu3B79/P4P84q5x2zYNcOUW6i5edb72fysX8fUR/nx5IifLq3j8gwy7y+kQGuDKLazKyGV0bHfiegTZXYrqwoZFh3H79KEs33KY1dtz7S6n3TTAle3yTlaw+dAJ7T5RneIXyYMZHh3Gw8vSOVnu2Xf60QBXtlu9PRdj0MMHVafw9/PhT5cncvRkOQv+s8PuctpFA1zZblVGLnE9ghgZE2Z3KcpLjO3bgxvPGcibXx9iw758u8tpMw1wZauSimrW7TnGzNHRiOjFq1TnueeC4fSLCOaBJWmUV3nm3X00wJWt1u3Oo7K6VrtPVKcL8vdlwbwxHMgv5ZlPd9ldTptogCtbrczIJTyoG5MGRNhdivJCUwb34upJffnfz/eRllVgdzmtpgGubFNdU8uaHUeZMaI3fr7aFJU9HvjhSKLCArjvvTSqamrtLqdV9F2jbPPNgRMUlFZp94myVXhQN+bPHsOOIyf5+9q9dpfTKhrgyjarMnLx9/Nh2rAou0tRXu6CUdFckhjD
wtV72HP0pN3lOE0DXNnCGMPKjCOcM6QXIQF+dpejFL+bNZrgAF/uey+NmlrPuGKhBriyxY4jJ8k6UabdJ8pt9AoN4LFLR7H5UAH//OqA3eU4RQNc2WJVRi4iMGOkXvtbuY/ZY+NIHh7Fnz/ZSebxUrvLaZEGuLLFyowjjOvbg95hgXaXotT3RIQn5oxBgIeWbXX7mz9ogKtOd7igjPTsIi7Qi1cpNxTXI4gHfjiCdbuPsWRztt3lNEsDXHW6umt/zxyt/d/KPV17Zn/OGNCTP3yQwdGT5XaX0yQNcNXpVmXkMigqhMFRoXaXolSjfHyEBfMSKauq4XcrttldTpOcCnARuVNE0kVkm4jcZQ27wvq/VkQmurZM1VUUllWxYV++Hn2i3N7gqFDuOn8oH209wsfpOXaX06gWA1xEEoCbgUlAEnCJiAwF0oG5wOcurVB1KSk7j1Jda/TmDcoj3Dx1EKNju/PI8m0UlFbaXc5pnNkCHwlsMMaUGmOqgbXAHGPMdmPMTteWp7qalRm59AoNYFzfHnaXolSLuvn68Kd5iZwoqeSBJe53VIozAZ4OTBORSBEJBi4C+rq2LNUVVVTXsHZnHheM6o2Pj177W3mGhLhw7rtwOB9vO8JbGw/ZXc4pWjyH2RizXUT+BKwCioFUoNrZBYjILcAtANHR0aSkpLSp0OLi4jZPq9xDWl41xRXVRFcf9bjXUtufdxtiDAmRvvxueToc3UtcmHsc/+HURSiMMa8CrwKIyJNAlrMLMMa8DLwMMHHiRJOcnNz6KoGUlBTaOq1yD6uWbSXYP5tb50wnsJuv3eW0irY/lTCxnIueXccbe/xYfvvZbtGGnT0Kpbf1ux+OHZeLXFmU6npqaw2rMnI5d1iUWzR8pVqrd1ggT105lp25J5n/YYbd5QDOHwe+REQygPeB24wxJ0RkjohkAWcBH4rIJy6rUnm8tOxCjp6s0MMHlUc7d1gUt0wbxL82HOLj9CN2l+N0F8rURoYtA5Z1eEWqS1qVcQRfH+G8EXrxKuXZ7p05nK/25nP/kjQS48OJ7RFkWy3u0ROvuryV23KZNCCCHsH+dpeiVLv4+/nw3NXjqK6p5a7FW6i28TZsGuDK5fYcLWb30WLtPlFdxoBeITJuH54AABDTSURBVMyfk8DGA8f522d7bKtDb4WiOsTJ8ioO5pdyML+UA/klHMwv4UB+KQfzS8gtqgDQAFddypxx8azbdYyFq3czZXAvJg2M6PQaNMCV0wpKK78P5QPHrN/5JRw6Xsqx4lNPM44KC2BAZDBTh0YxIDKYxPge9I0ItqlypVzj8dkJbD50grsWf8dHd07t9C5CDXDVojU7crnnnVQKSqtOGR4bHkj/yBAuGBVN/8gQBkQG0z8yhH4RwXqfS+UVQgP8eO7q8cx98UvuX5LGS9dNQKTzzjLWd5lq0Udbj1Bba/jtxSO/D+q+EcF6PLdSwJj4cO77wQie+Gg7b359iOsm9++0ZWuAqxalZhYwoX9Pbpo6yO5SlHJLN54zkHV7jvGHDzI4Y0AEw/uEdcpy9SgU1aziimr25BWTGK9XD1SqKT4+wlNXJBEW2I1fLdpMeVVN5yy3U5aiPFZ6diHGwFi9/KtSzYoKC+DpK5PYlVvMHz7onFPtNcBVs1IzCwBIjA+3uRKl3N+0YVH8fNog3vz6UKfcxUcDXDUrLauQ+J5BRIYG2F2KUh7h1zOHkxQfzn3vpZFdUObSZWmAq2alZhWQpP3fSjnN38+HhVePo9bA3S4+1V4DXDUpv7iCrBNl2n2iVCv1jwxh/mzHqfbPrXHdqfYa4KpJaVmFACTpDkylWm32uDjmjo/juTW7+XpfvkuWoQGumpSaVYCI456ASqnWe/yyBPpHhnDX21tccld7DXDVpNTMAoZEhRKqp8Ur1SahAX4svGocxeXVbLGO6OpI+s5UjTLGkJZVyHS9AYNS7TImPpwvHjiP8KBuHT5v3QJXjco6UUZ+SSVJugNTqXZz
RXiDBrhqgu7AVMr9aYCrRqVlFeDv68OIPt3tLkUp1QQNcNWoLZkFjIwJw99Pm4hS7krfneo0NbWG9OxC7T5Rys1pgKvT7MsrpqSyRi8hq5Sb0wBXp6k7XlWPQFHKvWmAq9OkZRUSGuDHoKhQu0tRSjVDA1ydJjWrgIS47vj6dN7NWZVSracBrk5RUV3D9pwi3YGplAfQAFen2JFzkqoao9cAV8oDaICrU6Rm6S3UlPIUGuDqFKmZhfQK9SeuR5DdpSilWqABrk6RllVAYnwPRHQHplLuTgNcfa+4opo9ecXafaKUh9AAV9/bmlWIMXoFQqU8hQa4+l7dDkw9AkUpz6ABrr6XllVA34ggIkL87S5FKeUEDXD1vdTMQr2AlVIexKkAF5E7RSRdRLaJyF3WsAgRWSUiu63fPV1bqnKlY8UVZBeU6QWslPIgLQa4iCQANwOTgCTgEhEZCjwArDbGDAVWW/8rD5Wm/d9KeRxntsBHAhuMMaXGmGpgLTAHuAx43RrndWC2a0pUnSE1sxAfgYQ43QJXylP4OTFOOvCEiEQCZcBFwCYg2hiTA2CMyRGR3o1NLCK3ALcAREdHk5KS0qZCi4uL2zytatlnqeXEhAjffPWF3aW4JW1/yh21GODGmO0i8idgFVAMpALVzi7AGPMy8DLAxIkTTXJycpsKTUlJoa3TquYZY7hn3afMGBFDcnKS3eW4JW1/yh05tRPTGPOqMWa8MWYacBzYDeSKSAyA9fuo68pUrpR1oozjJZUk6gk8SnkUZ49C6W397gfMBRYBK4AbrFFuAJa7okDlenUn8IzVHZhKeRRn+sABllh94FXAbcaYEyKyAHhHRG4EDgFXuKpI5VppWYX4+/owvE+Y3aUopVrBqQA3xkxtZFg+MKPDK1KdbktmASNju+Pvp+d1KeVJ9B3r5WpqDenZhYzVE3iU8jga4F5ub14xpZU1egq9Uh5IA9zLbcm0zsDsq1vgSnkaDXAvl5ZVQGiAH4N6hdpdilKqlTTAvVxaViFj4sLx8dFbqCnlaTTAvVhFdQ3bc4r0DjxKeSgNcC+2PeckVTVGLyGrlIfSAPdiqdYOTD2FXinPpAHuxVKzCugVGkBseKDdpSil2kAD3IulZRWSFB+OiO7AVMoTaYB7qZPlVezNK9YTeJTyYBrgXmprdiHG6Ak8SnkyDXAvlZZVCKBb4Ep5MA1wL5WaWUC/iGAiQvztLkUp1UYa4F4qLauQRD3+WymPpgHuhfJOVpBdUEaSdp8o5dE0wL1QWlbdFQg1wJXyZBrgXig1qxAfgYS47naXopRqBw3wTpBxuIiFq3dzuKDM7lIAxw7MYdFhBPs7e0tUpZQ70newi5VX1XDbW5vZf6yEhat3c3FiDDdPHURCnD07EI0xpGUVcMGoaFuWr5TqOBrgLvbcmt3sP1bCU1ckkZFTxNvfZLJ8y2EmD4rg5qmDmD68d6deizvrRBknSqv0+G+lugANcBfanlPE39fuY974eOZNiGcecOf5Q1m88RD/9+UBbnx9E4OjQrjxnEHMHR9HYDdfl9dUdwu1sboDUymPp33gLlJTa3hg6VbCg7rx24tHfj+8e2A3bpk2mM/vm86zV40l2N+Ph5ZtZcqCNTy9ahfHiitcWldaVgH+fj4M7xPm0uUopVxPt8Bd5I2vDpCaWcCzV42lZyNnO3bz9eGysXHMSorl6/3HeWXdPhau3s1La/cyd1wcN00dyJDeHR+yqVmFjIrpTjdf/exWytNpgLtAdkEZf/lkJ+cOi2JWUmyz44oIkwdFMnlQJHvzinn1i/0s+TaLxd9kMn14FDdPHcRZgyM75JKvNbWG9OxCrpzYt93zUkrZTzfDOpgxhkf+nY4xMH92QquCd3BUKE/OGcP6B87j7vOHsTW7kGte+ZqLF37B8i3ZGGPaVdueo8WUVtboKfRKdREa4B3sg7Qc1uw4yq9nDqNvRHCb5hEZGsCd5w/li/vPY8HcMVTW1HLn4i1c9+rX
HMovbXNtdbdQ0zMwleoaNMA7UEFpJb9/fxuJ8eH89OyB7Z5fYDdfrprUj5V3TWP+7ARSMwuZ+T9reWXdPmpqW781nppVQFiAHwMjQ9pdm1LKfhrgHejJj7ZzorSKBXMT8e3AY7t9fITrJvdn1T3TOHtwL+Z/uJ25L3zJ9pyiVs0nLauQMfHhnXrcuVLKdTTAO8j6vcd4Z1MWN08dxKhY11xjJCY8iFdumMjCq8eRdaKMS5/7gqdW7qSiuqbFacuratieU6TdJ0p1IRrgHaC8qoaHlm6lf2Qwd50/1KXLEhFmJcWy6p5zmZUUy3Nr9nDxwi/49uDxZqfbnlNEda0hSXdgKtVlaIB3gIWrd3Mgv5Qn54zplLMpASJC/Hn6R2N57adnUFZZw+UvfcVjy9MprqhudHy9hZpSXY8GeDttzyni5c/3cfmEeM4e0qvTl588vDef3D2NG84awBsbDjLz6bV8tvPoaeOlZhYQFRZATHhgp9eolHINDfB2qKk1PLAkjfCgbjx80ciWJ3CR0AA/fjdrNO/dOoXgAD9++n/fcNfi7zheUvn9OKlZBSTFh3fICUFKKfegAd4Or68/QGpWIY9eOqrR0+U724T+PfnwjnO4Y8ZQPtyaw/lPr2X5lmyKyqvYm1eit1BTqovRU+nbKOtEKX9duZPk4S2fLt+ZAvx8ueeCYVw8Job7lqRx5+ItjLAuXJWoR6Ao1aU4tQUuIneLyDYRSReRRSISKCLnichma9jrIuI1HwZ1p8tD60+X7yzD+4Sx9BdTeOSSURzML0UEEm26iYRSyjVaDF0RiQPuAEYZY8pE5B3gGuD3wAxjzC4ReRy4AXjVpdW6iffTcvhsZx6PXDKK+J5tO12+M/j6CDeeM5CZo6LJPFHqFt08SqmO42wfuB8QZG1lBwMlQIUxZpf1+CpgngvqczsFpZU8/v42kuLD+cmUAXaX45S+EcFMGdz5R8gopVyrxS1wY0y2iPwVOASUASuBd4A/i8hEY8wm4HKg0WuUisgtwC0A0dHRpKSktLrIkipDVVlJm6btaK9ureB4STV3JPqw7vO1dpejOklxcbFbtD+l6nOmC6UncBkwECgA3gWuBa4CnhGRAByh3ugZJMaYl4GXASZOnGiSk5NbXeT8DzJ4a+N+LkrqycWJMZwzpJctNyRYv+cY6z7+ml8kD+b6C0d0+vKVfVJSUmhL21XKlZzZ8Xg+sN8YkwcgIkuBKcaYfwFTrWEzgWGuKnLGyGi278vkk21HeO/bLMKDunHh6D5cnBjDWYMjOyXMy6tqeHDZVgZEBnPnDNeeLq+UUs5wJsAPAZNFJBhHF8oMYJOI9DbGHLW2wO8HnnBVkWcNjqQiMYCzzpnKul3H+HBrDh9uzeHtTZn0DO7GhQl9uHhMLJMHReDnojB/dvVuDuaX8tZNZ3ba6fJKKdUcZ/rAvxaR94DNOLpJvsPRJTJfRC7BsSP0RWPMGpdWiuMY5/NHRXP+qGjKq2r4fFceH27NYcWWwyzamElEiD8XJvThksQYzhwY2a5LulZU13CksJzDBeXsO1bMy5/v44oJ8Uyx4XR5pZRqjFPHbhtjHgMeazD4N9aPLQK7+TJzdB9mju5DeVUNKTuP8kFaDss2Z/PW14foFRrADxMc3SxnDIg4Jcwrq2vJLSonp7CcnMIyDheUc6SwjMPW/0cKyzlWXHnK8uJ6BPHwxfadLq+UUg11iZNvArv5cmFCDBcmxFBWWcNnO4/yYVoO736byT83HCQqLICk+B7knSzncGE5x4oraHh7ybBAP2LCA4kJD2JMXDh9ugcR0yOQ2PAg+oQH0jciiAA/7TpRSrmPLhHg9QX5+3LRmBguGhNDSUU1a3Yc5YO0w+zLK6FPeCDD+4QREx5EbI9A+oQHERseSJ/wQMICu9ldulJKtUqXC/D6QgL8uDQplkvd6FolSinVUfRqhEop5aE0wJVSykNpgCullIfSAFdKKQ+lAa6UUh5KA1wppTyUBrhS
SnkoDXCllPJQYhqeU+7KhYnkAQebGSUcKGzisV7AsQ4vqvM0t26essz2zK8t07ZmGmfGbWmcrtz+oPPboLa/1o3T3OP9jTFRpw01xrjND/ByM49tsrs+V62bpyyzPfNry7StmcaZcVsapyu3P1e0h85enje3v6Z+3K0L5X27C3AhO9ato5fZnvm1ZdrWTOPMuC2N05XbH3T++mn7a904ra6xU7tQ2kNENhljJtpdh/JO2v6UO3K3LfDmvGx3AcqraftTbsdjtsCVUkqdypO2wJVSStWjAa6UUh5KA1wppTxUlwlwEQkRkW9F5BK7a1HeRURGishLIvKeiPzC7nqU97A9wEXkHyJyVETSGwy/UER2isgeEXnAiVndD7zjmipVV9UR7c8Ys90YcytwJaCHGqpOY/tRKCIyDSgG3jDGJFjDfIFdwAVAFvANcDXgC/yxwSx+BiTiONU5EDhmjPmgc6pXnq4j2p8x5qiIzAIeAP5mjHmrs+pX3s32mxobYz4XkQENBk8C9hhj9gGIyGLgMmPMH4HTukhEZDoQAowCykTkI2NMrUsLV11CR7Q/az4rgBUi8iGgAa46he0B3oQ4ILPe/1nAmU2NbIx5GEBEfoJjC1zDW7VHq9qfiCQDc4EA4COXVqZUPe4a4NLIsBb7eowxr3V8KcoLtar9GWNSgBRXFaNUU2zfidmELKBvvf/jgcM21aK8j7Y/5RHcNcC/AYaKyEAR8QeuAlbYXJPyHtr+lEewPcBFZBHwFTBcRLJE5EZjTDVwO/AJsB14xxizzc46Vdek7U95MtsPI1RKKdU2tm+BK6WUahsNcKWU8lAa4Eop5aE0wJVSykNpgCullIfSAFdKKQ+lAa6UUh5KA1wppTyUBrhSSnmo/wfRGhyWdUvG9AAAAABJRU5ErkJggg==\n",
      "text/plain": [
       "<Figure size 432x288 with 1 Axes>"
      ]
     },
     "metadata": {
      "needs_background": "light"
     },
     "output_type": "display_data"
    }
   ],
   "source": [
    "plt.semilogx(regul_val, accuracy_val)\n",
    "plt.grid(True)\n",
    "plt.title('Test accuracy by regularization (1-layer net)')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "na8xX2yHZzNF"
   },
   "source": [
    "---\n",
    "Problem 2\n",
    "---------\n",
    "Let's demonstrate an extreme case of overfitting. Restrict your training data to just a few batches. What happens?\n",
    "\n",
    "---"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
   "source": [
    "batch_size = 128\n",
    "num_hidden_nodes = 1024\n",
    "\n",
    "graph = tf.Graph()\n",
    "with graph.as_default():\n",
    "    # Input data. For the training data, we use a placeholder that will be fed\n",
    "    # at run time with a training minibatch.\n",
    "    tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_size * image_size))\n",
    "    tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n",
    "    tf_valid_dataset = tf.constant(valid_dataset)\n",
    "    tf_test_dataset = tf.constant(test_dataset)\n",
    "    beta_regul = tf.placeholder(tf.float32)\n",
    "\n",
    "    # Variables.\n",
    "    weights1 = tf.Variable(tf.truncated_normal([image_size * image_size, num_hidden_nodes]))\n",
    "    biases1 = tf.Variable(tf.zeros([num_hidden_nodes]))\n",
    "    weights2= tf.Variable(tf.truncated_normal([num_hidden_nodes, num_labels]))\n",
    "    biases2= tf.Variable(tf.zeros([num_labels]))\n",
    "\n",
    "    # Training computation.\n",
    "    lay1_train = tf.nn.relu(tf.matmul(tf_train_dataset, weights1) + biases1)\n",
    "    logits = tf.matmul(lay1_train, weights2) + biases2\n",
    "    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=tf_train_labels, logits=logits))\n",
    "\n",
    "    # Optimizer.\n",
    "    optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)\n",
    "\n",
    "    # Predictions for the training, validation, and test data.\n",
    "    train_prediction = tf.nn.softmax(logits)\n",
    "    lay1_valid = tf.nn.relu(tf.matmul(tf_valid_dataset, weights1) + biases1)\n",
    "    valid_prediction = tf.nn.softmax(tf.matmul(lay1_valid, weights2) + biases2)\n",
    "    lay1_test = tf.nn.relu(tf.matmul(tf_test_dataset, weights1) + biases1)\n",
    "    test_prediction = tf.nn.softmax(tf.matmul(lay1_test, weights2) + biases2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Initialized\n",
      "Minibatch loss at step 0: 344.986450\n",
      "Minibatch accuracy: 16.4%\n",
      "Validation accuracy: 28.0%\n",
      "Minibatch loss at step 5: 183.774658\n",
      "Minibatch accuracy: 61.7%\n",
      "Validation accuracy: 66.3%\n",
      "Minibatch loss at step 10: 6.777255\n",
      "Minibatch accuracy: 93.8%\n",
      "Validation accuracy: 75.2%\n",
      "Minibatch loss at step 15: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 75.2%\n",
      "Minibatch loss at step 20: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 75.4%\n",
      "Minibatch loss at step 25: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 75.4%\n",
      "Minibatch loss at step 30: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 75.4%\n",
      "Minibatch loss at step 35: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 75.4%\n",
      "Minibatch loss at step 40: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 75.4%\n",
      "Minibatch loss at step 45: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 75.4%\n",
      "Minibatch loss at step 50: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 75.4%\n",
      "Minibatch loss at step 55: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 75.4%\n",
      "Minibatch loss at step 60: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 75.4%\n",
      "Minibatch loss at step 65: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 75.4%\n",
      "Minibatch loss at step 70: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 75.4%\n",
      "Minibatch loss at step 75: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 75.4%\n",
      "Minibatch loss at step 80: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 75.4%\n",
      "Minibatch loss at step 85: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 75.4%\n",
      "Minibatch loss at step 90: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 75.4%\n",
      "Minibatch loss at step 95: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 75.4%\n",
      "Minibatch loss at step 100: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 75.4%\n",
      "Test accuracy: 83.0%\n"
     ]
    }
   ],
   "source": [
    "num_steps = 101\n",
    "num_batches = 3\n",
    "\n",
    "with tf.Session(graph=graph) as session:\n",
    "    tf.global_variables_initializer().run()\n",
    "    print(\"Initialized\")\n",
    "    for step in range(num_steps):\n",
    "        # Pick an offset within the training data, which has been randomized.\n",
    "        # Note: we could use better randomization across epochs.\n",
    "        offset = ((step % num_batches) * batch_size) % (train_labels.shape[0] - batch_size)\n",
    "        \n",
    "        # Generate a minibatch.\n",
    "        batch_data = train_dataset[offset:(offset + batch_size), :]\n",
    "        batch_labels = train_labels[offset:(offset + batch_size), :]\n",
    "        \n",
    "        # Prepare a dictionary telling the session where to feed the minibatch.\n",
    "        # The key of the dictionary is the placeholder node of the graph to be fed,\n",
    "        # and the value is the numpy array to feed to it.\n",
    "        feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels, beta_regul : 1e-3}\n",
    "        _, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict)\n",
    "        if (step % 5 == 0):\n",
    "            print(\"Minibatch loss at step %d: %f\" % (step, l))\n",
    "            print(\"Minibatch accuracy: %.1f%%\" % accuracy(predictions, batch_labels))\n",
    "            print(\"Validation accuracy: %.1f%%\" % accuracy(valid_prediction.eval(), valid_labels))\n",
    "    print(\"Test accuracy: %.1f%%\" % accuracy(test_prediction.eval(), test_labels))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "ww3SCBUdlkRc"
   },
   "source": [
    "---\n",
    "Problem 3\n",
    "---------\n",
    "Introduce Dropout on the hidden layer of the neural network. Remember: Dropout should only be introduced during training, not evaluation, otherwise your evaluation results would be stochastic as well. TensorFlow provides `nn.dropout()` for that, but you have to make sure it's only inserted during training.\n",
    "\n",
    "What happens to our extreme overfitting case?\n",
    "\n",
    "---"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [],
   "source": [
    "batch_size = 128\n",
    "num_hidden_nodes = 1024\n",
    "\n",
    "graph = tf.Graph()\n",
    "with graph.as_default():\n",
    "    # Input data. For the training data, we use a placeholder that will be fed\n",
    "    # at run time with a training minibatch.\n",
    "    tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_size * image_size))\n",
    "    tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n",
    "    tf_valid_dataset = tf.constant(valid_dataset)\n",
    "    tf_test_dataset = tf.constant(test_dataset)\n",
    "\n",
    "    # Variables.\n",
    "    weights1 = tf.Variable(tf.truncated_normal([image_size * image_size, num_hidden_nodes]))\n",
    "    biases1 = tf.Variable(tf.zeros([num_hidden_nodes]))\n",
    "    weights2= tf.Variable(tf.truncated_normal([num_hidden_nodes, num_labels]))\n",
    "    biases2= tf.Variable(tf.zeros([num_labels]))\n",
    "\n",
    "    # Training computation.\n",
    "    lay1_train = tf.nn.relu(tf.matmul(tf_train_dataset, weights1) + biases1)\n",
    "    drop1 = tf.nn.dropout(lay1_train, 0.5)\n",
    "    logits = tf.matmul(drop1, weights2) + biases2\n",
    "    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=tf_train_labels, logits=logits))\n",
    "\n",
    "    # Optimizer.\n",
    "    optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)\n",
    "\n",
    "    # Predictions for the training, validation, and test data.\n",
    "    train_prediction = tf.nn.softmax(logits)\n",
    "    lay1_valid = tf.nn.relu(tf.matmul(tf_valid_dataset, weights1) + biases1)\n",
    "    valid_prediction = tf.nn.softmax(tf.matmul(lay1_valid, weights2) + biases2)\n",
    "    lay1_test = tf.nn.relu(tf.matmul(tf_test_dataset, weights1) + biases1)\n",
    "    test_prediction = tf.nn.softmax(tf.matmul(lay1_test, weights2) + biases2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Initialized\n",
      "Minibatch loss at step 0: 572.435669\n",
      "Minibatch accuracy: 10.2%\n",
      "Validation accuracy: 28.4%\n",
      "Minibatch loss at step 5: 78.825836\n",
      "Minibatch accuracy: 82.8%\n",
      "Validation accuracy: 64.0%\n",
      "Minibatch loss at step 10: 0.710394\n",
      "Minibatch accuracy: 98.4%\n",
      "Validation accuracy: 66.2%\n",
      "Minibatch loss at step 15: 2.970871\n",
      "Minibatch accuracy: 96.9%\n",
      "Validation accuracy: 66.5%\n",
      "Minibatch loss at step 20: 1.788096\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 67.0%\n",
      "Minibatch loss at step 25: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 68.1%\n",
      "Minibatch loss at step 30: 0.282289\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 69.3%\n",
      "Minibatch loss at step 35: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 69.0%\n",
      "Minibatch loss at step 40: 0.000008\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 68.7%\n",
      "Minibatch loss at step 45: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 68.8%\n",
      "Minibatch loss at step 50: 1.419862\n",
      "Minibatch accuracy: 98.4%\n",
      "Validation accuracy: 68.8%\n",
      "Minibatch loss at step 55: 3.313571\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 68.5%\n",
      "Minibatch loss at step 60: 0.250072\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 68.1%\n",
      "Minibatch loss at step 65: 0.039343\n",
      "Minibatch accuracy: 99.2%\n",
      "Validation accuracy: 68.2%\n",
      "Minibatch loss at step 70: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 68.7%\n",
      "Minibatch loss at step 75: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 68.2%\n",
      "Minibatch loss at step 80: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 68.5%\n",
      "Minibatch loss at step 85: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 68.3%\n",
      "Minibatch loss at step 90: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 68.3%\n",
      "Minibatch loss at step 95: 0.000000\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 68.6%\n",
      "Minibatch loss at step 100: 0.000069\n",
      "Minibatch accuracy: 100.0%\n",
      "Validation accuracy: 68.6%\n",
      "Test accuracy: 74.0%\n"
     ]
    }
   ],
   "source": [
    "num_steps = 101\n",
    "num_batches = 3\n",
    "\n",
    "with tf.Session(graph=graph) as session:\n",
    "    tf.global_variables_initializer().run()\n",
    "    print(\"Initialized\")\n",
    "    for step in range(num_steps):\n",
    "        # Pick an offset within the training data, which has been randomized.\n",
    "        # Note: we could use better randomization across epochs.\n",
    "        offset = step % num_batches\n",
    "        \n",
    "        # Generate a minibatch.\n",
    "        batch_data = train_dataset[offset:(offset + batch_size), :]\n",
    "        batch_labels = train_labels[offset:(offset + batch_size), :]\n",
    "        \n",
    "        # Prepare a dictionary telling the session where to feed the minibatch.\n",
    "        # The key of the dictionary is the placeholder node of the graph to be fed,\n",
    "        # and the value is the numpy array to feed to it.\n",
    "        feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}\n",
    "        _, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict)\n",
    "        if (step % 5 == 0):\n",
    "            print(\"Minibatch loss at step %d: %f\" % (step, l))\n",
    "            print(\"Minibatch accuracy: %.1f%%\" % accuracy(predictions, batch_labels))\n",
    "            print(\"Validation accuracy: %.1f%%\" % accuracy(valid_prediction.eval(), valid_labels))\n",
    "    print(\"Test accuracy: %.1f%%\" % accuracy(test_prediction.eval(), test_labels))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "-b1hTz3VWZjw"
   },
   "source": [
    "---\n",
    "Problem 4\n",
    "---------\n",
    "\n",
    "Try to get the best performance you can using a multi-layer model! The best reported test accuracy using a deep network is [97.1%](http://yaroslavvb.blogspot.com/2011/09/notmnist-dataset.html?showComment=1391023266211#c8758720086795711595).\n",
    "\n",
    "One avenue you can explore is to add multiple layers.\n",
    "\n",
    "Another one is to use learning rate decay:\n",
    "\n",
    "    global_step = tf.Variable(0)  # count the number of steps taken.\n",
    "    learning_rate = tf.train.exponential_decay(0.5, global_step, ...)\n",
    "    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)\n",
    " \n",
    " ---\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [],
   "source": [
    "batch_size = 128\n",
    "num_hidden_nodes1 = 1024\n",
    "num_hidden_nodes2 = 100\n",
    "beta_regul = 1e-3\n",
    "\n",
    "graph = tf.Graph()\n",
    "with graph.as_default():\n",
    "    # Input data. For the training data, we use a placeholder that will be fed\n",
    "    # at run time with a training minibatch.\n",
    "    tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_size * image_size))\n",
    "    tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))\n",
    "    tf_valid_dataset = tf.constant(valid_dataset)\n",
    "    tf_test_dataset = tf.constant(test_dataset)\n",
    "    global_step = tf.Variable(0)\n",
    "\n",
    "    # Variables.\n",
    "    weights1 = tf.Variable(tf.truncated_normal([image_size * image_size, num_hidden_nodes1],\n",
    "                                              stddev=np.sqrt(2.0 / (image_size * image_size))))\n",
    "    biases1 = tf.Variable(tf.zeros([num_hidden_nodes1]))\n",
    "    weights2= tf.Variable(tf.truncated_normal([num_hidden_nodes1, num_hidden_nodes2],\n",
    "                                             stddev=np.sqrt(2.0 / (num_hidden_nodes1))))\n",
    "    biases2= tf.Variable(tf.zeros([num_hidden_nodes2]))\n",
    "    weights3= tf.Variable(tf.truncated_normal([num_hidden_nodes2, num_labels],\n",
    "                                             stddev=np.sqrt(2.0 / (num_hidden_nodes2))))\n",
    "    biases3= tf.Variable(tf.zeros([num_labels]))\n",
    "\n",
    "    # Training computation.\n",
    "    lay1_train = tf.nn.relu(tf.matmul(tf_train_dataset, weights1) + biases1)\n",
    "    lay2_train = tf.nn.relu(tf.matmul(lay1_train, weights2) + biases2)\n",
    "    logits = tf.matmul(lay2_train, weights3) + biases3\n",
    "    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=tf_train_labels, logits=logits) + \n",
    "                         beta_regul * (tf.nn.l2_loss(weights1) + tf.nn.l2_loss(weights2) + tf.nn.l2_loss(weights3)))\n",
    "\n",
    "    # Optimizer.\n",
    "    learning_rate = tf.train.exponential_decay(0.5, global_step, 1000, 0.65, staircase=True)\n",
    "    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)\n",
    "\n",
    "    # Predictions for the training, validation, and test data.\n",
    "    train_prediction = tf.nn.softmax(logits)\n",
    "    lay1_valid = tf.nn.relu(tf.matmul(tf_valid_dataset, weights1) + biases1)\n",
    "    lay2_valid = tf.nn.relu(tf.matmul(lay1_valid, weights2) + biases2)\n",
    "    valid_prediction = tf.nn.softmax(tf.matmul(lay2_valid, weights3) + biases3)\n",
    "    lay1_test = tf.nn.relu(tf.matmul(tf_test_dataset, weights1) + biases1)\n",
    "    lay2_test = tf.nn.relu(tf.matmul(lay1_test, weights2) + biases2)\n",
    "    test_prediction = tf.nn.softmax(tf.matmul(lay2_test, weights3) + biases3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Initialized\n",
      "Minibatch loss at step 0: 3.263462\n",
      "Minibatch accuracy: 7.0%\n",
      "Validation accuracy: 27.8%\n",
      "Minibatch loss at step 500: 1.064792\n",
      "Minibatch accuracy: 85.9%\n",
      "Validation accuracy: 85.5%\n",
      "Minibatch loss at step 1000: 0.655002\n",
      "Minibatch accuracy: 90.6%\n",
      "Validation accuracy: 86.7%\n",
      "Minibatch loss at step 1500: 0.862098\n",
      "Minibatch accuracy: 83.6%\n",
      "Validation accuracy: 87.4%\n",
      "Minibatch loss at step 2000: 0.491978\n",
      "Minibatch accuracy: 95.3%\n",
      "Validation accuracy: 87.9%\n",
      "Minibatch loss at step 2500: 0.561126\n",
      "Minibatch accuracy: 92.2%\n",
      "Validation accuracy: 88.6%\n",
      "Minibatch loss at step 3000: 0.528778\n",
      "Minibatch accuracy: 91.4%\n",
      "Validation accuracy: 89.0%\n",
      "Minibatch loss at step 3500: 0.415144\n",
      "Minibatch accuracy: 92.2%\n",
      "Validation accuracy: 89.2%\n",
      "Minibatch loss at step 4000: 0.520418\n",
      "Minibatch accuracy: 91.4%\n",
      "Validation accuracy: 89.0%\n",
      "Minibatch loss at step 4500: 0.456551\n",
      "Minibatch accuracy: 89.8%\n",
      "Validation accuracy: 89.4%\n",
      "Minibatch loss at step 5000: 0.440668\n",
      "Minibatch accuracy: 91.4%\n",
      "Validation accuracy: 89.5%\n",
      "Minibatch loss at step 5500: 0.414469\n",
      "Minibatch accuracy: 93.8%\n",
      "Validation accuracy: 89.6%\n",
      "Minibatch loss at step 6000: 0.381953\n",
      "Minibatch accuracy: 93.0%\n",
      "Validation accuracy: 89.6%\n",
      "Test accuracy: 95.2%\n"
     ]
    }
   ],
   "source": [
    "num_steps = 6001\n",
    "\n",
    "with tf.Session(graph=graph) as session:\n",
    "    tf.global_variables_initializer().run()\n",
    "    print(\"Initialized\")\n",
    "    for step in range(num_steps):\n",
    "        # Pick an offset within the training data, which has been randomized.\n",
    "        # Note: we could use better randomization across epochs.\n",
    "        offset = (step * batch_size) % (train_labels.shape[0] - batch_size)\n",
    "        \n",
    "        # Generate a minibatch.\n",
    "        batch_data = train_dataset[offset:(offset + batch_size), :]\n",
    "        batch_labels = train_labels[offset:(offset + batch_size), :]\n",
    "        \n",
    "        # Prepare a dictionary telling the session where to feed the minibatch.\n",
    "        # The key of the dictionary is the placeholder node of the graph to be fed,\n",
    "        # and the value is the numpy array to feed to it.\n",
    "        feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}\n",
    "        _, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict)\n",
    "        if (step % 500 == 0):\n",
    "            print(\"Minibatch loss at step %d: %f\" % (step, l))\n",
    "            print(\"Minibatch accuracy: %.1f%%\" % accuracy(predictions, batch_labels))\n",
    "            print(\"Validation accuracy: %.1f%%\" % accuracy(valid_prediction.eval(), valid_labels))\n",
    "    print(\"Test accuracy: %.1f%%\" % accuracy(test_prediction.eval(), test_labels))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "colab": {
   "default_view": {},
   "name": "3_regularization.ipynb",
   "provenance": [],
   "toc_visible": true,
   "version": "0.3.2",
   "views": {}
  },
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
