{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "name": "W5_Tutorial1.ipynb",
      "provenance": [],
      "collapsed_sections": [],
      "toc_visible": true,
      "include_colab_link": true
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "accelerator": "GPU"
  },
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "view-in-github",
        "colab_type": "text"
      },
      "source": [
        "<a href=\"https://colab.research.google.com/github/CIS-522/course-content/blob/main/tutorials/W05_Regularization/student/W5_Tutorial1.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "ijD6fBswN8Wu"
      },
      "source": [
        "# CIS 522 Week 5: Regularization\n",
        "\n",
        "\n",
        "__Instructor:__ Lyle Ungar\n",
        "\n",
        "__Content creators:__ Ravi Teja Konkimalla, Mohitrajhu Lingan Kumaraian"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "oI04YQ_2IVm8"
      },
      "source": [
        "#### Ensure you're running a GPU notebook.\n",
        "\n",
        "From \"Runtime\" in the drop-down menu above, click \"Change runtime type\". Ensure that \"Hardware Accelerator\" says \"GPU\".\n",
        "\n",
        "#### Ensure you can save!\n",
        "\n",
        "From \"File\", click \"Save a copy in Drive\""
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "MExFAXdn-ZOY",
        "cellView": "form"
      },
      "source": [
        "#@title Import functions\n",
        "from __future__ import print_function\n",
        "import torch\n",
        "import pathlib\n",
        "import random\n",
        "import torch.nn as nn\n",
        "import torch.nn.functional as F\n",
        "import torch.optim as optim\n",
        "from torchvision import datasets, transforms\n",
        "from torchvision.datasets import ImageFolder\n",
        "from torch.utils.data import DataLoader, TensorDataset\n",
        "import torch.nn.utils.prune as prune\n",
        "from torch.optim.lr_scheduler import StepLR\n",
        "import time\n",
        "import numpy as np\n",
        "import matplotlib.pyplot as plt\n",
        "import matplotlib.animation as animation\n",
        "import copy\n",
        "from tqdm import tqdm\n",
        "from IPython.display import HTML, display"
      ],
      "execution_count": 2,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "sCGgMRq8XFKl"
      },
      "source": [
        "#Intro: Regularization Overview\n",
        "To be watched **before** the pod meets."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "9T6BXJjHt_V9",
        "cellView": "form",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 519
        },
        "outputId": "95697320-d374-429d-c1a1-a3c5a64d5af9"
      },
      "source": [
        "#@title Video : Introduction to Regularization\n",
        "try: t1;\n",
        "except NameError: t1=time.time()\n",
        "\n",
        "from IPython.display import YouTubeVideo\n",
        "video = YouTubeVideo(id=\"bc1nsP4htVg\", width=854, height=480, fs=1)\n",
        "print(\"Video available at https://youtube.com/watch?v=\" + video.id)\n",
        "\n",
        "video"
      ],
      "execution_count": 5,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Video available at https://youtube.com/watch?v=bc1nsP4htVg\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "execute_result",
          "data": {
            "text/html": [
              "\n",
              "        <iframe\n",
              "            width=\"854\"\n",
              "            height=\"480\"\n",
              "            src=\"https://www.youtube.com/embed/bc1nsP4htVg?fs=1\"\n",
              "            frameborder=\"0\"\n",
              "            allowfullscreen\n",
              "        ></iframe>\n",
              "        "
            ],
            "text/plain": [
              "<IPython.lib.display.YouTubeVideo at 0x7facc453ccc0>"
            ],
            "image/jpeg": "/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEABALDBoXFhwaGRodHRsfIiclIyEiGDMoJSclLycxNS0oLjA1SFxCNThLOS0tRWFFS1NWW1xbNkFlbWRYbFBZW1cBERISGRYYLxoaJVc3LTZXV1dXV1dXXVdXV2NXV1dXV1dXV1dXV2ReXVdXV1dkV1pjV1dXV15XV1dhV1dZV1dXV//AABEIAWgB4AMBIgACEQEDEQH/xAAbAAEAAQUBAAAAAAAAAAAAAAAABAECAwUHBv/EAEoQAAIBAwAFCAgDBQQIBwEAAAABAgMEEQUSITGRExRBUVNhktIGFhciVHGB0TJSoRUjQnKxBzM08DVzorLBwuHxJENidJPD4kT/xAAZAQEBAQEBAQAAAAAAAAAAAAAAAQIDBAX/xAAgEQEBAQEAAQQDAQAAAAAAAAAAARECIRIxQYEDIlET/9oADAMBAAIRAxEAPwDn4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMytpNdHEc1l3cQMIM3NZd3Ec1l3cQMIM3NZd3Ec1l3cQMIM3NZd3Ec1l3cQMIM3NZd3Ec1l3cQMIM3NZd3Ec1l3cQMIM3NZd3Ec1l3cQMIM3NZd3Ec1l3cQMIM3NZd3Ec1l3cQMIM3NZd3Ec1l3cQMIM3NZd3Ec1l3cQMIM3NZd3Ec1l3cQMIM3NZd3Ec1l3cQMIM3NZd3Ec1l3cQMIM3NZd3Ec1l3cQMIM3NZd3Ec1l3cQMIM3NZd3Ec1l3cQMIM3NZd3Ec1l3cQMIM3NZd3Ec1l3cQMIM3NZd3Ec1l3cQMIM3NZd3Ec1l3cQMIM3NZd3Ey0NHzqS1U4p97AiA2q9H635qfif2Ll6O1vzU/E/sBqAbn1ar/mp+J/YerVf81PxP7AaYG3fo5X/NT8T+xcvRqv8Amp+J/YDTA3S9GK/56Xif2L4+ilw/46Xil9gNED0cPQu6lunR8UvKZo+gV2//ADKHjl5SaPLA9V6gXfaW/jl5S1+gl2v/ADKHjl5Ro8uD0r9B7rtKHjl5S1+hV1+ej45eUo84D0XqZc/no+KXlKP0Oufz0fFLyga+G5fIqUhuXyKhkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACdohZrJdzIJP0L/fr+Vgb5UO7HyZdyb6+JlW8vSDTAk+ovwXSjuK4JRiaLki5oqkFViiTRiYoIk0kS0TKCwS4siU5GVVFnGVn5mWsZ3IxSYyWyZqRlYzGzIyxmkY2Y5oysxyA5tDcvkVKQ3L5FQyAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABsNCL/wAQv5Wa82Og/wDEL+WX/AD0sYmTBbBF+A0taGC5glGNouSDRVIjS+JdUrai73uKRIV/V1Xl7cLoJWuZ5SYXDyZlUZqLTSKk9XUlF8SXXuZQjmMdZlkdrY2tvc7dV9O4lNnnbW8lUypx1Wurcb2lPWin3GscOl7LGVLWHNSRjkXsskVHNobl8ipSG5fIqQAAABUAUBUoAAAAAqBQAAAAAAKgUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAANjoL/EL+WRrjY6D/wAQv5ZAeoiXmKDMmQ0MBlMkqxVlEGERVyZBvPeUmtvUTkayXuSkujJWuJrXKnUcvewo9UUbC5pTlThqPbq4eOvr/qYalVZSzv39xsIySgtq3ljtecjFYW9VJco0+9LBuLOqmnHpX9CEquwzaPjhzl0t/oVx6mNgyjLVINlxx1RljZdIxsYjnMNy+RUpDcvkVMqAFQN1pXRtKNhaXVGLWvmFX3m1r47922MuJl9Et
DUrudV188nCKS97HvyeI7V8ns7zLod840VeW++VLFaC/VpeF8TGq7tNGW8o7J17jlu9wpvZ+qjxCtFWoyhOVPGZxk4Y65J4/qbf0o0fRtKlKjST11SUqjcm8yezp3bnxNnX0aqunKTSzTralwurCjl/7Uf1MVrQjpHStxUqJzpU9aWpv1lDEYx+Txn/ALgeXxs1ujrxs4lD1stIaY19aNCcIdFFW65NL8u7L4kXTWhk7u1UIOirtRbptY5OWVrr6Z3BHnYpt4SbfcslEz2ekZX9Co6Fhb1KVvT2KUKKbqPpk21t2kPTdrUrWSu69F0binUUKj1NXlIvGrLHXlpcQrX6VsKdK0sasE1OtCbm9ZvLWrjZ0b2ajJ6u60bVu7XRdKktrpVMt/hisw2shaWv6dCm7K0/u08VquPeqzWxruiv895GhPQej1jbVLa6r3NOVRUUmlGbi8Yedz7jz56D0evbeFreUbipKCqxilqxzJrbnV6M7ekCnPNFfB3H/wA//wCjR15Rc5OCcYOT1U3lpZ2Js9DZvRVecaPI16Lk9WNV1c+892VnH6YIH7zRd/ujOdJ7MrZJNbH3bGBqgZru4darOrJJOcnJpLYm30GEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGw0J/iF/KzXk/Qz/fr+VlHposyZI8JPu4mX6hpe2UyU6ASwXZLeUinjWWerO0I8LpS8lUuKkm90mls6E8ImK99HaQNJLEl3r+jPDK4mt0pL5PBKsb6aqLWlKSezbJsuLLlbevVW1ZL7au44XR0bSLCpl++k9brJlvCOU8LZuy84Ejpet+Wxt7nX371vIVz6R8g5wjqtprGU/ruFSo1rVILPXHpeEeWqNzk5Z2t5K52vVaP9Kvxcq092OjHX0HobO7VaCnHdnrOZShLufykmeg0VpepTpqPubHucsMS4z1L1de0ci1s8/8Atio08aucdG0waK0vcupq10nF4W7GHk1LGLzWhhuXyKlIbl8iphQAAbz0Pu1SvoqbxCrGVOWXs2rKz9Vj6j0sqQVenQptOnb0oU4vOcvGW/6GjaCQV7jRV/TjoxXLkuXoUalCKzt2yWp/y/qef9GNJQtbh8q2qVWDpzkt8c7pfQ0+CoR6WpoC/wBb91c8rS/hqq8ajjob27H8smsrV52t3CXL84dGUZaym5Rz/FFN8Mmr1V1LgXAes0lY17yo7mwuJTp1MN0+cuEqcsbU03jBp9LWtahGMa11yk5Z1qSrSnqpbnLoNVhdQSwFepv9L1aGjrCFCrqNxblqv3sxcdVPu2vZ0mDS8Kd9Q57R1YVo7LilnGX+ePX/AJ6UedwGgBtND6Oo3MakJV1Sr7OSU9kJdab6/wDO01YCPT2nonVpVI1LqrQp0YNSlJVct4ecLZ3EW8ktKaTepJU41HqxlPqjHfjreNxotVdQaAzXdB0qs6bkpOEnHMXseHvRhAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABO0P/fr+VkElaOqxhU1pPC1Wt3TsKPSxZmTNStJ0l/H/ALL+xkjpeiv4/wDYf2KNm3sK5NTX07TX4VKfyWP6kWrp6bTUIqHe9rHhY3VesoRcm0klk59KTbbe97TZ3VWVRPWk5PvZA5CXV+qItmMRdSnqyUuppl3N59X6oc3n1fqgN9TkvxLc9ps7epFrceZsqs6bSksw+a2G2o3dOP8AFs+TEb8VtNaK27kjxNaSc5OKwm20upZN3pS/c46lLc17z3fTaaXm8+r9UVm/xjyVRfzefV+qHN59X6oiLXJ9DfE2ugZRlOSnlyS1ovWeNn+UR6FGm6bU4tTzslktt3Kk24rbjGc7MFnhLd8L4bl8ipSG5fIqZQKlAAyMnq9GXKttDyrxpUZ1FX1f3lPWWHj6llj6QwuKsKN1aWzp1JKGadLVlFt4T3vpa6gPMFCfpqw5td1aEctRklHpbUknFd72pGV+jl6oa/NqmrjPRnw5z+gGrBU2NvoC8qwVSFvNw
aynsWV1pN5YGtBnq2dWFNVJ05Rg5OCbWPeWcx68rD4FK1rUpwhOcHGNRZg3ukutcQMIM1K1qThOpGDcKeNeS3RzuybilSj+xak9Va3OIrWxtxhbMgaIoS7HRte5bVClKpje1uXzb2Fb/RdxbY5elKnnc3hpvqytmQIYJFKyqzp8pCnKUNZQyln33jEcb87UbHRdnUoXip1rN15um3yLazh7pdXQ+IGmBLtNHVrly5CjKervUf4c5wtpIfo7epZdrUx9PuBrAZrW1qV5qFKEqkn0RWdnX3Il3mgru3g51aE4wW+WxpfPD2Aa8G99GdBu6lOdSlKVHk56slLC5RNYWz6kbRtnUoXlOFe0dWbTfISxmXuvb1bMN/QDVlDLX21J4hq5m8QW3V97ZFdeNxsPVu+1Nfm1TG/oz4c5/QDVAuUG5aqT1s6uOnOcY4k6joO7qVJU4283OGFJbEotrKTbeM4a6QNeCReWVW3nqVqcqcsZw+ldae5kcAAAAAAAAAAAAAAAAAVKACoKAAVKAAACgAAAAIAAAAAAGAwKQ3L5FSkNy+RUAAAPXaMhQloWSuZzhT5xvgsyzswX2tnYWtGOkKfL3EYTwk8LVn0OS2Y24696NTHSFL9kSt9b986+vq6r/Ds253Fno5pWFvUlTrrWtqy1ascZxs2Sx+n/AGCtn6OVJXN1eXs+TVaENaGu8QjKSaTb6ko4yRI6PulU5VX9ryuc6/PtueG7u3GDRukaVldVFF8vazThLZhypvdseNq+5fPRmj29aOkNWnv1XbydRLq+f0A2N5a0a+l7bEqU1VUZVVCSlHlIqTlx1UanT+la1a7qt1JxUJyjCMZtKKi2tmOnZvIzuoULpVbXW1Kc04cpveN+cde36M218tG3c3cc5nbyn706TouXvdLi1s2gNMXdSvoiznVlrT5eacnveFNLPW8Ix6f/AMDo3/Uy/wCUzacdL9k2nIRnGny89XXfvPGunJ/N7fqWxrWl5ZW9Ktcc3rW6ccum5RlF/L5ICzQn+jNJfKl/VlKX+g6v/uY/0iZIXllQsru3pVJzqVFHE5QaU2nuiv4Uu/fkh076ktFTt3L9666mo6r/AA4W3O4CRa2lbmMHWvFbW0pPk44blN9LxHDa3k6lSf7KvIu4jcUo6kqck3mLyspqW2PRs7yDzi3vLOhRq1ub1rdOKcoOUJweOrc9iLqNzaULG7oQrupVqqOHyTjF4e6OerreAMmjLypb6GrzpS1Z85UVLG1ZhBPHU8dJZ6F1ZT0lGU5SnLkp7ZSbfR0sh0r2mtFVbdy/eyuIzUdV/hSjtzu6GV9Fb6lbXiq1pasFCazqt7XjGxBGsoV6kHinOcXLCxCbTb6Fs3npr+7qaPtOburOd3XWtVbqOXJQe6Kzuf8A1fUQPRSva0a0q1zPVlBfuk6bktZ59546v+Jfc2tjVnOpPSUpTm3Jt2k9r/zsCsegLevKlXlC4VtbpRVWo30rdFY252929G59FqCVxONO8jcUZU5qpTalF90tWW9b1ldZptEXlCVnWs7ibpKpNVIVNVySmktkkuj3US9A1LOxueUndco9SUU4UZakc9be1v5IDF6E158tUhry1ebVJaus8a2Y7cdfeYvRCtOppO2lOcpy9/bKTb/up9LMPopeUre5zXbjCdKVNySzhy1duz5EjRcray0hbzVyqtKClrzVKSSbhJJY6d63ASPR1RhcX9y4qUraFWcE/wA2Zbf0/U0v7VueV5bl6nKZzra7/pux3biXorS6trurUceUo1XOM4/mhKTedvT/ANSR+zdGa2vz6XI7+T5GXKY/Ln/jgDP6RxjK5sbhRUZXMaU5pfm1o7f1/Qx+mV/Wd9Up8pJQhq6sVLCWYpt7N7y95E0rpdXN3SqKOpRpOEYR/LCMk87On/oWekl3Tr3tarSlrQk44eGs4glue3egJ+nKsqujLCpNuU81I6z2tpPCy/ojzhuL++pT0daUYyzUpSqOcdV7MybW3czTh
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAYFIbl8ipSG5fIqAAAAAAAABdCWq09jw08NZWzrXUbuelrKrLlK1guUe18ncShBvr1VuNEANhpbS0rpwWpGnSprVp0ofhiv+L2GvAAFSgAqCgAqUAAqCgAqCgAqCgAqCgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGAwJVK7goxXN6L2Lbme3Z/MXc8h8NR41PMQobl8kVAmc8h8NR41PMOeQ+Go8anmIhQCZzyHw1HjU8w55D4ajxqeYhgCZzyHw1HjU8w55D4ajxqeYhgCZzyHw1HjU8w55D4ajxqeYhgCZzyHw1HjU8w55D4ajxqeYhgCZzyHw1HjU8w55D4ajxqeYhgCZzyHw1HjU8w55D4ajxqeYhgCZzyHw1HjU8w55D4ajxqeYhgCZzyHw1HjU8w55D4ajxqeYhgCZzyHw1HjU8w55D4ajxqeYhgCZzyHw1HjU8w55D4ajxqeYhgCZzyHw1HjU8w55D4ajxqeYhgCZzyHw1HjU8w55D4ajxqeYhgCZzyHw1HjU8w55D4ajxqeYhgCZzyHw1HjU8w55D4ajxqeYhgCZzyHw1HjU8w55D4ajxqeYhgCZzyHw1HjU8w55D4ajxqeYhgCZzyHw1HjU8w55D4ajxqeYhgCZzyHw1HjU8w55D4ajxqeYhgCZzyHw1HjU8w55D4ajxqeYhgCZzyHw1HjU8w55D4ajxqeYhgCZzyHw1HjU8w55D4ajxqeYhgCZzyHw1HjU8w55D4ajxqeYhgCZzyHw1HjU8w55D4ajxqeYhgCZzyHw1HjU8w55D4ajxqeYhgCZzyHw1HjU8w55D4ajxqeYhgCZzyHw1HjU8w55D4ajxqeYhgCZzyHw1HjU8w55D4ajxqeYhgCZzyHw1HjU8xSV5DD/wDDUeNTzEQPcBSG5fJFSkNy+SKgCpQqBsKWhqko05cpQi6kVKEZV1GTTeFsfW0QatOUJyhNOM4tqUXvTW9HpOSbhYv9nzuf3FP306mF78vder7uzft69pSpaK7rV6DqRlKlcOpymxOVGTxV3fleHxA83Tg5yjFb5NRXzbwiT+zqma2UkqDaqScvdUk8aqfS29yRkncqtexqRSjGVaGqksYipJRXBI2mnmrlV426lF29WrKpR3ueZPNddb6GuhYA85no6RnvPWSjPnMqTgv2bybalya5Pk+TyqiqY/HrY25zkxWNzONXR1JaqhVow5RcnH38ymnrNrLA8wEzdObVpaUouMVVqVYyk4JvCqxSy3t1VnP0NhpNa1vcxqxrKNGdNRlUoRgl+91ZOnqpYTWdm3ZgDzVtbyquShhuMJTe3+GKy/0LrS1nWmoQxrNSe142Ri2/0TPT1HcKvdw5PFrG3rcniilTUeTeq4Txtyu/btNP6MY55DKbWpVylvxyUsgQFaT5BV3hU3LVjl7ZPp1V0pdLMCaPROFO5rWtx/8Aya0KcqTeyg+iD/8ATJ/xdOXksvHcStrvnkHBQ1eRboqGrU1/wU2ksx1c537NoGgyusk0bGdSMJR1cVJuEczS95JN5b2JYZ6GvUlLSF1CMYudGFR29NUo/wB5iOWlj3pYy1nJdactPmLuYPWlc1Px0lFyXJrDksLPza6APJay6yreDe2F5UpUdGqDilUlJT/dxbmuXxhtrOMNmWUKtOhPmUG585rQq6lJTkkninBpp4hjPcB50ZW7O02PpGtW9rbEsamyKwl+6hlLBvORap1qElVnCFrKSlyEVQclTUlKDSznPTl52geS1l1l0I60oxWMyaS29LeEelsriXOLK393kattT148nH3s0pbW8Ze5GOydeNGxVrBypTw6rVFTzU5T3lN4eMLGN2zaBopWslW5F41+U5Pfs1tbV3/MtrUnTnKEvxRk4vb0p4ZsK3+lH/7z/wC4k1KVS3vqtWvQqq3lVqRnKVGShyc5NZTax0poDUWtvKtUjShjXlnGX
jcm3+iMGstm3eeptLbmlza2r1ZVHOpUqSXVyc400n1aqcsf+pFNH+5bWvIxryjOP7xUrWNVTnre9Co28rZhJbNm0DzBJrWU4V3byS5RTUMZ2azxjb9THeOPKVNSLjDWlqxe9LLxH6bjf3mj61XSiqwpylRnVhVjVUW6ep7rcnLcsYYHnq9N05yhLClCTi9vSnhljZv7i/lTt6lahJJzvKrU9SLeq4prDaeESrqE4VL2VpD9+qtPKhTUpxpSp5k4Rx0zxnCA8tkZPUzpTVaM5Jxqws4zlCnRg6jk5tPVi1hPreNm0rWerUjWcHruwrTaq0km5RctVzjjDeMbcbQPKgvq1ZTk5zeZSeW8JZf02FgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD3APcBSG5fJFSkNy+SKgAABeqsksKUsdWs8Ge3vOSp1IRhHXqJxdTL1lB4zFLcs43kUAVK6zznLz152loAu15aurrPV36us8Z+Q1ns2vZu27i0AVb6C6VWT3yk+jbJvZ1FgAv5SWEtaWFuWs8L5ItTxu2FABXOxrOx79u8rKpKWFKTaW7Mm8fLqLQBdrPOcvPXnbxKupJvLlJvflyec9ZYAK5eza9m7buJVpeQpqSnRjUy85c5xl8m4vbHufWyIAM13cyrVZVZ41pPLwsJbMJJdSSSLOVlhR1paq3LWeF9CwAXazznLyt20RqSSaUmk96Umk/mi0AVy85zt6zNSuMSTqJ1YdMJVJJPgYA2Bnu7udarKrJ4lJ52bEuhJdSS2GONSUU1GUknvSk0n8zBy8e/gOXj38AMheqktXV1par/h1njgYOXj38By8e/gBlz0dBJtLpU3JzpqpnG1zlGS+UovPz+hB5ePfwHLx7+AE67vp1ainsp6sVGCg2lGK3JPOevb3kdzb3tv69e8w8vHv4F0aibwgLygAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD3APcBSG5fJFSkNy+SKgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAApLcypSW5gbD0a0C72rq5wsazfVHON3S2bn0g9D4WlJVYS14JpSUlhrO57N6yab0b087Kqp4ysOMl1xzn6M9ZpXTEdJaNqzoZXJTTqwe2Wqtufl0/RmL8vZzZLzmZ8vF82h+VHo9BehsLqjy1SWpGWdVRWW8bMvJ52NeD3SPS6C9Lla0eRqQc4xb1XF4aT6HkzzL8vR+b0en9M36abTGhFaV3SklLYpRkumL3P8ARkWhYxqTjCME5Saivm3hEzS+lnd13VliOxRjFdEVuX6ss0PmtdUoUZfvNdNPG7Dy2+5YFnUrfP8An6P2zfp6G4/s+iqLcamaqWcauIt9S6UeGjDVqNdR0PTnpvToyq0KS1qkcx18+6njbhdOH/Q55GprVG+s6T3fP6887fdlABXAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA9wD3AUhuXyRUpDcvkioAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAKS3P5FSktz+QEQ2/oxpbmd1Gcv7qfuVV0OD6fpv4moLqeNZa27O35BpvtPaElZ16ihGToP34TUW46r3Jvds3cDVyljedltXSlQhyeq6LgtXG2OrjdwOL6RnB1qnJf3evLU/ly8fodufyZGbPLLCLk1GKbk9ySy38keisKf7Lsal3KLjc3H7ujGSw4rpljo3Z+i6yZ/ZrKm5V845ZKOOvU25x9cZ+hl/tLlTdOgsrllJvGdqg1t/XHAnfXq8EmOffq+syUPxfQxmSh+L6HJpIAAZAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPcA9wFIbl8kVKQ3L5IqAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACktz+RUpLcwIgADTYaH0xVs69OpGUtWMsyhnZKL/EsdeCf6YaNhSrxuKO23uVykGtyb2yX65+vcaA6V6GaOhcaMpq5SqwVWU6cWvwarxjjl/UDRVG9FaMUV7t3ebW9zhSX9Ht4t9R5SpUlJ5
k22+lvLPTf2h28oX+s56yqU4uKf8KTa1fl0/VnlxoGSh+L6GMyUPxfQCQAAyAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAe4B7gKQ3L5IuOiWHoxZyoUpSopuVODb15b3Fd5I9VbHsF45fcLjmYOmeqtj2C8cvuPVWx7BeOX3BjmYOmeqtj2C8cvuPVWx7BeOX3BjmYOmeqtj2C8cvuPVWx7BeOX3BjmZQ6Z6q2PYL/AOSX3K+qtj2C8cvuDHMgdN9VbHsF45fceqtj2C8cvuDHMgdN9VbHsF45fceqtj2C8cvuDHMgdN9VbHsF45fceqtj2C8cvuDHMgdN9VbHsF45fceqtj2C8cvuDHMgdN9VbHsF45fceqtj2C8cvuDHMgdN9VbHsF45fceqtj2C8cvuDHMgdN9VbHsF45fceqtj2C8cvuDHMg1sOm+qtj2C8cvuPVWx7BeOX3BjlfIPrQ5B9x1T1VsewXjl9x6q2PYLxy+4VyvkH3G40Lp+6sYyhScJQbzqzi2k+tYaaPeeqtj2C8cvuPVWx7BeOX3A5rpK6rXdZ1q01Kb2bsJJbkl1EXkH3HVPVWx7BeOX3HqrY9gvHL7gcr5B9xdTpNPOw6l6q2PYLxy+49VbHsF45fcI5kDpvqrY9gvHL7j1VsewXjl9wY5kDpvqrY9gvHL7j1VsewXjl9wY5kDpvqrY9gvHL7j1VsewXjl9wY5kDpvqrY9gvHL7j1VsewXjl9wY5kDpvqrY9gvHL7j1VsewXjl9wY5kDpvqrY9gvHL7j1VsewXjl9wY5kDpvqrY9gvHL7j1VsewXjl9wY5kDpvqrY9gvHL7j1VsewXjl9wY5kDpvqrY9gvHL7j1VsewXjl9wY5kDpvqrY9gvHL7j1VsewXjl9wY5kDpvqrY9gvHL7j1VsewXjl9wY5kDpvqrY9gvHL7j1VsewXjl9wY5kHuOm+qtj2C8cvuWVfRayUZPkFuf8cur5gxstF/4ah/qof7qJRF0X/hqH+qh/uolBQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACyt+CX8r/oXllb8Ev5X/QDBov8Aw1D/AFUP91Eo5hQ9PbunThBU7fEIqKzCWcJY/MZPaHednb+CfmA6WDmntDvOzt/BPzD2h3nZ2/gn5gOlg5p7Q7zs7fwT8w9od52dv4J+YDpYOae0O87O38E/MPaHednb+CfmA6WDmntDvOzt/BPzD2h3nZ2/gn5gOlg5p7Q7zs7fwT8w9od52dv4J+YDpYOae0O87O38E/MPaHednb+CfmA6WDmntDvOzt/BPzD2h3nZ2/gn5gOlg5p7Q7zs7fwT8w9od52dv4J+YDpYOae0O87O38E/MPaHednb+CfmA6WDmntDvOzt/BPzD2h3nZ2/gn5gOlg5p7Q7zs7fwT8w9od52dv4J+YDpYOae0O87O38E/MPaHednb+CfmA6WDmntDvOzt/BPzD2h3nZ2/gn5gOlg5p7Q7zs7fwT8w9od52dv4J+YDpYOae0O87O38E/MPaHednb+CfmA6WDmntDvOzt/BPzD2h3nZ2/gn5gOlg5p7Q7zs7fwT8w9od52dv4J+YDpYOae0O87O38E/MPaHednb+CfmA6WDmntDvOzt/BPzD2h3nZ2/gn5gOlg5p7Q7zs7fwT8w9od52dv4J+YDpYOae0O87O38E/MPaHednb+CfmA6WDmntDvOzt/BPzD2h3nZ2/gn5gOlg5p7Q7zs7fwT8w9od52dv4J+YDpYOae0O87O38E/MPaHednb+CfmA6WDmntDvOzt/BPzD2h3nZ2/gn5gOlg5p7Q7zs7fwT8w9od52dv4J+YDpYOae0O87O38E/MPaHednb+CfmA6WDmntDvOzt/BPzD2h3nZ2/gn5gOlllb8Ev5X/Q5v7Q7zs7fwT8xSX9oN401ydvtWPwS8wHkwAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAf/Z\n"
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 5
        }
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "OLIyaJBeIDs1",
        "cellView": "form"
      },
      "source": [
        "#@markdown What is your Pennkey and pod? (text, not numbers, e.g. bfranklin)\n",
        "my_pennkey = 'moji' #@param {type:\"string\"}\n",
        "my_pod = 'discreet-reindeer' #@param ['Select', 'euclidean-wombat', 'sublime-newt', 'buoyant-unicorn', 'lackadaisical-manatee','indelible-stingray','superfluous-lyrebird','discreet-reindeer','quizzical-goldfish','astute-jellyfish','ubiquitous-cheetah','nonchalant-crocodile','fashionable-lemur','spiffy-eagle','electric-emu','quotidian-lion']"
      ],
      "execution_count": 6,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "qH6yOo8LOHFG"
      },
      "source": [
        "---\n",
        "# Learning Objectives\n",
        "\n",
        "We show how\n",
        "\n",
        "\n",
        "1.   Big ANNs are efficient universal approximators due to adaptive basis functions\n",
        "2.   ANNs memorize some data but generalize well\n",
        "3.   Regularization as shrinkage of overparameterized models: L1, L2, early stopping\n",
        "4.   Regularization by Dropout\n",
        "5. Regularization by Data Augmentation\n",
        "6. Perils of Hyper-Parameter Tuning\n",
        "7.   Rethinking generalization   "
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "0LMCVKg3EOSn"
      },
      "source": [
        "## Recap the experience from last week\n",
        "\n",
        "What did you learn last week? What questions do you have? [10 min discussion]"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "0q2tL2Q5CAde",
        "cellView": "form"
      },
      "source": [
        "learning_from_previous_week = \"\" #@param {type:\"string\"}"
      ],
      "execution_count": 7,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "GvXxa4b2l4m4"
      },
      "source": [
        "# Setup\n",
        "Note that some of the code for today can take up to an hour to run. We have therefore \"hidden\" that code and shown the resulting outputs.\n",
        "\n",
        "[Here](https://docs.google.com/presentation/d/1n4eA5VGG8ab0mkW1kJK5egaldJR4cnpFAHDVbkVPnRI/edit#slide=id.gb88533964a_0_198) are the slides for today's videos (in case you want to take notes). **Do not read them now.**"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "wDBtaMET-fNA",
        "cellView": "form"
      },
      "source": [
        "# @title Figure Settings\n",
        "import ipywidgets as widgets\n",
        "%matplotlib inline \n",
        "fig_w, fig_h = (8, 6)\n",
        "plt.rcParams.update({'figure.figsize': (fig_w, fig_h)})\n",
        "%config InlineBackend.figure_format = 'retina'\n",
        "SMALL_SIZE = 12\n",
        "\n",
        "plt.rcParams.update(plt.rcParamsDefault)\n",
        "plt.rc('animation', html='jshtml')\n",
        "plt.rc('font', size=SMALL_SIZE)          # controls default text sizes\n",
        "plt.rc('axes', titlesize=SMALL_SIZE)     # fontsize of the axes title\n",
        "plt.rc('axes', labelsize=SMALL_SIZE)    # fontsize of the x and y labels\n",
        "plt.rc('xtick', labelsize=SMALL_SIZE)    # fontsize of the tick labels\n",
        "plt.rc('ytick', labelsize=SMALL_SIZE)    # fontsize of the tick labels\n",
        "plt.rc('legend', fontsize=SMALL_SIZE)    # legend fontsize\n",
        "plt.rc('figure', titlesize=SMALL_SIZE)  # fontsize of the figure title"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "1lf_iMdDtkH0",
        "cellView": "form"
      },
      "source": [
        "# @title Loading Animal Faces data\r\n",
        "%%capture\r\n",
        "!rm -r AnimalFaces32x32/\r\n",
        "!git clone https://github.com/arashash/AnimalFaces32x32\r\n",
        "!rm -r afhq/\r\n",
        "!unzip ./AnimalFaces32x32/afhq_32x32.zip"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "mvhx88j7e6m2",
        "cellView": "form"
      },
      "source": [
        "# @title Loading Animal Faces Randomized data\n",
        "%%capture\n",
        "!rm -r Animal_faces_random/\n",
        "!git clone https://github.com/Ravi3191/Animal_faces_random.git\n",
        "!rm -r afhq_random_32x32/\n",
        "!unzip ./Animal_faces_random/afhq_random_32x32.zip\n",
        "!rm -r afhq_10_32x32/\n",
        "!unzip ./Animal_faces_random/afhq_10_32x32.zip"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "RMuvvw3VHm1Z",
        "cellView": "form"
      },
      "source": [
        "#@title Seeding for Reproducibility\n",
        "seed = 90108\n",
        "random.seed(seed)\n",
        "np.random.seed(seed)\n",
        "torch.manual_seed(seed)\n",
        "torch.cuda.manual_seed(seed)\n",
        "torch.cuda.manual_seed_all(seed)\n",
        "torch.backends.cudnn.deterministic = True\n",
        "torch.backends.cudnn.benchmark = False\n",
        "torch.backends.cudnn.enabled = False\n",
        "torch.use_deterministic_algorithms(True)  # torch.set_deterministic was deprecated and later removed\n",
        "def seed_worker(worker_id):\n",
        "    worker_seed = (seed + worker_id) % 2**32  # distinct per-worker seed; `seed % (worker_id+1)` always gave worker 0 the seed 0\n",
        "    np.random.seed(worker_seed)\n",
        "    random.seed(worker_seed)"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "RxCVV-2yt-GX",
        "cellView": "form"
      },
      "source": [
        "# @title Helper functions\n",
        "def imshow(img):\n",
        "    img = img / 2 + 0.5     # unnormalize\n",
        "    npimg = img.numpy()\n",
        "    plt.imshow(np.transpose(npimg, (1, 2, 0)))\n",
        "    plt.axis(False)\n",
        "    plt.show()"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "RSaZl2xSdALk"
      },
      "source": [
        "Now, let's define an Animal Net model, plus train, test, and main functions which we will use quite frequently this week."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "QSfEJun00dwZ"
      },
      "source": [
        "##Network Class - Animal Faces\n",
        "class Animal_Net(nn.Module):\n",
        "    def __init__(self):\n",
        "        torch.manual_seed(104)\n",
        "        super(Animal_Net, self).__init__()\n",
        "        self.fc1 = nn.Linear(3*32*32, 128)\n",
        "        self.fc2 = nn.Linear(128, 32)\n",
        "        self.fc3 = nn.Linear(32, 3)\n",
        "\n",
        "    def forward(self, x):\n",
        "        x = x.view(x.shape[0],-1)\n",
        "        x = F.relu(self.fc1(x))\n",
        "        x = F.relu(self.fc2(x))\n",
        "        x = self.fc3(x)\n",
        "        output = F.log_softmax(x, dim=1)\n",
        "        return output"
      ],
      "execution_count": 8,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "gXPoWJta6hs8"
      },
      "source": [
        "The train function takes in the current model along with the train_loader and loss function and updates the parameters for a single pass of the entire dataset. The test function takes in the current model after every epoch and calculates the accuracy on the test dataset.\n"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "m6umnH-Znil7"
      },
      "source": [
        "def train(args, model, device, train_loader, optimizer, epoch,reg_function1=None,reg_function2=None,criterion=F.nll_loss):\n",
        "    \"\"\"\n",
        "    Trains the current input model using the data\n",
        "    from Train_loader and Updates parameters for a single pass\n",
        "    \"\"\"\n",
        "    model.train()\n",
        "    for batch_idx, (data, target) in enumerate(train_loader):\n",
        "        data, target = data.to(device), target.to(device)\n",
        "        optimizer.zero_grad()\n",
        "        output = model(data)\n",
        "        if reg_function1 is None:\n",
        "            loss = criterion(output, target)\n",
        "        elif reg_function2 is None:\n",
        "            loss = criterion(output, target)+args['lambda']*reg_function1(model)\n",
        "        else:\n",
        "            loss = criterion(output, target)+args['lambda1']*reg_function1(model)+args['lambda2']*reg_function2(model)\n",
        "        loss.backward()\n",
        "        optimizer.step()\n",
        "        \n",
        "\n",
        "def test(model, device, test_loader, loader = 'Test',criterion=F.nll_loss):\n",
        "    \"\"\"\n",
        "    Evaluates the current model on the given loader and returns accuracy as a percentage\n",
        "    \"\"\"\n",
        "    model.eval()\n",
        "    test_loss = 0\n",
        "    correct = 0\n",
        "    with torch.no_grad():\n",
        "        for data, target in test_loader:\n",
        "            data, target = data.to(device), target.to(device)\n",
        "            output = model(data)\n",
        "            test_loss += criterion(output, target, reduction='sum').item()  # sum up batch loss\n",
        "            pred = output.argmax(dim=1, keepdim=True)  # get the index of the max log-probability\n",
        "            correct += pred.eq(target.view_as(pred)).sum().item()\n",
        "\n",
        "    test_loss /= len(test_loader.dataset)\n",
        "    return 100. * correct / len(test_loader.dataset)\n",
        "\n",
        "def main(args, model,train_loader,val_loader,test_data,reg_function1=None,reg_function2=None,criterion=F.nll_loss):\n",
        "    \"\"\"\n",
        "    Trains the model with train_loader, evaluating on train_loader and val_loader after each epoch. (Note: the test_data argument is currently unused.)\n",
        "    \"\"\"\n",
        "\n",
        "    use_cuda = not args['no_cuda'] and torch.cuda.is_available()\n",
        "    device = torch.device('cuda' if use_cuda else 'cpu') \n",
        "\n",
        "    model = model.to(device)\n",
        "    optimizer = optim.SGD(model.parameters(), lr=args['lr'], momentum=args['momentum'])\n",
        "\n",
        "    val_acc_list, train_acc_list,param_norm_list = [], [], []\n",
        "    for epoch in tqdm(range(args['epochs'])):\n",
        "        train(args, model, device, train_loader, optimizer, epoch,reg_function1=reg_function1,reg_function2=reg_function2)\n",
        "        train_acc = test(model,device,train_loader, 'Train')\n",
        "        val_acc = test(model,device,val_loader, 'Val')\n",
        "        param_norm = calculate_frobenius_norm(model)\n",
        "        train_acc_list.append(train_acc)\n",
        "        val_acc_list.append(val_acc)\n",
        "        param_norm_list.append(param_norm)\n",
        "\n",
        "    return val_acc_list, train_acc_list, param_norm_list, model, 0"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "2icaflJHWPm1"
      },
      "source": [
        "#Section 1: Regularization is Shrinkage"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "0qn2FpI2Wdgn",
        "cellView": "form",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 519
        },
        "outputId": "31e6c644-3fb8-4e76-c21a-e2b41743f464"
      },
      "source": [
        "#@title Video : Regularization as Shrinkage\n",
        "try: t2;\n",
        "except NameError: t2=time.time()\n",
        "\n",
        "from IPython.display import YouTubeVideo\n",
        "video = YouTubeVideo(id=\"B4CsCKViB3k\", width=854, height=480, fs=1)\n",
        "print(\"Video available at https://youtube.com/watch?v=\" + video.id)\n",
        "\n",
        "video"
      ],
      "execution_count": 9,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Video available at https://youtube.com/watch?v=B4CsCKViB3k\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "execute_result",
          "data": {
            "text/html": [
              "\n",
              "        <iframe\n",
              "            width=\"854\"\n",
              "            height=\"480\"\n",
              "            src=\"https://www.youtube.com/embed/B4CsCKViB3k?fs=1\"\n",
              "            frameborder=\"0\"\n",
              "            allowfullscreen\n",
              "        ></iframe>\n",
              "        "
            ],
            "text/plain": [
              "<IPython.lib.display.YouTubeVideo at 0x7facc453cdd8>"
            ],
            "image/jpeg": "/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEABALDBoYFRsaGRcdHRofIh8iIiEgHjEnIyElLyc1MC0nLS00PFBCNThNOS4tRGFFS1NWW11bNUFlbWRYbVBZW1cBERISGRYZLxsbLVc/NzZXV11XV1hXV11XV1dXV1dXV1dXV1dXV1dXXVdXV1dXV1ddV15XV1dXV11XXVdXV1dXV//AABEIAWgB4AMBIgACEQEDEQH/xAAbAAEAAgMBAQAAAAAAAAAAAAAAAwQBAgUGB//EAEgQAAIBAgMCCQoEBAMIAgMAAAABAgMRBBIhEzEFF0FRUlOSsdIUIjIzYXFygZGyBjRzoSNCwdFDYuEHFiRjgpOi8VSjFUTw/8QAGAEBAQEBAQAAAAAAAAAAAAAAAAECAwT/xAAiEQEBAQEAAgIDAQADAAAAAAAAARECITESUQNx8EETMmH/2gAMAwEAAhEDEQA/APn4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPUUPwFjalOE4yo5ZxUlebvZq65CTi8x3Sodt+EDyYPWcXmO6VDtvwji8x3Sodt+EDyYPWcXmO6VDtvwji8x3Sodt+EDyYPWcXmO6VDtvwji8x3Sodt+EDyYPWcXmO6VDtvwji8x3Sodt+EDyYPWcXmO6VDtvwji8x3Sodt+EDyYPWcXmO6VDtvwji8x3Sodt+EDyYPWcXmO6VDtvwji8x3Sodt+EDyYPWcXmO6VDtvwji8x3Sodt+EDyYPWcXmO6VDtvwji8x3Sodt+EDyYPWcXmO6VDtvwji8x3Sodt+EDyYPWcXmO6VDtvwji8x3Sodt+EDyYPWcXmO6VDtvwji8x3Sodt+EDyYPWcXmO6VDtvwji8x3Sodt+EDyYPWcXmO6VDtvwji8x3Sodt+EDyYPWcXmO6VDtvwji8x3Sodt+EDyYPWcXmO6VDtvwji8x3Sodt+EDyYPWcXmO6VDtvwji8x3Sodt+EDyYPWcXmO6VDtvwji8x3Sodt+EDyYPWcXmO6VDtvwji8x3Sodt+EDyYPWcXmO6VDtvwji8x3Sodt+EDyYPWcXmO6VDtvwkGM/A+Lo03OcqOVWWk29/yA80DrL8P1ulT+r/sZX4crdOn9X/YDkA7P+7Vfp0/q/wCxh/huv0qf1f8AYDjg66/DtbpU/q/7Gy/DNfp0vq/7AcYHbX4Wr9Ol2peEkj+EMS/56Pal4QOAD09P8CYuW6pQ7UvCTr/Z3jOtw/bn4CaPIg9ZL/Z7jF/i4ftz8BG/wHi1/iUO1LwjR5cHpX+CMV1lDtS8Jj/cnFdZQ7UvCUebB6X/AHIxXWUO1LwmH+CcV1lDtS8IH1Hgn8pQ/Sp/ai2VOCfylD9Kn9qLYAGABkGABkGABkAAAAABgAZAMAZBgyAAAAGABkAwBkAAAYMgAAAAAAAAAAAAAAAAAAAOX+Io3wrX+aPedQ5n4g/LP4o94Hktjrbd7jOzfvJktTewECRlxJMuvyQaIqFR1JIoZdSSKA3hEvYamV6UTONxmyilH0pcvMiVZNdyi4xWrS+ZZjUTWjT9zPGqrPl+t7m2GxzjO0ZrMuRP/wDrj4ul4x62pIrTI8Ni1Vhfl3NczNpMmMVFJGliRmjNMsGkmZkzSTA6/BP5Sh+lT+1FsqcE/lKH6VP7UWyo5GPx1XyqOGoypU5OntHOonK+tssYpq705zGKxmJoYac6uxzxnTSlG+WUXKKbab816vlMcM1MO5KnisNOdO141Nm5xT5VeOsXuOXGjN4XERhGq8O6tDYxqKTlbPHPo9cvv9pnXfnmWR6DC8K0K03CnUUpWzWs1ePPG+9e1Fevw1SdKbo1acpQSbzXy
LzkvOaWj13GcZBvH4SSi7KGIu7aK6ha7OdKhJcDOCg8z/lUXe+1vuKzOefH/rr4nhahRk4VKiU0k3FJuVny2Su0R1uEbywrpOMqdebWbf5qhKWnzRrhIPy/Eyyuzp0EnbR+ldJnHpYSrLD4SEM1OflGJ87Jdwi9p51vc9Peias55/v09FLhCitpepFbK20b3QvyN7r+w1wfCdGvJxpzvKKu4uLjK3PZpae048J1aOCqUIU3CtScFKUYOWeDl51aF/Sla7tq0/kYpQdTFx2dWtUj5PXjtKkLZZNwss2VX5y6nwmV03w7hU2nWjo7Xs8rfMnazfsRNi+E6NGSjUnaTV1FJylbnsk3b2nnq2LiuDFhnRqRq040oyi6bUYuM43lm9Gzeuj5S1iM1HG15zq1acKqpuE4UlNPLGzg/NbTvd/Mmr/xx36NaNSCnCSlGSumndNFOPCMYbaVWdNQp1FBZb31SaTXS13L2DgagoULxdRqcpz/AIkVGXnO781JWXL8zl1aMf8Ai3VjWivKYyhOnFuUGqcbTWmq3rcy6zOZtjt4PHU6+bZyu42zJpxlG+66eqKWMxOIeM2FCVKK2SqN1IOTvncbaSRjgXEVJzqxlOVWlHLkqyp7OUnreLWl7aa2W8hxuA23CGsqsI+T6TpylDzto9Lrfz2CySdeVjA8JSUq9PEunGVBQlKcdIOMk2nZvR6Ms4ThOjWk4053klfK04ytzpNK69p55YKawk6apz29KvTnVkk5OvGLTU4uXpaa251YvYXLXr05KvXnKnGbTlRUIrMrOLeVPmdvYTV64nm/39V2fD2FjKUZV4rK2m7PLdb45rWb9m8uQxEHN01JOajGTXMnez/Znlq2JVPgmeFlRqRqwpOMls3lut88/o25b3udN4hUMbKdSM8tSjSUXGEpJyi5Xj5qeuqLpfx/S1iOEvOobJxlGdZ0pOz0spXS9t4m9XhnDwm4SqpNPK3Z5Yy5nK1k/ezj4OnN08M3Tmv+NrSacXeKbqWb5t6IOEJ1J4fFxqTrKs3VUaMaV4OOuW3m63VnmvvJpOJuPR1+EqNOoqc6iVR5bR5XmbSsvkyGnwpGNOUq04L+LUppwUmvNk1Z6b9NeQiw1N//AJCpJxdvJ6KUrf5p3V/oUVi6mGwlXLTntJ4nERi8kmoqVSX8RpK9ktfboXU+E9OtT4Xw8qc6iqxyQaUm7rK+Ra8uqMQ4YoShUlGfq1eScZKST3PK1exzHUUMFlw2182pDazdKW0s5XnUSkvOly7n+xtglmxjcJ1akPJ5JTqQtrnWidlcafCeVnC8M7alhqkHCO0nGM4yzXTcHJxi7avT3byfEcNYalNwnVSkrZtG1C+7M1pH5nHwU8+H4OioTzUatONROEllaozTvdbr8pNhsTHDU69CrRqSqSqVpJRpuSrKcm000rbmk77rE1q8TXXxXCVGi4qpUUXJNxW9yS5rb95vg8bTrwz0pqUbtPkaa3pp7mcfg3B1KVbBxmm3TwtSMnvSlmhpf6/Qu8FQarYy8Wk691dWutnDVF1jrmSeP7y6YAK5gAAAAAAAAAAAAAcz8Qfln8Ue86ZzeHfy7+KIHm4o3sYijawGGtTFjZ7xYlaR21JIo1a1N4kE9M4/DMVOom35q9v7nXizkcIZKrcWrxi7Je5ie2+YiwNS8ZK7air35LGkZQm2tMyfPZr3E2DilGSW5rLYzQwlNO+VX/c68tdSunwVVcalr6SVv7HYbPPxrKE4S5pI7VOpmipbrpMz1HKt2zRszc1bIjWTI5M3ZHJhHb4J/KUP0qf2otlTgn8pQ/Sp/ai2BgGQBgGQBgGQBgMyAOX/APhIPKp1a86cZKSpzqXjdO6vytJ8jbOmZAW232wDICMAyAMAyAOZLgWEtJVa0qd82zlUvB63s+Vq/I2dIyAttvtgGQEYBkAYBkAYBkAYBkAAAAAAAAAAAAAAAAADm8O/l38Ue86RzeHvy7+KPeB56JsaRZtcKGTDeouZq
xhmyNJM2TEVJFnFxzy1J252dmJyOHI5Hn5JK3/V/wCu4rXNyuVOvaTV3a1t/wC50sJXTVk9xwMRQqtqVOTs+RO1joUE6ShKTzP+Y3F6t9u1Spuc0lvO9FJJJbkcXgeWaTnyWsjrZhXK1Jcw2a3MNkxkbI5MzJkbYweg4J/KUP0qf2otlTgn8pQ/Sp/ai2RQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOZw+/8Ahn8Ue86RzPxF+WfxR7wPORZumQQN0lzIDdvUhxmLhRpupNvKrbtXqSNnE/FlS2Hiuea/ZMmKVPxVR5KdR/Rf1IZfi1fy0H85/wCh5iMW2kldvcdKhwRLfUeVcy3/AOhuc76S11I/iyo3aGHi3zZmyTFcK1a1JQqQpwzNWUbt/O/tsUXSUPNglFfu/eyOor211Os/Ex81rD0Hf0vkWlQa3u91axUw8s3xd5dhJW15Dn68V1vlFLF1KNdRpzyxlTtz2d3qSYfhPE0ZNv8Aip+2/wA7HK220xDlyJWRb2h6OfxTrna4d/mvNvM9PV4DFOrBSas7tWLWc8hRxso6ZmQcI8JqUXSlOa3O+/2nLvi8Lx1OntJMhzanG4BnJUsrlJrkze+/9TqKZzaep4J/KUP0qf2otlTgn8pQ/Sp/ai2ZaAUK+MmqzpxVNWjGV5zcb3bWmnsFThFQ0kr+dCPmZpauN+b/ANgXwYMgAYuAMgwGwMgwYlNJpN6vRe3S4GwMBMDIMXXOYlNJXbstF9XZAbAwLgZMBtc5SpN7fSpdJTzK+95llSV/5Vo37foF4GsppNJve7L2u1/6MyBkwEzEpJJ6gZMlDA1P4tSGeUrRpSV7tWknrfkbs7rk05y+AAAAAAAAAMBtLe7Gs5ea7PkYGwKmGwlN04Nx1cY8r5veSeR0+j+7AnBB5HT6P7seR0+j+7AnBB5HT6P7seR0+j+7A0xe2zw2esX6W7zba3157NfMoxqYpQlOpeCgpySeV5rZWotr/qWh0fI6fR/djyOn0f3YFG2KdmtNMzVo6uytF+z0l8ihwhiKksPWVRtqMqa3K2a7zJNcm7fqd3yOn0f3ZR4a4Oc6DVGnmneOma2nLvYHl4SJkbx4FxfUf+cP7kFbg/hBerwLb55VqaX0Uio3b1+h578XzdqMfjfcdJ8E8MSvego/DOlp83Io478J8J1ZJvDSbXK61PxBrPDicGQSbm+TRe/nOi690mXKP4R4QjTt5Lrq/W0/Eb0/wpwhls8K/wDu0/Eer8d5k9uHe1zJT0I6jSSbaOz/ALq4/wD+K/8AuU/ERVfwhj2/yv8A9lPxG95+2ZL9OdhKyzJrcWMbi0t29rvJv9zcf/8AFf8A3afiNJfhjHRf8SivZetTv9xz6556vt156vMcum8uq1ZajUvyWZbj+HsZdLYNX3XnBJ+5t2ZYX4Vx6/8A1v8A7KfiPROuef8AXn6l6845knoU8bTcrNb9x6Cp+GMdGLbw9kld/wASG7tGj/CfCDjJeS77/wCJT+X8xj8l565zV45su4h4Ix+WmtpNKzkld200OvDFwaupxaW93RxpfgzhHLG2FeZXverTt9xJhvwlwjGcXPD5YRd21UpuyWvSPFr04+mcE/lKH6VP7UWypwT+UofpU/tRbMqqSwUZV51JxjJOEIpNXaacm9/vRFWwUrycMt9pTnFPRWjFK2m7cdAAYRkADl8LVMkqc97p55uKfnStFpJJ/wCaUd2t7FjD4WUcNCnfzlFJvX0uV6NPfflLTina6Wm72GQKNHBTjNNzTSe69T+s2v2M4nDuda8oudPZyiop2tJvVvXm3Pk15y8AObgqsnUgm3qq+ZXullqJR+iuiTE4acqlNqrUSzSeihaKyvniXFFXvbV7zIFTGUZypxim5edDPuvKKeq5Fr3XNGslSkleClKo3HNe/mt7i8YcE2m0m1udtwHOw+Gk6znOjlVoWvJPK1mbem93kT4/DznHzak1r
DzYqNvSWusWy2ZAqYilV2FWNOo3VcZ5JStpK3m7kla5XqUYwjdJ07xUEnOzm0nbVPS1278vyR0zSdOMvSin71cDmSw06k6eempxUZKUsytUk4xSduSNs+nPY6UKEItyjCKk97UUm/mbpGQKWJw05TptVaiWduyULRWSS5Y9/OZxlGcqKjGTk80M25SlDMsy0stVdFwAcrE2ppuLlT86nJxT5HNK6W5t2ta+799qWFcq7nOjZWha8k7SUpScvf6NjouKbTaV1u03GQMRgluSXLojYAAAAAAAAACHFUdpTlDTzlbVXK+HwOzhUV9GrLLpZK+vv13+xFqtVjCOaTstP30SI1iYTjPLL0U73Vu8DbC+qp/DHuJiHC+qp/DHuJgAAAAAAAABgyAMAyAMAyAMAyAMGJySTbaSW9vchKSSbbslq2+QrU47Vqcl5i9CL5f8z/oipaXlV3XhT590pe7mX7k1PDwh6MEvctSQMaY1qJNPMll5b7jl1qNa0ZYdJxU42hUbtlvrJc3uLy/iv/lr/wA3/YsGPf6azP2gxvqKnwS7icgx3qKnwS7ic0gaYj1c/hl3EhHiPVz+GXcBBwT+UofpU/tRbKnBP5Sh+lT+1FsAAYAyDAAyAYAyDBkAAAAAAAwAMgAADAAyDAAyDAAyAAAMADIMADIMGQIsRQjUhlluun807pkNHBQpU5Rina1rN3sluXu1f1JcVWdODko5rW0+er+W8goY5VITkk7JXVtbp3s/2egE+E9VT+GPcTEOE9VT+GPcTAAAAAAAAAAAAAAAAADAK1eTnLZRen87XIuj733BK1X8aX/Ki9P87XL7l+5bMRikkkrJbjMnZXYpIMr+tf8Ay/v/ANO8a1fZT5uWf+hYSM/9v036EjIBplXx3qKnwS7icgx3qKnwS7icDJHiPVz+GXcSEeI9XP4ZdwEHBP5Sh+lT+1FsqcE/lKH6VP7UWwOdi8fOnOcIxUp+Y6a6UbPN81ll9Ua+VupOLhbK5yjCWtnak220mrq+lvYX5UYuam4rNFNJ8qT39xrTwtOKiowSUG3FLkbvfvYHMwk60vJ/4kc0qEpNyTa3wtdX1eu8uU8bfCxrO0W4KVtWrvk01epNQwlOnbJG1k0tW7J2ulfk0Rt5NDZ7PKslrZeSwFfA4ic5ThNaxyu+Vx0d+Rt83OQ1MRNq7y5VXjCKWZPSpa7af7f+i7Rw0IXcVq7Xbbbdt12zPk8LWy6Zs/8A1Xvf6gc7yirUVGpeKhKqrRSeZLVau+u7dY3o42q1RqPJkqysopPNFZZSWt9X5vMWlgKSlmya5sy1dlLnS3Jlanwa1VjNuCUZSklGLTbaa5W0t7vZasDGCx9So6cst4VNdKclkVrpuT0fN8yatWqus6dNwVoRneSb1bato1zEtLB04SzRjZ68rsr77LciOtgI1KrnPVOCjZNrc23qnud93sAqR4UnO2SNns4Ta2cp3cr6XjuWm82rY6tapKKjFU4Qm4yTcneLbjv03F2pgqcrXhuWVWbj5vR05PYbSw0GpJxXnpKXtSVrAUsRj5xqebZwU6cH5j3yaXpXtdZuYhoVKu0gozVnPFaSTd7VHblOjPBU3LM46txe92bVrO26+i1DwkNHls4ylJavSTd39XyAQ4THOrKKSStDNU54yvZR+ql9CPEYyottOOTJR3pp5pWipPW+mj00ZZweF2am205zk5yaVlfdovcjNXBU5yzShd6X1dnbddbn8wK2CjKWIrzk07OMY6NWTinbfbl+pVnrSniHKSrRqyS852SVTKoW3Wat9bnYhTUXJpWcnd+12t3JETwVN1NpkWa6d7u11udt1/aBNN2i2ldpPQ4+HWWOFqqcpVKriptybU1KDb03KzWnMdWhRUItJvWUpa87dyOGBpRk5KCTd+V2V99luV/YBUxuGnKtKWTPBQjZbVws03fd8i9haqnShON1GUYtX32aI58H0pJJw3RUVq15q5HZ6r3klPDxjNyWl4xjbkSje
1l8wKdbG1EqtRZNnSlZxaeaVkm9b6PXRW7yxha05zqXyqEZuKSTu7W1bNqmCpynncLyunvdm1ubW5v3ksKajeytdtv2vnAqcJ0JVFTUVdKpeSzuF1lkt69rRph6uWjWUYZZ0s145nNZsuZWb36NFyvh4VFacbpO65GnzprcRRwMI5MqcVCTno97aa1fLvAoRWy2E6cpSlUUs15N5/4blmt70t3PYUY5FhqkZSlOr6d5N57wcm7btGuQ6NHBU6cs0IJPVcui5UlyfIUsFThLNGCUtefS++y3L5Ac3DaQwtZSk6lVxz3k2pZottW3K1vlY7RXp4KlGeeMEpa8rsr77Lcr+wsAazgpK0kmuZq6NHTjGMssUr3bsrXdt5pjqk40pOnHNO2i/ra+vuK+Er1Z05ucLStomstnZ3j3a+0C1hfVU/hj3ExDhfVU/hj3EwAAAAAAAAAAAAAAMGTWpNRi5N2SV2wKvCWIqU6f8GKnVlpGLe/nfyJsLSyQSfpPWTve8nv1NMPBtupNWk9EujHm9/KySrWUdN8nuS3sW4kmtqk1FXbsiJQdR3mrR5I8/tl/YzCk2809XyJbo/6+0mM+/bfr0GQDTIAAK+O9RU+CXcTkGO9RU+CXcTgZI8R6ufwy7iQjxHq5/DLuAg4J/KUP0qf2otlTgn8pQ/Sp/ai2AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACLEVXCDkouTVtFvepKYA0o1M0U3GUXzS3r6Ganov3MjxcHKlKMd7VlZ2t7blbCYerCnNVJ+c1vzZtbay13cmnsAtYX1VP4Y9xMQ4X1VP4Y9xMAAAAAAAAAAAAAAYKr/iz/AOXB9qS/ou/3GMbiLNU4t55c2rUede3mNqdBuKi1kgt0E9fmybhmtpVnJ2p688n6K/uzelRUbvfJ7297N4xSVkrIyJP9q79BkAqAAAAACvjvUVPgl3E5BjvUVPgl3E4GSPEern8Mu4kI8R6ufwy7gIOCfylD9Kn9qLZU4J/KUP0qf2otgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAa1JqKvJpLnbsjR1IyjLLJO107O9nYYmk5wcVLLfS9r6cpXo4GNOE1e6atbdZK7S/dgT4X1VP4Y9xMQ4T1VP4Y9xMAAAAAAADAGTBpUrRjvaXMuV+5EeecvRjlXPLf8AJf3Jq4lnUUVdtJe0rV8TK3mq19I39KT9i/qyRwhTTnN3a/mlq/l/oYw9Nye0mrSekY9CPN7+cZb7LZPTOFw+RXk803rKXP7PcTgyVAAAAYAGTBFPEwi7Zrvmjq/ojXaVJejBRXPPf9F/cmpqe5D5TFu0by58q0Xz3GFhU/Tk5+x+j9F/UnUUtEPJ5QY71FT4JdxOQY71FT4JdxOVWSPEern8Mu4kI8R6ufwy7gIOCfylD9Kn9qLZU4J/KUP0qf2otgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARV6uSOazesVp7Xa/7kNPGRnSnL0Ul/NzOKaf0aJ6tKM45ZJNaOz9m41VKMISUYqK1ei5QGE9VT+GPcTFWNNzw6ipOLcEsy3rTeiHDVXQjGnWqZrKyqPl+JcjA6AIPKov0VKXuj/UZ6j3QS+KX9EZ+UXKnNZzUVdtJe12ItlN+lUfuirf6mYYeCd7a871f1Y2mRjyi/oRcvbuX1YyTl6Usq5o7/AKsmDYz7N+mlOjGO5a8/K/mbTmoptuyW9siq4qnBNynFW9uv0Ksa8aklKd8q1jBJvXnlbl9hZkZvSanF1GpzVorWEX9z9vs5C0QeUPkpTfyS72M9V7oRXvl/RIWpKsGCDZ1HvqJfDH+rHkqfpTnL3ysvohq6kqVox9KUV73Yj8qT9GMpe5WX1ZvToQj6MIr3IkHk8oL1ZckYe/zn/QeTX9Ocpey9l9EWAMMaQpqKtFJL2I2MgqgAAr471FT4JdxOQY71FT4JdxOBkjxHq5/DLuJCPEern8Mu4CDgn
8pQ/Sp/ai2VOCfylD9Kn9qLYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGClTxM5bZON8t7JKz3tW1dnok/mXTWp6L9zA0w3qYfBHuIlh5LcqSv/AJP9SXC+qh8Me4lJZoorD16a8ycWui09FzJ37zNKrKTyurll0XBJ/Ln+RdNKlKM1aUVJe1XLn2z8fpHsJddP6R/sPJuepUf/AFW7jHkcV6Mpx903b6PQeTy5a1R9ldyHxhg8LDe3K3tqSt3lPEuOzk8PQVaaTtdebf3vf8i4sHD+bNL45OS+jdiwkhkMVMLhLWnOMdpbkXmw9kf7lsALJgAAoZMADIMADIMADIMADIMACDHeoqfBLuJyDHeoqfBLuJwMkeI9XP4ZdxIR4j1c/hl3AQcE/lKH6VP7UWypwT+UofpU/tRbAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGDIAp1aFKMoR2EGpNxvlWnmt/0IsFSoyTiqMfMsk2k21qk/2ZdqUYzacoqWV3V1ez50Zp0oxvlild3dla752BH5FS6qHZRXqYek5unsY+ipppJXs9xfIp4eEpZnCLk1lu1rbm9wEGGoUqkFLYwV7/yrn3r2EvkVLqodlElOmoxUYpKK3JbkbAcqvPDwjtJ0YxtN01G0dXff9Fcv+R0uqh2UbRw0EmlCKUnd6b3zskArzwlJJtUYN23ZVqVIbBzp/wAGN6sYy1irRVtFu36nTlFNNNXT3kUMLTjltTisvo2Xo+7mAx5FS6qHZRFiKFKEc2wg9Yr0VytK/wC5bK9SpTqVHRlHM0lOzV1a+j+q/YCtQp0drKiqK81XzOK13N/ci35FS6qHZRvCjCMnJRSk9G0tWSAUMThqV4w2MbTvG6SutN/+owkKNVNqhFWa3xWqaTT+jLc6EJSUpRTkrpNrVX32M06UY3yxSu7uytd84EfkVLqodlFWpSpZprYQ/h5ZX0Se/Rvk/wDR0SCWEpvNenF5/S81ed7+cDSlhqUoRlsYq6Ts4K6utzN/IqXVQ7KJoxSSS0SAHJnPDwSnKjFOU1SS82z87en8/wBrHQ8jpdVDsozDDU4xyxhFR5ktOclAh8ipdVDsomMgAR4j1c/hl3EhHiPVz+GXcBBwT+UofpU/tRbONwbWl5NQ1/wqf2os7eXSA6AOft5dIbeXSA6AOft5dIbeXSYHQBz9vLpMbeXSA6AOft5dIbeXSA6AOft5dIbeXSYHQBz9vLpMbeXSYHQBz9vLpMbeXSYHQBz9vLpMbeXSYHQBz9vLpMbeXSYHQBz9vLpMbeXSYHQBz9vLpMbeXSYHQBz9vLpMbeXSYHQBz9vLpMbeXSYHQMFDby6TPJcNYxyxuIpvF4mnV2UHQhSqSjCUsrbTS5Xb5hZNuPeA+VS4Tk6FPJjca8Q5tOO2nlyv0ba79x6P8P4teWVqdPFYitGFKKltqjklUUrSy35PaGuuM/2PZAobeXSY28ukwwvgobeXSY28ukwL4KG3l0mNvLpMC+Cht5dJjby6TAvgobeXSY28ukwL4KG3l0mNvLpMC+YyK97K/PbUo7eXSY28ukwL4KG3l0mNvLpMC+Cht5dJjby6TAvgobeXSY28ukwL4KG3l0mNvLpMC+Cht5dJjby6TAvgobeXSY28ukwL5piPVz+GXcU9vLpM0rVpZJed/K+4Ctwb+Wo/p0/tRR4Qp1oVK9WnFtSpU0nF3kpQcnbIk207paclzq8F015LQ0/wqf2otbJcwHl54HG2zQqZZzyuequnllovZFtL2pcvLb4Vwlabk6SjLPQqUXeWXK5WtLdqt53dkuYbKPMB5nE0MXTi5KrNxWa8Y6vKpwyqKUW08ufn3mlKjj5xhJTcU4T0k7Su4zy3TWjTcOTk+R6nZR5hslzAc54W8qUnUqXpqWmbSd1bz1ynGxnAledao4zjsZSso3/kqJKuv/FNe9nqtkuYbJcwHl5YThDK/wCMr5uRpeb52sebfDT2e
+9zgzCV6dWrtZKVOTnKOV2s3K7bXO/2tY7myjzDZR5gPP8AAHB08MpxkkoWhl1i5Nq925Rirrda+u865Z2UeYbKPMBWBZ2UeYbKPMBWBZ2UeYbKPMBWBZ2UeYbKPMBWBZ2UeYbKPMBWBZ2UeYbKPMBWBZ2UeYbKPMBWBZ2UeYbKPMBWBZ2UeYbKPMBXPnf4vrOnwqqkbXgqMlfddaq/0PpmyXMU8TwLha089XD05y0V5K7A8NLF7LNwmqEMlaKp06eXzYVv52vdkbvy5vebf7PPX1/04/ce0fAODcFB4ankTuo2eVPntuuS4TgnD0G3RoQptqzcVa6GjILOyXMNkuYCsCzslzDZLmArAs7Jcw2S5gKwLOyXMNkuYCsCzslzDZLmArAs7Jcw2S5gKwLOyXMNkuYCsCzslzDZLmArAs7Jcw2S5gKwLOyXMNkuYCsCzslzDZLmArAs7Jcw2S5gKwLOyXMNkuYCsa1vQl8L7i3slzGlaksktP5X3AR8F/laH6VP7UWirwX+VofpU/tRaAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAaVvQl8Mu43NK3oS+GXcBBwX+VofpU/tRaPmFD8e4unThBUsPaEYxV4yvZK2vnEnGHjOqw/Yn4wPpYPmnGHjOqw/Yn4xxh4zqsP2J+MD6WD5pxh4zqsP2J+McYeM6rD9ifjA+lg+acYeM6rD9ifjHGHjOqw/Yn4wPpYPmnGHjOqw/Yn4xxh4zqsP2J+MD6WD5pxh4zqsP2J+McYeM6rD9ifjA+lg+acYeM6rD9ifjHGHjOqw/Yn4wPpYPmnGHjOqw/Yn4xxh4zqsP2J+MD6WD5pxh4zqsP2J+McYeM6rD9ifjA+lg+acYeM6rD9ifjHGHjOqw/Yn4wPpYPmnGHjOqw/Yn4xxh4zqsP2J+MD6WD5pxh4zqsP2J+McYeM6rD9ifjA+lg+acYeM6rD9ifjHGHjOqw/Yn4wPpYPmnGHjOqw/Yn4xxh4zqsP2J+MD6WD5pxh4zqsP2J+McYeM6rD9ifjA+lg+acYeM6rD9ifjHGHjOqw/Yn4wPpYPmnGHjOqw/Yn4xxh4zqsP2J+MD6WD5pxh4zqsP2J+McYeM6rD9ifjA+lg+acYeM6rD9ifjHGHjOqw/Yn4wPpYPmnGHjOqw/Yn4xxh4zqsP2J+MD6WD5pxh4zqsP2J+McYeM6rD9ifjA+lg+acYeM6rD9ifjHGHjOqw/Yn4wPpYPmnGHjOqw/Yn4xxh4zqsP2J+MD6WD5pxh4zqsP2J+McYeM6rD9ifjA+lg+acYeM6rD9ifjHGHjOqw/Yn4wPpYPmnGHjOqw/Yn4xxh4zqsP2J+MD6WD5pxh4zqsP2J+McYeM6rD9ifjA+lg+acYeM6rD9ifjHGHjOqw/Yn4wPpYPmnGHjOqw/Yn4xxh4zqsP2J+MD6WaVvQl8Mu4+b8YeM6rD9ifjMS/2g4xprZYfVNejPxgeTAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB//2Q==\n"
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 9
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "aAGlmnIjWrJ7"
      },
      "source": [
        "One way to think about Regularization is to think in terms of the magnitude of the overall weights of the model. A model with big weights can fit more data perfectly, whereas a model with smaller weights tends to underperform on the train set but can surprisingly do very well on the test set. Weights that are too small can also be an issue, as the model can then underfit the data.\n",
        "\n",
        "This week we use the sum of Frobenius Norm of all the tensors in the model as a metric to measure the \"size of the model\"."
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "avenCkhxzgSb"
      },
      "source": [
        "##Exercise 1: Frobenius Norm\r\n",
        "Before we start let us do a quick recollection of the Frobenius Norm. The Frobenius norm, sometimes also called the Euclidean norm (a term unfortunately also used for the vector $L^2$ norm), is the matrix norm of an m×n matrix $A$ defined as the square root of the sum of the absolute squares of its elements.\r\n",
        "\\begin{equation}\r\n",
        "||A||_F = \\sqrt{\\sum_{i=1}^m\\sum_{j=1}^n|a_{ij}|^2}\r\n",
        "\\end{equation} "
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "NSi9VW7Tx3eG"
      },
      "source": [
        " **Hint:** Use functions model.parameters() or model.named_parameters()\r\n"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "gLmcWspCBGFR"
      },
      "source": [
        "def calculate_frobenius_norm(model):\r\n",
        "\r\n",
        "    ####################################################################\r\n",
        "    # Fill in all missing code below (...),\r\n",
        "    # then remove or comment the line below to test your function\r\n",
        "    raise NotImplementedError(\"Define the grad visualization function\")\r\n",
        "    ####################################################################\r\n",
        "\r\n",
        "    norm = 0.0\r\n",
        "\r\n",
        "    # Sum all the parameters\r\n",
        "    for ... in ...:\r\n",
        "        norm += ...  \r\n",
        "\r\n",
        "    # Take a square root of the sum of squares of all the parameters\r\n",
        "    norm = ...\r\n",
        "    return norm\r\n",
        "\r\n",
        "#uncomment to run\r\n",
        "# net = nn.Linear(10,1)\r\n",
        "# print(f'Frobenius Norm of Single Linear Layer: {calculate_frobenius_norm(net)}')"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "8Ngmfrlr2cbG"
      },
      "source": [
        "[Click for solution](https://github.com/CIS-522/course-content/blob/main/tutorials/W05_Regularization/solutions/W5_Tutorial1_Ex01.py)\n",
        "\n",
        "Example Output:\n",
        "\n",
        "\n",
        "![image.png]()\n",
        "\n",
        "(or)\n",
        "\n",
        "![image.png]()\n",
        "\n",
        "\n",
        "**Note:**  The numerical value might be different from the value shown here. This is due to random initialization.\n",
        "\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "_pYQM7QcEV2u"
      },
      "source": [
        "#Section 2: Overfitting\n",
        "(Time Estimate: 30 min from start)"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "GHrk7xaMoFQZ",
        "cellView": "form"
      },
      "source": [
        "#@title Video : Overfitting\n",
        "try: t3;\n",
        "except NameError: t3=time.time()\n",
        "\n",
        "from IPython.display import YouTubeVideo\n",
        "video = YouTubeVideo(id=\"RlaGyRKP2nY\", width=854, height=480, fs=1)\n",
        "print(\"Video available at https://youtube.com/watch?v=\" + video.id)\n",
        "\n",
        "video"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "7Lh2KZwvV6kF"
      },
      "source": [
        "##Visualizing Overfitting\n",
        "\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "lg5stXd0cR6U"
      },
      "source": [
        "Let's create some synthetic dataset that we will use to illustrate overfitting in neural networks."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "qQ-J57s3Ee03"
      },
      "source": [
        "#creating train data\n",
        "X = torch.rand((10,1))\n",
        "X.sort(dim = 0)\n",
        "Y = 2*X + 2*torch.empty((X.shape[0],1)).normal_(mean=0,std=1) #adding small error in the data\n",
        "\n",
        "X = X.unsqueeze_(1)\n",
        "Y = Y.unsqueeze_(1)\n",
        "\n",
        "#visualizing train data\n",
        "plt.scatter(X.numpy(),Y.numpy())\n",
        "plt.xlabel('x')\n",
        "plt.ylabel('y')\n",
        "plt.title('toy dataset')\n",
        "plt.show()\n",
        "\n",
        "#creating test dataset\n",
        "X_test = torch.linspace(0,1,40)\n",
        "X_test = X_test.reshape((40,1,1))"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "4mU0QnD1tYIH"
      },
      "source": [
        "Let's create an overparametrized NN that can fit on the dataset we just created and train it."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "Pzw7b04rOSpd"
      },
      "source": [
        "##Network Class - 2D\n",
        "class Net(nn.Module):\n",
        "    def __init__(self):\n",
        "        super(Net, self).__init__()\n",
        "\n",
        "        self.fc1 = nn.Linear(1, 300)\n",
        "        self.fc2 = nn.Linear(300, 500)\n",
        "        self.fc3 = nn.Linear(500, 1)\n",
        "\n",
        "    def forward(self, x):\n",
        "        x = F.leaky_relu(self.fc1(x))\n",
        "        x = F.leaky_relu(self.fc2(x))\n",
        "        output = self.fc3(x)\n",
        "        return output"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "bUFGn_PROuvP"
      },
      "source": [
        "#train the network on toy dataset\n",
        "model = Net()\n",
        "criterion = nn.MSELoss()\n",
        "optimizer = optim.Adam(model.parameters(),lr = 1e-4)\n",
        "max_epochs = 10000\n",
        "iters = 0\n",
        "\n",
        "running_predictions = np.empty((40,(int)(max_epochs/500 + 1)))\n",
        "\n",
        "train_loss = []\n",
        "test_loss = []\n",
        "model_norm = []\n",
        "\n",
        "for epoch in tqdm(range(max_epochs)):\n",
        "\n",
        "    #training\n",
        "    model_norm.append(calculate_frobenius_norm(model))\n",
        "    model.train()\n",
        "    optimizer.zero_grad()\n",
        "    predictions = model(X)\n",
        "    loss = criterion(predictions,Y)\n",
        "    loss.backward()\n",
        "    optimizer.step()\n",
        "\n",
        "    train_loss.append(loss.data)\n",
        "    model.eval()\n",
        "    Y_test = model(X_test)\n",
        "    loss = criterion(Y_test,2*X_test)\n",
        "    test_loss.append(loss.data)\n",
        "\n",
        "    if(epoch % 500 == 0 or epoch == max_epochs - 1):\n",
        "        running_predictions[:,iters] = Y_test[:,0,0].detach().numpy()\n",
        "        iters += 1"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "iAzaEKmZt9aR"
      },
      "source": [
        "Now that we have finished training, let's see how the model has evolved over the training process."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "dJP-u-sDhUGz",
        "cellView": "form"
      },
      "source": [
        "#@title Animation (Run Me!)\n",
        "fig = plt.figure()\n",
        "ax = plt.axes()\n",
        "def frame(i):\n",
        "    ax.clear()\n",
        "    ax.scatter(X[:,0,:].numpy(),Y[:,0,:].numpy())\n",
        "    plot = ax.plot(X_test[:,0,:].detach().numpy(),running_predictions[:,i])\n",
        "    title = \"Epoch: \" + str(i * 500)\n",
        "    plt.title(title)\n",
        "    ax.set_xlabel(\"X axis\")\n",
        "    ax.set_ylabel(\"Y axis\")\n",
        "    return plot\n",
        "anim = animation.FuncAnimation(fig, frame, frames=range(20), blit=False, repeat=False, repeat_delay=10000)\n",
        "anim"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "xQhwOF0iqsYh",
        "cellView": "form"
      },
      "source": [
        "#@title Plot the train and test losses [Note: You may have to run this twice]\n",
        "plt.plot(train_loss,label='train_loss')\n",
        "plt.plot(test_loss,label='test_loss')\n",
        "plt.ylabel('loss')\n",
        "plt.xlabel('epochs')\n",
        "plt.title('loss vs epoch')\n",
        "plt.legend()\n",
        "plt.show()"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "C43NJVU9eKpx"
      },
      "source": [
        "Now let's visualize the Frobenius norm of the model as we trained; you should see that the value of the weights increases linearly w.r.t. epochs."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "JwiOImKSzLOC",
        "cellView": "form"
      },
      "source": [
        "#@title Frobenius norm of the model \n",
        "plt.plot(model_norm)\n",
        "plt.ylabel('norm of the model')\n",
        "plt.xlabel('epochs')\n",
        "plt.title('Size of the model vs Epochs')\n",
        "plt.show()"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "GfVSopl21FQS"
      },
      "source": [
        "What trend do you see w.r.t. the train and test losses (where do you see the minimum of these losses)? What does it tell us about the model we trained? Discuss among yourselves."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "kNulBEvV0_Ll",
        "cellView": "form"
      },
      "source": [
        "#@markdown Write down your discussion\n",
        "loss_trend = '436565%' #@param {type:\"string\"}"
      ],
      "execution_count": 10,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Rb_o51L8dw6w"
      },
      "source": [
        "## Overfitting on Test Dataset\n",
        "\n",
        "\n",
        "In principle, we should not touch our test set until after we have chosen all our hyperparameters. Were we to use the test data in the model selection process, there is a risk that we might overfit the test data. Then we would be in serious trouble. If we overfit our training data, there is always the evaluation on test data to keep us honest. But if we overfit the test data, how would we ever know?\n",
        "\n",
        "Note that there is another kind of overfitting: you do \"honest\" fitting on one set of images or posts, or medical records, but it may not generalize to other sets of images, posts or medical records.\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "E_Y9eHGXe5-D"
      },
      "source": [
        "##Validation Dataset\n",
        "A common practice to address this problem is to split our data three ways, using a validation dataset (or validation set) to tune the hyperparameters.\n",
        "\n",
        "Ideally we would only touch the test data once, to assess the very best model or to compare a small number of models to each other. In practice, however, real-world test data is seldom discarded after just one use.\n",
        "\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "5risoZLh-boy"
      },
      "source": [
        "# Section 3: Memorization\n",
        "(Time Estimate: 50 min from start)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "uutJgdsFAk7Y"
      },
      "source": [
        "Given sufficiently large networks and enough training, Neural Networks can achieve almost 100% train accuracy.\n",
        "\n",
        "In this section we train three MLP's one each on:\n",
        "\n",
        "\n",
        "1.   Animal Faces Dataset\n",
        "2.   Completely Noisy Dataset (Random Shuffling of all labels)\n",
        "3.   Partially Noisy Dataset (Random Shuffling of 15% labels)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "wgTen1PLmrWK"
      },
      "source": [
        "Now, think for a couple of minutes as to what the train and test accuracies of each of these models might be, given you train for sufficient time and use a powerful network."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "kzag0GS8nAaP",
        "cellView": "form"
      },
      "source": [
        "#@markdown Write down your discussion\n",
        "expected_accuracies = 'vhvhjhjg' #@param {type:\"string\"}"
      ],
      "execution_count": 11,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "x4QhI5n_skGC"
      },
      "source": [
        "Now, let's create the required dataloaders for all the three datasets. Take a quick look at how we split the data. We train on a fraction of the dataset as it will be easier to train and also visualize overfitting."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "QGBBuMD3vSvT"
      },
      "source": [
        "##Dataloaders for the Dataset\r\n",
        "batch_size = 128\r\n",
        "classes = ('cat', 'dog', 'wild')\r\n",
        "\r\n",
        "train_transform = transforms.Compose([\r\n",
        "     transforms.ToTensor(),\r\n",
        "     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))    \r\n",
        "     ])\r\n",
        "data_path = pathlib.Path('.')/'afhq' # using pathlib to be compatible with all OS's\r\n",
        "img_dataset = ImageFolder(data_path/'train', transform=train_transform)\r\n",
        "\r\n",
        "\r\n",
        "####################################################\r\n",
        "\r\n",
        "##Dataloaders for the  Original Dataset\r\n",
        "\r\n",
        "\r\n",
        "img_train_data, img_val_data,_ = torch.utils.data.random_split(img_dataset, [100,100,14430])\r\n",
        "\r\n",
        "#Creating train_loader and Val_loader\r\n",
        "train_loader = torch.utils.data.DataLoader(img_train_data,batch_size=batch_size,worker_init_fn=seed_worker)\r\n",
        "val_loader = torch.utils.data.DataLoader(img_val_data,batch_size=1000,worker_init_fn=seed_worker)\r\n",
        "\r\n",
        "#creating test dataset\r\n",
        "test_transform = transforms.Compose([\r\n",
        "     transforms.ToTensor(),\r\n",
        "     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))  \r\n",
        "     ])\r\n",
        "img_test_dataset = ImageFolder(data_path/'val', transform=test_transform)\r\n",
        "\r\n",
        "\r\n",
        "####################################################\r\n",
        "\r\n",
        "##Dataloaders for the  Random Dataset\r\n",
        "\r\n",
        "#splitting randomized data into training and validation data \r\n",
        "data_path = pathlib.Path('.')/'afhq_random_32x32/afhq_random' # using pathlib to be compatible with all OS's\r\n",
        "img_dataset = ImageFolder(data_path/'train', transform=train_transform)\r\n",
        "random_img_train_data, random_img_val_data,_ = torch.utils.data.random_split(img_dataset, [100,100,14430])\r\n",
        "\r\n",
        "#Randomized train and validation dataloader\r\n",
        "rand_train_loader = torch.utils.data.DataLoader(random_img_train_data,batch_size=batch_size,num_workers = 0, worker_init_fn=seed_worker)\r\n",
        "rand_val_loader = torch.utils.data.DataLoader(random_img_val_data,batch_size=1000,num_workers = 0,worker_init_fn=seed_worker)\r\n",
        "\r\n",
        "####################################################\r\n",
        "\r\n",
        "##Dataloaders for the Partially Random Dataset\r\n",
        "\r\n",
        "#Splitting data between training and validation dataset for partially randomized data\r\n",
        "data_path = pathlib.Path('.')/'afhq_10_32x32/afhq_10' # using pathlib to be compatible with all OS's\r\n",
        "img_dataset = ImageFolder(data_path/'train', transform=train_transform)\r\n",
        "partially_random_train_data, partially_random_val_data,_ = torch.utils.data.random_split(img_dataset, [100,100,14430])\r\n",
        "\r\n",
        "#Training and Validation loader for partially randomized data\r\n",
        "partial_rand_train_loader = torch.utils.data.DataLoader(partially_random_train_data,batch_size=batch_size,num_workers = 0,worker_init_fn=seed_worker)\r\n",
        "partial_rand_val_loader = torch.utils.data.DataLoader(partially_random_val_data,batch_size=1000,num_workers = 0,worker_init_fn=seed_worker)"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "iKu5dp7KgJIN"
      },
      "source": [
        "Now let's define a model which has a very high number of parameters when compared with the training data points and train it on all these datasets."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "HQ9HSgxr-f3r"
      },
      "source": [
        "##Network Class - Animal Faces\n",
        "class Big_Animal_Net(nn.Module):\n",
        "    def __init__(self):\n",
        "        torch.manual_seed(104)\n",
        "        super(Big_Animal_Net, self).__init__()\n",
        "        self.fc1 = nn.Linear(3*32*32, 124)\n",
        "        self.fc2 = nn.Linear(124, 64)\n",
        "        self.fc3 = nn.Linear(64, 3)\n",
        "\n",
        "    def forward(self, x):\n",
        "        x = x.view(x.shape[0],-1)\n",
        "        x = F.leaky_relu(self.fc1(x))\n",
        "        x = F.leaky_relu(self.fc2(x))\n",
        "        x = self.fc3(x)\n",
        "        output = F.log_softmax(x, dim=1)\n",
        "        return output"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "QrvUOxYBfRVG"
      },
      "source": [
        "##Here we have 100 true train data.\r\n",
        "args = {'test_batch_size': 1000,\r\n",
        "        'epochs': 200,\r\n",
        "        'lr': 5e-3,\r\n",
        "        'momentum': 0.9,\r\n",
        "        'no_cuda': False,\r\n",
        "        }\r\n",
        "\r\n",
        "acc_dict = {}\r\n",
        "model = Big_Animal_Net()\r\n",
        "\r\n",
        "start_time = time.time()\r\n",
        "val_acc_pure, train_acc_pure, _, model ,_ = main(args,model,train_loader,val_loader,img_test_dataset)\r\n",
        "end_time = time.time()\r\n",
        "\r\n",
        "print(\"Time to memorize the dataset:\",end_time - start_time)\r\n",
        "\r\n",
        "##Train and Test accuracy plot\r\n",
        "\r\n",
        "plt.plot(val_acc_pure,label='Val Accuracy Pure',c='red',ls = 'dashed')\r\n",
        "plt.plot(train_acc_pure,label='Train Accuracy Pure',c='red',ls = 'solid')\r\n",
        "plt.axhline(y=max(val_acc_pure),c = 'green',ls = 'dashed')\r\n",
        "plt.title('Memorization')\r\n",
        "plt.ylabel('Accuracy (%)')\r\n",
        "plt.xlabel('Epoch')\r\n",
        "plt.legend()\r\n",
        "plt.show()"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "jW9z2IggZRKU"
      },
      "source": [
        "plt.plot(val_acc_pure,label='Val Accuracy Pure',c='red',ls = 'dashed')\n",
        "plt.plot(train_acc_pure,label='Train Accuracy Pure',c='red',ls = 'solid')\n",
        "plt.axhline(y=max(val_acc_pure),c = 'green',ls = 'dashed')\n",
        "plt.title('Memorization')\n",
        "plt.ylabel('Accuracy (%)')\n",
        "plt.xlabel('Epoch')\n",
        "plt.legend()\n",
        "plt.show()"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "ERwLcdXb8TRw"
      },
      "source": [
        "##Exercise 2: Data Visualizer\n",
        "Before we proceed to train the model on a data with random labels, let us visualize and verify for ourselves if the data is random or not. Here, we have classes = (\"cat\",\"dog\",\"wild\"). \n",
        "\n",
        "**Hint:** Use the .permute() method. plt.imshow() expects the input to be in numpy format and in the shape (Px,Py,3), where Px and Py are the number of pixels along axis x and y respectively."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "seeyR3dUFafW"
      },
      "source": [
        "def Visualize_data(dataloader):\n",
        "  ####################################################################\n",
        "  # Fill in all missing code below (...),\n",
        "  # then remove or comment the line below to test your function\n",
        "  # The dataloader here gives out mini batches of 100 data points.\n",
        "  raise NotImplementedError(\"Complete the Visualize_data function\")\n",
        "  ####################################################################\n",
        "\n",
        "  for idx,(data,label) in enumerate(...):\n",
        "    plt.figure(idx)\n",
        "    #Choose the datapoint you would like to visualize\n",
        "    index = ...\n",
        "\n",
        "    #choose that datapoint using index and permute the dimensions and bring the pixel values between [0,1]\n",
        "    data = ...\n",
        "\n",
        "    #Convert the torch tensor into numpy\n",
        "    data = ...\n",
        "    \n",
        "    plt.imshow(data)\n",
        "    image_class = classes[...]\n",
        "    print(f'The image belongs to : {image_class}')\n",
        "\n",
        "  plt.show()\n",
        "\n",
        "##uncomment to run the function\n",
        "#Visualize_data(rand_train_loader)"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "b7cwvw_pJvx_"
      },
      "source": [
        "[Click for solution](https://github.com/CIS-522/course-content/blob/main/tutorials/W05_Regularization/solutions/W5_Tutorial1_Ex02.py)\n",
        "\n",
        "Example Solution:\n",
        "\n",
        "![Rand_data.png]()"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "8L78WJxqgsoZ"
      },
      "source": [
        "Now let's train the network on the completely shuffled data and see if it memorizes."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "tE9wtARrg6nx"
      },
      "source": [
        "##Here we have 100 completely shuffled train data.\n",
        "args = {'epochs': 200,\n",
        "        'lr': 5e-3,\n",
        "        'momentum': 0.9,\n",
        "        'no_cuda': False\n",
        "        }\n",
        "\n",
        "acc_dict = {}\n",
        "model = Big_Animal_Net()\n",
        "\n",
        "\n",
        "val_acc_random, train_acc_random, _,model,_ = main(args,model,rand_train_loader,val_loader,img_test_dataset)\n",
        "\n",
        "##Train and Test accuracy plot\n",
        "\n",
        "plt.plot(val_acc_random,label='Val Accuracy random',c='red',ls = 'dashed')\n",
        "plt.plot(train_acc_random,label='Train Accuracy random',c='red',ls = 'solid')\n",
        "plt.axhline(y=max(val_acc_random),c = 'green',ls = 'dashed')\n",
        "plt.title('Memorization')\n",
        "plt.ylabel('Accuracy (%)')\n",
        "plt.xlabel('Epoch')\n",
        "plt.legend()\n",
        "plt.show()"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "h9JAPoEWg1bp"
      },
      "source": [
        "Finally let's train on a partially shuffled dataset where 15% of the labels are noisy."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "Cf0ovppPnrxs"
      },
      "source": [
        "##Here we have 100 partially shuffled train data.\n",
        "args = {'test_batch_size': 1000,\n",
        "        'epochs': 200,\n",
        "        'lr': 5e-3,\n",
        "        'momentum': 0.9,\n",
        "        'no_cuda': False,\n",
        "        }\n",
        "\n",
        "acc_dict = {}\n",
        "model = Big_Animal_Net()\n",
        "\n",
        "\n",
        "val_acc_shuffle, train_acc_shuffle, _,_,_ = main(args,model,partial_rand_train_loader,val_loader,img_test_dataset)\n",
        "\n",
        "#train and test acc plot\n",
        "plt.plot(val_acc_shuffle,label='Val Accuracy shuffle',c='red',ls = 'dashed')\n",
        "plt.plot(train_acc_shuffle,label='Train Accuracy shuffle',c='red',ls = 'solid')\n",
        "plt.axhline(y=max(val_acc_shuffle),c = 'green',ls = 'dashed')\n",
        "plt.title('Memorization')\n",
        "plt.ylabel('Accuracy (%)')\n",
        "plt.xlabel('Epoch')\n",
        "plt.legend()\n",
        "plt.show()"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "ZCM3xI3DzJ0M",
        "cellView": "form"
      },
      "source": [
        "#@title Plotting them all together (Run Me!)\n",
        "plt.plot(val_acc_pure,label='Val - Pure',c='red',ls = 'dashed')\n",
        "plt.plot(train_acc_pure,label='Train - Pure',c='red',ls = 'solid')\n",
        "plt.plot(val_acc_random,label='Val - Random',c='blue',ls = 'dashed')\n",
        "plt.plot(train_acc_random,label='Train - Random',c='blue',ls = 'solid')\n",
        "plt.plot(val_acc_shuffle,label='Val 15% shuffle',c='green',ls = 'dashed')\n",
        "plt.plot(train_acc_shuffle,label='Train 15% shuffle',c='green',ls = 'solid')\n",
        "plt.title('Memorization')\n",
        "plt.ylabel('Accuracy (%)')\n",
        "plt.xlabel('Epoch')\n",
        "plt.legend()\n",
        "plt.show()"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "dSfURqxBwq97"
      },
      "source": [
        "Given that the NN fit/memorized the training data perfectly, do you think it generalizes well? What makes you think it does or doesn't?\n"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "JOBzD7_cxPDO",
        "cellView": "form"
      },
      "source": [
        "#@markdown Write down your discussion\n",
        "memorize_or_generalize = '[][][][][][][][][' #@param {type:\"string\"}"
      ],
      "execution_count": 12,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "mKhJ_nxdaK-7"
      },
      "source": [
        "Isn't it surprising that the NN was able to achieve 100% train accuracy on randomly shuffled labels? This is one of the reasons why training accuracy is not a good indicator of model performance.\n",
        "\n",
        "Also it is interesting to note that sometimes the model trained on slightly shuffled data does slightly better than the one trained on pure data.  "
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "lMLQYrOBII5a"
      },
      "source": [
        "#Section 4: Early Stopping\n",
        "(Time Estimate: 70 min from start)"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "KoY7sfDBojrs",
        "cellView": "form"
      },
      "source": [
        "#@title Video : Early Stopping\n",
        "try: t4;\n",
        "except NameError: t4=time.time()\n",
        "\n",
        "from IPython.display import YouTubeVideo\n",
        "video = YouTubeVideo(id=\"GA6J-50GCWs\", width=854, height=480, fs=1)\n",
        "print(\"Video available at https://youtube.com/watch?v=\" + video.id)\n",
        "\n",
        "video"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "nV3jPkkgW7fN"
      },
      "source": [
        "\n",
        "Now that we have established that the validation accuracy reaches the peak well before the model overfits we want to somehow stop the training early. You should have also observed from the above plots that the train/test loss on real data is not very smooth and hence you might guess that the choice of epoch can play a very large role on the val/test accuracy of your model. \n",
        "\n",
        "Early stopping is a way to end training when the validation accuracies do not increase for over a certain number of epochs. Though this makes sure that we do not overfit on the train data we still haven't solved the problem of local variance. To overcome this we also save the best model based on the val loss/accuracy for use on test dataset.\n",
        "\n",
        "![Overfitting](https://images.deepai.org/glossary-terms/early-stopping-machine-learning-5422207.jpg)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "AEjIGMSiiAwQ"
      },
      "source": [
        "## Exercise 3: Early Stopping\n",
        "Reimplement the main function to include early stopping using the above-mentioned strategy. Then run the code below to validate your implementation."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "J7MjxZDTKgkz"
      },
      "source": [
        "def early_stopping_main(args, model,train_loader,val_loader,test_data):\n",
        "\n",
        "    ####################################################################\n",
        "    # Fill in all missing code below (...),\n",
        "    # then remove or comment the line below to test your function\n",
        "    raise NotImplementedError(\"Complete the early_stopping_main function\")\n",
        "    ####################################################################\n",
        "\n",
        "    use_cuda = not args['no_cuda'] and torch.cuda.is_available()\n",
        "    device = torch.device('cuda' if use_cuda else 'cpu')     \n",
        "\n",
        "    model = model.to(device)\n",
        "    optimizer = optim.SGD(model.parameters(), lr=args['lr'], momentum=args['momentum'])\n",
        "\n",
        "    best_acc  = 0.0\n",
        "    best_epoch = 0\n",
        "\n",
        "    # Number of successive epochs that you want to wait before stopping training process\n",
        "    patience = ...\n",
        "\n",
        "    # Keeps track of number of epochs during which the val_acc was less than best_acc\n",
        "    wait = ...\n",
        "\n",
        "    val_acc_list, train_acc_list = [], []\n",
        "\n",
        "    for epoch in range(1, args['epochs'] + 1):\n",
        "        train(args, model, device, train_loader, optimizer, epoch)\n",
        "        train_acc = test(model,device,train_loader, 'Train')\n",
        "        val_acc = test(model,device,val_loader, 'Val')\n",
        "        if (val_acc > best_acc):\n",
        "            # update best_acc / best_epoch, save the best model, reset wait\n",
        "            ...\n",
        "        else:\n",
        "            # increment wait\n",
        "            ...\n",
        "            if (wait > patience):\n",
        "                # stop training early (e.g. break out of the loop)\n",
        "                ...\n",
        "        train_acc_list.append(train_acc)\n",
        "        val_acc_list.append(val_acc)\n",
        "\n",
        "    return val_acc_list, train_acc_list, model, best_epoch"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "5Na0NM8Dzx5f"
      },
      "source": [
        "args = {'epochs': 200,\r\n",
        "        'lr': 5e-4,\r\n",
        "        'momentum': 0.99,\r\n",
        "        'no_cuda': False,\r\n",
        "        }\r\n",
        "\r\n",
        "acc_dict = {}\r\n",
        "model = Animal_Net()\r\n",
        "\r\n",
        "val_acc_pure, train_acc_pure,_,_ ,best_epoch = main(args,model,train_loader,val_loader,img_test_dataset)\r\n",
        "\r\n",
        "model = Animal_Net()\r\n",
        "val_acc_earlystop, train_acc_earlystop,_,best_epoch = early_stopping_main(args,model,train_loader,val_loader,img_test_dataset)\r\n",
        "\r\n",
        "print(\"Maximum Validation Accuracy is reached at epoch:%2d\"%(best_epoch))"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "LxE2Yl64cpOx"
      },
      "source": [
        "[Click for Solution](https://github.com/CIS-522/course-content/blob/main/tutorials/W05_Regularization/solutions/W5_Tutorial1_Ex03.py)\n",
        "\n",
        "Example solution:\n",
        "\n",
        "![Screenshot from 2021-02-14 03-01-20.png]()"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "C3vFW5Af78sf"
      },
      "source": [
        "Do you think early stopping can be harmful for the training of your network? Discuss among your pod why or why not."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "uNEqK7PJ76HN",
        "cellView": "form"
      },
      "source": [
        "#@markdown Write down your discussion\n",
        "early_stopping = \"%\\\" \\\"\" #@param {type:\"string\"}"
      ],
      "execution_count": 13,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "ezsI9BNd-9xb"
      },
      "source": [
        "#Section 5: L1 and L2 Regularization\n",
        "(Time Estimate: 85 min from start)"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "QWwE8shboqK1",
        "cellView": "form",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 519
        },
        "outputId": "52b26bda-ffad-4a38-8be4-dab665324e5b"
      },
      "source": [
        "#@title Video : L1 and L2 regression\n",
        "try: t5;\n",
        "except NameError: t5=time.time()\n",
        "\n",
        "from IPython.display import YouTubeVideo\n",
        "video = YouTubeVideo(id=\"OLl2nzOeQ68\", width=854, height=480, fs=1)\n",
        "print(\"Video available at https://youtube.com/watch?v=\" + video.id)\n",
        "\n",
        "video"
      ],
      "execution_count": 14,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Video available at https://youtube.com/watch?v=OLl2nzOeQ68\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "execute_result",
          "data": {
            "text/html": [
              "\n",
              "        <iframe\n",
              "            width=\"854\"\n",
              "            height=\"480\"\n",
              "            src=\"https://www.youtube.com/embed/OLl2nzOeQ68?fs=1\"\n",
              "            frameborder=\"0\"\n",
              "            allowfullscreen\n",
              "        ></iframe>\n",
              "        "
            ],
            "text/plain": [
              "<IPython.lib.display.YouTubeVideo at 0x7facc454c710>"
            ],
            "image/jpeg": "/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEABALDBoYFhwaGRoeHRsfHyUlISIiIiUoJSYmMCcxMjAnLSs2PVBCODhLPS8tRWFFS1VWW11bMkdlbWRYbFBZW1cBERISGRYZLxsaL1c9NT9XV1dXV1dXV1dXV1dXV1dXV1dXV1djV1dXV1dXV1dXV1dXV1dXV1ddV1dXV1dXV1dXV//AABEIAWgB4AMBIgACEQEDEQH/xAAbAAEAAgMBAQAAAAAAAAAAAAAAAwQBBQYCB//EAEMQAAIBAgEHBwoEBQQCAwAAAAABAgMRBAUSEyExQVEXMlJhkrHSIjRTcXJzgZGy0QYzQqEUFiNio0NUgsEVogck8P/EABgBAQEBAQEAAAAAAAAAAAAAAAABAgME/8QAIREBAQEAAgICAgMAAAAAAAAAAAERAiExQRJRA/AycYH/2gAMAwEAAhEDEQA/APn4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOoofgLG1IRnF0bTipK83savwJOTzHcaPbfhA5MHWcnmO40e2/COTzHcaPbfhA5MHWcnmO40e2/COTzHcaPbfhA5MHWcnmO40e2/COTzHcaPbfhA5MHWcnmO40e2/COTzHcaPbfhA5MHWcnmO40e2/COTzHcaPbfhA5MHWcnmO40e2/COTzHcaPbfhA5MHWcnmO40e2/COTzHcaPbfhA5MHWcnmO40e2/COTzHcaPbfhA5MHWcnmO40e2/COTzHcaPbfhA5MHWcnmO40e2/COTzHcaPbfhA5MHWcnmO40e2/COTzHcaPbfhA5MHWcnmO40e2/COTzHcaPbfhA5MHWcnmO40e2/COTzHcaPbfhA5MHWcnmO40e2/COTzHcaPbfhA5MHWcnmO40e2/COTzHcaPbfhA5MHWcnmO40e2/COTzHcaPbfhA5MHWcnmO40e2/COTzHcaPbfhA5MHWcnmO40e2/COTzHcaPbfhA5MHWcnmO40e2/COTzHcaPbfhA5MHWcnmO40e2/COTzHcaPbfhA5MHWcnmO40e2/CQY38D4uhDPnKjm3S1Sb2/ADmgbZfh+t0qfzl9j0vw5W6VP5y+wGnBuf5ar9Kn85fYx/LdfpU/nL7AacG5X4ar9Kn85fY9L8L1+nS+cvCBpAb1fhTEP9dL5y8JND8F4mWydHtT8IHOA6qP4Bxb/ANSh2p+E9cn2M9Jh+1PwE2LjkwdS/wABYtf6lDtT8J4f4GxXpKHan4RqOZB0j/BOK6dHtT8I/krFdOj2p+Eo5sHSfyVivSUe1Pwnl/gzE9Oj2p+ED6nknzWh7qn9KLZUyT5rQ91T+lFsADAAyDBkAAYAyDAAyDAAyAAABgDIMGQAMADIBgDIMGQAAAAAAAAAAAAAAAAAAAAAAAABqfxJG+Fa/uj3m2NZ+IPNn7Ue8DkFS4NnpRZPFCUdgHmxhoksYaMq8xRLFHmKJYICWlE2FCJTpIu02StSLkGenIgjI9ZxnFZmyFnts8M1GajaPLPbPDNMvLI5nuRHIDd5J81oe6p/Si0Vck+a0PdU/pRbKKDxkv41UNWZoHU687Ptt4EH4hylVw1Fyo08+SjKTlLmRS2t9evUiDHYlUcoxqTjUcHhnG8Kc569Je3kpnvLFdYjJ2IdKM3eEkk4SjJv2Wrk11nHuXOm0q4iNOm6k3mxjG8nr1LiK2KhBRcpJKUoxj1uWxIzmKUM2SunGzXU1sNBkrD1ZVqdGrGWZglJRk1qqN6qclxtC/xYZnGXW1hljDyqaNVFnOWanZ5rl0VK1m+q4y1jJUMNOrC2dFxtnK61yS/7NRh6zpVIQw06zi6qUsPVpO0IuXlNTtqS1
ta2bD8TRbwVRRTk7w1JNvVOL2Ia1OMnKRs5tqLzVd2dk3a74XNLiMXjMPGFWs6EoOcIypwjJSjnSSVpN+U02tyLdHLVGd82NbVFy10ai1JbrrW+o09HKUcRXjUxEK0IQlejR0NR+VsVSbta/BbELThwvuOix9Z06FWpG14U5SV9l1Fs09TKGMo4eOKqOhOlmwlOMYTjJRlbY3Jq6vwNrlWLeFrpK7dKaSWtt5r1GglkzRRwlWWmq0koaWlKc5Zsmlaoo3/S9q4eolPxyZ23uKytQoyzak7StdpKTzVxlZal6z3ico0aUIznUSjK2a1rzr7M1Lb8DRpyoYjE6SrVpqpU0kHCkqkZxcUkr5rd1a1j1GgsNLCVc2rOjCnUjrhedNzaabhFalqcdS1F0+E6banlfDzjOUasWqcc6e3yVr2rdseosVMTCLgpSs6jtHrdm7fJM5yvJ4iWUNHCV54amoqUc1y1T12ev58CxVxsa9XBaONRqFW826coqP8ASkrNtbRpfx/v+LuEytHQzq15RglWqU1a+vNm0kltb1biellWhOnOpGos2nz7ppx1X1xaujSxdWnQXPpweMraScY3nGDnJppWepu2u2xkdSnKSyg46WopYWChKUGnJ2qalqV9pNa+ErcvKkJyp6GdOUZVdHJvO1+Q3aDSs3q9W0zLLeGUs11LPOzdcZ869rXtxIMbSd8BaLtGsr2XNWhmtfDcVFjoVcZnVo1YwoyzaMNFUactjqtpW6l1ay6zOMraYnLGHpTcJ1EpRtnam1G+zOaVo/EjrZZpwxVPDv8AXFu+vU7xzVs33evqNZjas3LFxqTrQn5SpU4U7wnDM1PmvOb1p69VtxNCpoq2BlOM83+HlTuoydpPR2TstWx7eBNX4T9/psKOUYxpTq1p04xhUnHOi3ZWlZJ3/Vu9ZJhcp0a2do5puGuSaaklxcXrNHGjOMIVZU5SjSx1ec4qLbzXKaU0t9rp6iy6n8TitNSjLMp0KkJTcZRz3KzUUmru1m/iXS8IvUsuYacoRjVTc7KLs81t/pzrWv1bTYHP6CX/AI7BxzHnRlhW1Z3Vpxvdbt50AjHOSeGQAVgAAAAAAAAAAAAAAAAAAA1uX/N37UTZGty75u/aiBzaRloyohgYsYaPaMNGWmIolgRxJIgTUyWeIUFd/IgizW5RxC0qjKSXAjfGbW3jj7rZt2HunjHvNPHUuPqFDF67Spyinsd7r4lkdbI6GNVS2Bs1lCq4yvuNkax5+UwZ4Z6Z4YZeZEciRkUgjeZJ81oe6p/Si2VMk+a0PdU/pRbCsAyAMAyAMAyAMAyAMAyAMFfGYPS5v9SpTlFtpwlbbxWx/FFkAlxUwWAjRzmnKU5u85zd5StqXwXBFoyAtu+WAZARgGQBgGQBgGQBgGQAAAAAAAAAAAAAAAAAAAAAADXZd83ftRNia3Lvm79qIHPIMwmZYBBhMGGiJ6TPET0iiRM0uVZQUnNvU0tiu2blFHKMU2k+H/ZWuPlXyfW/pzck9VrfHYRUKzlUsoTXCSer4ou4CKzZLdaxYpqK2JdZqOtj3ZqOtbjYUKqkrb0lc12IqZ0Wi1go2jfj3LYVx5Yt3PEmZzjzJjHPXlsjkz0yNjE1v8k+a0PdU/pRbKmSfNaHuqf0otmWgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAANZl/zd+1HvNmav8Qv/AOs/aj3gc7FmWyKLPe4D0mLnlC5MXSVaEedKKvsu0j3GpF7JJ/FHK/imv/VUVe8Yr4XbOfcm94kWvpukitsor1tFHKdaEs3NnGT17JJs+fk+Cr6Oopbt5rDjcuunjj5QbitjXx1mwwmKi/J3o0EI52yTjJGxgoqCSbc+LJHW8vbb155qvwRzmE/E9Sk5Z15JvVdXt6tZtbynGUZ701ddaOPxmDdGpKnJ61v3NbmVztuPo+AxbqU1J6m795Zzz5/g8qOnFLSz1cLNE+JypGrTc
HVkr21+p3LrHxdvJkWcc9+HKzScM9yjrave3wubrPKy6nJPmtD3VP6UWypknzWh7qn9KLZhoAAAAAAAAAAAAAYuLlLKFGUp0ZRTajO8rPXZJ2/exUw9N0qMKlWWilCFpybTu5SzpJa+Oz1gX8p4xYfD1a0tapwcrcbLYS4asqlOFSOycVJepq5ratBYnAuNaWfF50nZqzUW3FNrbsV7b0Xsn4aNGjCnC+bFeTfXZcALIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMNi5UypRlUoTjBXk1q1/B/tcqUsNNyqOScf6ilCUtkYxppRe3jdtesDbM53FZTjisA6sdmlcezJrus/ibHA1dPGslVz4OyjJNZyvHW9X7Gt/8KqeDnTw0H5VW+bnK3ktq6u+CQGniyS+onhkbFL/AEf/AGh9zNXJeLS1UJTfBTprvkjXSIEzxOdtbskKmByjrUMG11yqUu5SNbiPw5lOo/Loyl1aSlb5ZxOmpNaDLeLU6tRRd7yjr3NKP3NUdJW/BeUW7rDf5KXiI/5Jyl/tv8lLxEhWgBv/AOScpf7b/LS8Q/knKX+2/wAtLxFRVyXjoq0ai17FJnQU8RT9bvqNT/JOUv8Abf5aXiNtk78NZQjFxq4d3XNlpKT+D8oasWIajkctYpVcROS2K0V8DqsXkPKUqbjDDu7b16Sls7Rpv5Jyl/tv8lLxBeTn7i5v/wCScpf7b/JS8Q/knKX+2/yUvEGU34exn9PNlJeTLV6rG6WIT2ST9TNB/JWUv9t/lpeI9r8I5Sir/wANZLbapS8RdZx9RyT5rQ91T+lFsqZJ81oe6p/Si2ZaAAAAAAAAAAAAAGDEopqzSa6z0AIMTFKjNJWWZLuJKXNj6l3HjF/lVPYl3HulzY+pdwHsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGA0ZAHmMEtiS9SIcFzH7c/qZYK+C5j9uf1MCcGQBgGQBgGQBgGQBgGQBgGQBgGQBgjxH5c/ZfcSkeI/Ln7L7gIMk+a0PdU/pRbKmSfNaHuqf0otgDBkwBSyflHTyqx0coaKeZK7i7ysnqs+DRdNBkfG04VMY5yspYlyjdPWtHBXXHWmW3UzsVSqRVW0qElrjPMV3FxvuT1MDaA57K8a7pUViMzO/iqNnRz+ZnLOvfXx+BXeeo4mEM/RQxsM5Rzrqjmwz83fa+ds6wOpByWVE9DjtApaDRU9HZStpru+YtuzN2by3lnC040Kcqed/UxFBtpy2Z3lS6tV7gdEDj8XDMjjHDPTpYqjorOXkxbp5+auHPv6mdgmAIqGJhUTcJJ2dnxT4NbiUq4nAqctJBunVWycd64SX6l6wsxLi/yqnsS7j3S5sfUu41mKykqdKccTalLNklL/Tnq3Pc+p/ubHDVFKnGUWmnFNNbHqBZYlAAQAAAAAAAAAAAAAAAAAAFL/yH/wBr+H0cr5mfn3jm5t7cb7eouGlq4mEMp50pWSw2a3Z2ztJe1+Nhi61OrUjKEasndRU4xm4xUZXvG2q7va/BAboGpx+n/h8VptFmaOejzM/O2O1779mw1eHVSE4RoOWfPJzevOd6yzc1tv8AVzgOqBzmES0mF0SnzZfxWdnc3M/XfbLOt17SLJmGg8myqPOdSNGtF3cr622lb5WA6gHJ4WhHSYeMs/MlgXKpdzs5rNs5f3c/r1G7/D1SUsDh3Ntz0Uc7Ove6Vne+8DZEMcTBzdPOWekm477cVxJivisJCqlnLWtcZJ2lF8U9wWZ7TkGC5j9uf1MrLE1KDtX8qnurJbPbW71rV6ixgGnTundOU7P/AJMFmLIACAAAAAAAAAAAAAAAABHiPy5+y+4kI8R+XP2X3AQZJ81oe6p/Si2VMk+a0PdU/pRbAGDIAwDIAwDIAwDIAwDIAGAV8RT00UoztBvynHa1wT3AVMuYGGLwzg5eQrybjrbzU7WfrK+T6NfBxzEtNhopZlraSKe636rfM2leCjQnGKSSg0ktiViSMVKmk
9jjZ/INTlcz0xh8RCpFShJSX/7U+DJDVQydUjKTjPNmubU2qa6NSO9ritZZw+OvLR1Y6OruTfky64Pf6toM+l0GDIZAAAAAAAAAAAAAAAAYBkAYBkAYBkAYBkwBkwQ1qzSejSnNNLNzkrX48DxPDOpGKqyd1tUG4xf/AGBNVqwjz5RV+LSNRh6bopuhOOuUm6MpJJ+U9cOi/wBjbPDQdrwi7Kyuk7L4lfC0ISheUItqc9qT/WwsuJMLjI1bpXjNc6EtUo+tf97CwUcZkqnVqKrnThVWycJWfqtsaNfjMvvBSjDFxupcypT2Nb86L2P5hqcfl/FvweYyTSa1pq6MhhkAAAAAAAAAAAAAI8R+XP2X3EhHiPy5+y+4CDJPmtD3VP6UWypknzWh7qn9KLYAAAAAAAAAAADAK+IpOtFKM7Qb8px2tcE9wDEUnWilGdoN+U47WuCe4mpwUYqMUkkrJLYkKcFGKjFJJKyS2JFKvWlWk6VJ2itVSot39sf7u4CLKGIlVjUp0XZRjLSVOGrmx4y7iTBYmUHGjWflNf057FNW2dUlw37SepRjTw8oQVoqErL4M9VMNGrSUJq6aXrT3NPc1xKiaUbpp7GrFKGEcoypVkqkFbMk+d8etcRhsRKE1RrO8n+XU3VFwfCXUXZRumuKsRZWvz6uH516tHpbakF1r9S69vrL1GrGcVKDUovY07ohwkKkLwm86K5k762uEutcd5FVwLjJ1KDUJvXKL5k/Wtz61+4a6q+CphccpyzJxdOqtsJb+uL3otBLMZAAQBgAZBgyAAAAAAAAAAAAAAYIKmIWkVJJuTTba/THi/iTTlZN63ZXstpFhHJ04yqJKbV2lu4L4AZw+HjSjmxXW3vb3tvezxicdCm1HXKb2Qiryfw3LrZ4x2JlHNp07OrPm32RS2zfUu+xJhMJGknbXKWuU3zpPi2UQqeKnshSpr+6TnL4pWX7kGDWKUG06M/KnqanH9T33fcbQgwPMftz+pgQxyjmtRrwdJvUm3eDfBS+9ifEYWnVSVSEZpO6zknZ8SWcFJOMkmnqaetM19O+GqRg23Qm7Qb1uEug3we75DybiCvGWCSlSTnQcvKpt8y++D3Lqer1GywuLhVTcHs1ST1Si+DW4llFNWaunuNf/DOrKbd6daErRqRW2O1X4rqfAjW75bIFCljZQkqeIShJ6ozXMn6nufU/3LwSzGQAEAYAGQYMgAAAI8R+XP2X3EhHiPy5+y+4CDJPmtD3VP6UWypknzWh7qn9KLYAA8yvbVt3XA9ArWrcafyl9xatxp/KX3AsgrWrcafyl9xatxp/KX3AsmCvatxp/KX3IcVQr1I5ufCKb8qyldrhe+oCbE0nWilGdoN+U47WuCe4mpwUYqMUkkrJLYkVqMaiilB0c1KyspWt1ayDKSxWieY4X1XzVLOzd+br22A9160q03SpNqK1VKi3f2x/u7i3QoxpxUIK0VsRUwSloo6F0tHbVZS/fXtJ7VuNP5S+5Ue8X+VU9iXce6XNj6l3FTFKtop66fMlulw9Z7pKtmrXT2LdL7kVLicPGrBwmrp/Bp7mnuZWw2IlTmqNZ3k/y6m6ouD4S6t+4ntW40/lL7lTKSeiemdPN3WUs7O3ZtnfO4WKNjKN01xVuBXwkKkLwm86K5k762uEutcd5DgVitFHSunn213Tv1Xs7XJpRrNNN07NW2S+5B7xWFhVjaa2a01qlF8U9qZV09TD6q150t1VLWvbiu9fsZwUMTGFpum2m0rpt5t9V3faT2rcafyl9wsqeE1JJxaaexrWmZNU8FXpSc6Dpq926TUsyT6tfkv1EuFxlSpeN4QqR50JRkpL99a61qBZ9NgzSUZ1c+MZaS2dWcud7MI361dmzarcafyl9zU5MWU9K9M6Ohznm5y/qZu7m6gjZYec41Y0W85Kind866dtb3l0j0Ec/Pt5VrX324EgAAAAAABXarX1Onb1S+5i1fjT+UvuBZBWtX40/lL7i1fjT
+UvuBZBWtX40/lL7i1fjT+UvuBnFqbUVC6vOOc+Eb3f2+JOUa1PEOdNqVO0W3LVLotLfrJrVuNP5S+4EOAWfUq1nvk6ceqMHZ/+2cXjV5IVbQRs6fOnultz5X38S5atxp/KX3LRYIMDzP8AnP6mYtW40/lL7kGDVbM1Onzp7pdJ9ZBfIcZh1VpSg/1LU+D3NepmLVuNP5S+4tW40/lL7gYyfXdSjCT5zVpe0tT/AHRmeeq0La6bjJS6ndWfeUslKto5WdO2lq7pdN9Z7xUq6qUUpQ1zd7KWvyHt1ii/VpRnFxlFSi9qaumUdHVw/MzqtHoN3nBf2t85dT19xZtW40/lL7i1bjT+UvuFlesNiYVY50JXX7p8GtzJTW18BVctJTnTp1N8lGVpLhKN9feYo42tn6OqqdOo+bdScZ+zK+v1bQufTZmmxkqqq1EnNJyopWTazc685K3VqNjatxp/KX3NTUWU/wCKeidHQar6Rb9+bbXb1hle0k6cqdm7Va0rqV20mm1bhs2dZsCKVBTzXNJyjrT16nxRKBkAACPEflz9l9xIR4j8ufsvuAgyT5rQ91T+lFsqZJ81oe6p/Si2AAAAAAAAAMGQBVo4XR1G4O1OV24blLjHh1osRkmrp3XFGStRwujqNwdqcrtw3KXSjwvvQENehKjN1qKunrqU1+r+6P8Ad3luhXjUgpwd4vYz3GSaundFHEUJUZutRV09dSmv1f3R/u7y+UWsX+VU9iXce6XNj6l3EFSvGph5Tg7xcJWfwZmpiY0qSlN6rLZtb3JLeyK94rExpQc5uy+bb3JLeythsNKc1WrK0v0Q2qmv+5cWMNhpTmq1ZeV+iG6mv+5cWXioFWhTqSqOpUealdQgnqt0pcW/2FCnUlUdSo81K6hBPVbpS4t/sWiKGQABWxWDhVSvdSXNnHVKL6mWQBro4udHycRZx3VkrR/5r9L69nqNgncNXVnrTNf/AAk6Dvh7OG+lJ2S64Pd6tnqDXVbEGEZDIAAAAAAAAAAAAAr4uk5ZjTtmTUnd2Vtaa/cnPNakpwlCWtSTT9TI8LJKOjc86cElJ7Hs1MCvgHmVKtF7pOpHrjN3f/tcvlTHYeUs2pTsqsLuN9kk9sH1P7HrCYyNVO14zjqlB86L4P7lFgr4HmP25/UywQYHmP25/UyCwQYzEKlSlN/pWpcXuXxdiSpUUU5SaSWtt7EUaSeJnGo01Rg7wT1Ocum1wW75gWMn0HTowg+cl5XtPW/3ZmVWWnUEvJUHJu2+6SS/cnIMG5uLlU1NybUejHcvX9wJzIAAir0IVIuM4qUXuZKANderh9udWo8dtSC6+kv39ZdoVo1IqUJKUXsaPZSrYFqTqUJKnUeuSt5E/aXHrWsNdXyvAjoSk4Jzjmy3q97fEkDIAABHiPy5+y+4kI8R+XP2X3AQZJ81oe6p/Si2VMk+a0PdU/pRbAAAAAAAAAAAAYMgCrRwujqNwdqcruUNylxjwvvRYjJNXTujJWo4XR1G4O1OV3KG5S6UeF96AqZQw8qUalWirqUZaSn0tXOjwl3kmCw0puNatzkvIhtVNW/eT4/AtYmSdGbTusyXcySlzY+pdxdHoq0KdSVR1KjcUrqEE9VulLi+4UIVJVHUqNxSuoQT1W6UuLf7FogGQAAAAAAAYMgDBkAAAAAAAAAAAAAAAwQVaSUnVjG9RRasnbOW2xYMAR4euqkVKN/U1Zp700R4nBQqNSd4zWycXaS+PDqeo9YmhKSThNwktaa1p9TW9GKmJcJKMoTadvLirq/WlrQECp4mGypTqL++Li/nHV+xDg/4pw1aGPlT1+VL9T3ai/8AxVPPzNJHP6Ocr/IhwVeFs3PjnZ8/Jur857i6PMcnKTUq03Wa1pPVBPioLV8Xcut22kFPGQlNwjnNq93myzU1uzrWPNOlOakq6g4y/QtaS63vIMNafMlGf9JO7tdZzT1a+HeWglbUjIAAAAAAMGQBgyAAAAAjxH5c/ZfcSEeI/
Ln7L7gIMk+a0PdU/pRbKmSfNaHuqf0otgAAAAAAAAAAAAAAwZAGvq4V01VcHanKE3KHCVtseF96PeDhUk1UqNxSVoQT3dKXFv8AYnxf5VT2Jdx7pc2PqXcB6MgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwZAHlwTd2k2tjsVsDSjmuWbHOz567K/Oe8tlfA8z/nP6mBOZAAAAAAAAAAAAAAAAAAEeI/Ln7L7iQjxH5c/ZfcBBknzWh7qn9KLZpsm1pfw1HX/pQ+lFnTy6QGwBr9PLpMaeXSA2ANfp5dJjTy6TA2ANfp5dJjTy6QGwBr9PLpMaeXSYGwBr1Xl0u4aeXSYGwBr9PLpMaeXSYFrF/lVPYl3HulzY+pdxrsTWlo563zJdx6p1pZq8p7F3AbIGv00ukxppdJgbAGv00ukxppdJgbAGv00ukxppdJgbAGv00ukxppdJgbAGv00ukxppdJgbAGv00ukxppdJgbAwUNNLpM5LLWMbxlem8TiYVNFB0IU6kowlKzbTS3u3xCybcd4D5VLKknQhmYzGPEObTjpaubZ822vbsOj/D+LX8XWp08TiK0YUoqWlnKSVRStLNvu6w1y4Z7jsjJr9PLpMaeXSYYbAGv08ukxp5dJgbAGv08ukxp5dJgbAGv08ukxp5dJgbAGv08ukxp5dJgbArYHmf85/UyDTy6TIMHWlmbXzp/UwNuDX6eXSY08ukwNgDX6eXSY08ukwNgDX6eXSY08ukwNgDX6eXSY08ukwNgDX6eXSY08ukwNgDX6eXSY08ukwNgDX6eXSY08ukwNgR4j8ufsvuKenl0meK1aWZLW+a+4Ctk3zaj7qH0oo4+nWhUrVacW1KlTScW3JSg5O2Yk207patzZtcmU1/DUNX+lT+lFrRLgBy88FjbZ0ambOea561e+bLUuqLaXWlv328q4StNt0lGWfh50neWbmuVrS2a1tN7olwGiXADmcTQxVOLkqk3FZ14x1vNU6eaopRbTzc/jtPFKjj5xhJTcU6ctUnaV3GdrprU03Ddu+B1OijwGiXADXPCXlSk6lS9NPVnap3VvLW802MyLXnWqOMo6KUtUb28iokqy9fkpr1s6rRLgNEuAHMSwmUM2X9ZXztzS1Wlrjw2w1dXrvbyZha9OrVdWSlCUpSjZ2s3K7bXF/taxvNEuA0S4Ac/kDJs8Mpxkko2jm64OTavduUYq62Wvr2m3LOiXAaJcAKwLOiXAaJcAKWJ/Ln7Eu490+avUu4lxVNaKer9Eu4906SzY6ty7gIAWdFHgNFHgBWBZ0UeA0UeAFYFnRR4DRR4AVgWdFHgNFHgBWBZ0UeA0UeAFYFnRR4DRR4AVz53+L6zp5TVSNrwVKSvsutaufTNFHgUsTkTC1p59WhCcrWvJXdgOHli9FnZTjRhmVoqnTp5vkxq/qa9WY3ffnes9f8Ax5+dX93H6jtHkDBuKg8PDMTuo681Pja9rkuEyThqDbo0YU21ZuKtdDRkFnRLgNFHgBWBZ0UeA0UeAFYFnRR4DRR4AVgWdFHgNFHgBWBZ0UeA0UeAFcgwfMftT+pl/RLgV8HTWY9X65/UwMAs6JcBoo8AKwLOiXAaJcAKwLOiXAaJcAKwLOiXAaJcAKwLOiXAaJcAKwLOijwGiXACsCzolwGiXACsea3Ml7L7i3olwPFamsyWr9L7gI8l+bUPdU/pRaKuS/NqHuqf0otAAAAAAAAAAAAAAAAAeKsM6Mo8U180ZirJLgj0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEdGnmK1762/m7kgAAAAAAAAAAAAAAAAAAAAR1uZL2X3EhHW5kvZfcBDkvzah7qn9KLR8wofj3F06cIKnQtCKirxneyVtflEnKHjPR4fsz8YH0sHzTlDxno8P2Z+McoeM9Hh+zPxgfSwfNOUPGejw/Zn4xyh4z0eH7M/GB9LB805Q8Z6PD9mfjHKHjPR4fsz8YH0sHzTlDxno8P2Z+Mcoe
M9Hh+zPxgfSwfNOUPGejw/Zn4xyh4z0eH7M/GB9LB805Q8Z6PD9mfjHKHjPR4fsz8YH0sHzTlDxno8P2Z+McoeM9Hh+zPxgfSwfNOUPGejw/Zn4xyh4z0eH7M/GB9LB805Q8Z6PD9mfjHKHjPR4fsz8YH0sHzTlDxno8P2Z+McoeM9Hh+zPxgfSwfNOUPGejw/Zn4xyh4z0eH7M/GB9LB805Q8Z6PD9mfjHKHjPR4fsz8YH0sHzTlDxno8P2Z+McoeM9Hh+zPxgfSwfNOUPGejw/Zn4xyh4z0eH7M/GB9LB805Q8Z6PD9mfjHKHjPR4fsz8YH0sHzTlDxno8P2Z+McoeM9Hh+zPxgfSwfNOUPGejw/Zn4xyh4z0eH7M/GB9LB805Q8Z6PD9mfjHKHjPR4fsz8YH0sHzTlDxno8P2Z+McoeM9Hh+zPxgfSwfNOUPGejw/Zn4xyh4z0eH7M/GB9LB805Q8Z6PD9mfjHKHjPR4fsz8YH0sHzTlDxno8P2Z+McoeM9Hh+zPxgfSwfNOUPGejw/Zn4xyh4z0eH7M/GB9LB805Q8Z6PD9mfjHKHjPR4fsz8YH0sHzTlDxno8P2Z+McoeM9Hh+zPxgfSwfNOUPGejw/Zn4xyh4z0eH7M/GB9LB805Q8Z6PD9mfjHKHjPR4fsz8YH0sHzTlDxno8P2Z+McoeM9Hh+zPxgfSyOtzJey+4+ccoeM9Hh+zPxmJf/IOMaa0eH1q3Nn4wOTAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAB//9k=\n"
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 14
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "xfsfYgYDqvwj"
      },
      "source": [
        "Some of you might have already come across L1 and L2 regularization before in other courses. L1 and L2 are the most common types of regularization. These update the general cost function by adding another term known as the regularization term.\r\n",
        "\r\n",
        "***Cost function = Loss (say, binary cross entropy) + Regularization term***\r\n",
        "\r\n",
        "Due to the addition of this regularization term, the values of parameters decrease because it assumes that a neural network with a lower parameter values leads to simpler models. Therefore, it will also reduce overfitting to quite an extent."
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "hSX-9e1WsGvI"
      },
      "source": [
        "Discuss among your teammates whether the above assumption is good or bad."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "XnnxxMYHswhK",
        "cellView": "form"
      },
      "source": [
        "#@markdown Write down your discussion\n",
        "l1_l2_assumption = 'kkk' #@param {type:\"string\"}"
      ],
      "execution_count": 15,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "zEHr7hyns-9K"
      },
      "source": [
        "##Unregularized Model"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "2aIo8e8NQfWQ",
        "cellView": "form"
      },
      "source": [
        "#@title Dataloaders for Regularization\r\n",
        "data_path = pathlib.Path('.')/'afhq' # using pathlib to be compatible with all OS's\r\n",
        "img_dataset = ImageFolder(data_path/'train', transform=train_transform)\r\n",
        "\r\n",
        "#Splitting dataset\r\n",
        "reg_train_data, reg_val_data,_ = torch.utils.data.random_split(img_dataset, [30,100,14500])\r\n",
        "\r\n",
        "#Creating train_loader and Val_loader\r\n",
        "reg_train_loader = torch.utils.data.DataLoader(reg_train_data,batch_size=batch_size,worker_init_fn=seed_worker)\r\n",
        "reg_val_loader = torch.utils.data.DataLoader(reg_val_data,batch_size=1000,worker_init_fn=seed_worker)\r\n"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "ltJEADW-ou6H"
      },
      "source": [
        "Now let's train a model without any regularization and keep it aside as our benchmark for this section."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "MM2k5L7QPrO4"
      },
      "source": [
        "args = {'epochs': 150,\r\n",
        "        'lr': 5e-3,\r\n",
        "        'momentum': 0.99,\r\n",
        "        'no_cuda': False,\r\n",
        "        }\r\n",
        "\r\n",
        "acc_dict = {}\r\n",
        "model = Animal_Net()\r\n",
        "\r\n",
        "val_acc_unreg, train_acc_unreg,param_norm_unreg,_ ,_ = main(args,model,reg_train_loader,reg_val_loader,img_test_dataset)\r\n",
        "\r\n",
        "##Train and Test accuracy plot\r\n",
        "\r\n",
        "plt.plot(val_acc_unreg,label='Val Accuracy',c='red',ls = 'dashed')\r\n",
        "plt.plot(train_acc_unreg,label='Train Accuracy',c='red',ls = 'solid')\r\n",
        "plt.axhline(y=max(val_acc_unreg),c = 'green',ls = 'dashed')\r\n",
        "plt.title('Unregularized Model')\r\n",
        "plt.ylabel('Accuracy (%)')\r\n",
        "plt.xlabel('Epoch')\r\n",
        "plt.legend()\r\n",
        "plt.show()\r\n",
        "print('maximum Validation Accuracy reached:%f'%max(val_acc_unreg))"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Zlj0-w2r-PIx"
      },
      "source": [
        "##L1/LASSO Regularization"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "m-TABViwyd7P"
      },
      "source": [
        "L1 Regularization uses a Regularization Function which is the sum of the absolute value of all the weights in DLN, resulting in the following loss function ( L  is the usual Cross Entropy loss):\r\n",
        "\r\n",
        "\\begin{equation}\r\n",
        "L_R=L+λ∑|w^{(r)}_{ij}|\r\n",
        "\\end{equation}\r\n",
        "\r\n",
        "At a high level L1 Regularization is similar to L2 Regularization since it leads to smaller weights (you will see the analogy in the next subsection). It results in the following weight update equation when using Stochastic Gradient Descent (where $sgn$ is the sign function, such that $sgn(w)=+1$ if $w>0$, $sgn(w)=-1$ if $w<0$, and $sgn(0)=0$):\r\n",
        "\r\n",
        "\\begin{equation}\r\n",
        "w^{(r)}_{ij}←w^{(r)}_{ij}−ηλsgn(w^{(r)}_{ij})−η\\frac{\\partial L}{\\partial w_{ij}^{r}} \r\n",
        "\\end{equation}"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "sNzq1uXyNn0y"
      },
      "source": [
        "###Exercise 4: L1_reg\n",
        "\n",
        "Write a function which calculates the L1 norm of all the tensors of a Pytorch model."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "Of28m1kJMYe-"
      },
      "source": [
        "def l1_reg(model):\n",
        "\n",
        "    ####################################################################\n",
        "    # Fill in all missing code below (...),\n",
        "    # then remove or comment the line below to test your function\n",
        "    raise NotImplementedError(\"Complete the l1_reg function\")\n",
        "    ####################################################################\n",
        "    l1 = 0\n",
        "\n",
        "    for ... in ...:\n",
        "        ...\n",
        "\n",
        "    return l1\n",
        "\n",
        "##uncomment to test\n",
        "# net = nn.Linear(20,20)\n",
        "# print(f'L1 norm of the model: {l1_reg(net)}')"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "-sGK_ddrPLD9"
      },
      "source": [
        "[Click for Solution](https://github.com/CIS-522/course-content/blob/main/tutorials/W05_Regularization/solutions/W5_Tutorial1_Ex04.py)\n",
        "\n",
        "Example Output:\n",
        "\n",
        "![Screenshot from 2021-02-12 02-55-09.png]()"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Kt4XPvcIQL_c"
      },
      "source": [
        "Now, let's train a classifier which uses l1 regularization. Tune the hyperparameter lambda1 such that the val accuracy is higher than for unregularized model."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "koVWFXjNks44"
      },
      "source": [
        "args = {'epochs': 150,\r\n",
        "        'lr': 5e-3,\r\n",
        "        'momentum': 0.99,\r\n",
        "        'no_cuda': False,\r\n",
        "        'lambda': ...\r\n",
        "        }\r\n",
        "\r\n",
        "acc_dict = {}\r\n",
        "model = Animal_Net()\r\n",
        "\r\n",
        "val_acc_l1reg, train_acc_l1reg,param_norm_l1reg,_,_ = main(args,model,reg_train_loader,reg_val_loader,img_test_dataset,reg_function1=l1_reg)\r\n",
        "\r\n",
        "##Train and Test accuracy plot\r\n",
        "\r\n",
        "plt.plot(val_acc_l1reg,label='Val Accuracy L1 Regularized',c='red',ls = 'dashed')\r\n",
        "plt.plot(train_acc_l1reg,label='Train Accuracy L1 regularized',c='red',ls = 'solid')\r\n",
        "plt.axhline(y=max(val_acc_l1reg),c = 'green',ls = 'dashed')\r\n",
        "plt.title('L1 regularized model')\r\n",
        "plt.ylabel('Accuracy (%)')\r\n",
        "plt.xlabel('Epoch')\r\n",
        "plt.legend()\r\n",
        "plt.show()\r\n",
        "print('maximum Validation Accuracy reached:%f'%max(val_acc_l1reg))"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "nFdSGZd1edc9"
      },
      "source": [
        "What value of Lambda worked for L1 Regularization?"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "ksv9-jEzeddA",
        "cellView": "form"
      },
      "source": [
        "#@markdown Write down your discussion\n",
        "lambda1 = 'jjj' #@param {type:\"string\"}"
      ],
      "execution_count": 16,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "-HuDsreduuem"
      },
      "source": [
        "##L2 / Ridge Regularization"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "ik7S1rjp3cIz"
      },
      "source": [
        "L2 Regularization is a commonly used technique in ML systems is also sometimes referred to as “Weight Decay”. It works by adding a quadratic term to the Cross Entropy Loss Function  L , called the Regularization Term, which results in a new Loss Function  LR  given by:\r\n",
        "\r\n",
        "\\begin{equation}\r\n",
        "LR=L+λ∑(w^{(r)}_{ij})^2\r\n",
        "\\end{equation}\r\n",
        "\r\n",
        "In order to get further insight into L2 Regularization, we investigate its effect on the Gradient Descent based update equations for the weight and bias parameters. Taking the derivative on both sides of the above equation, we obtain\r\n",
        "\r\n",
        "\\begin{equation}\r\n",
        "\\frac{\\partial L_r}{\\partial w^{(r)}_{ij}}=\\frac{\\partial L}{\\partial w^{(r)}_{ij}}+λw^{(r)}_{ij}\r\n",
        "\\end{equation}\r\n",
        "Thus the weight update rule becomes:\r\n",
        "\r\n",
        "\\begin{equation}\r\n",
        "w^{(r)}_{ij}←w^{(r)}_{ij}−η\\frac{\\partial L}{\\partial W^{(r)}_{ij}}−ηλw^{(r)}_{ij}=(1−ηλ)w^{(r)}_{ij}−η\\frac{\\partial L}{\\partial w^{(r)}_{ij}}\r\n",
        "\\end{equation}\r\n",
        "\r\n",
        "where, $\\eta$ is learning rate."
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "3EWNnjoDOCRG"
      },
      "source": [
        "###Exerice 5: L2_reg\n",
        "\n",
        "Write a function which calculates the L2 norm of all the tensors of a Pytorch model."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "lm27RmzgNET2"
      },
      "source": [
        "def l2_reg(model):\n",
        "\n",
        "    ####################################################################\n",
        "    # Fill in all missing code below (...),\n",
        "    # then remove or comment the line below to test your function\n",
        "    raise NotImplementedError(\"Complete the l2_reg function\")\n",
        "    ####################################################################\n",
        "    l2 = 0\n",
        "\n",
        "    for ... in ...:\n",
        "        ...\n",
        "\n",
        "    return l2\n",
        "\n",
        "##uncomment to test\n",
        "# net = nn.Linear(20,20)\n",
        "# print(f'L2 norm of the model: {l2_reg(net)}')"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "fc9hIj9yP2hf"
      },
      "source": [
        "[Click for Solution](https://github.com/CIS-522/course-content/blob/main/tutorials/W05_Regularization/solutions/W5_Tutorial1_Ex05.py)\n",
        "\n",
        "Example Output:\n",
        "\n",
        "![Screenshot from 2021-02-12 02-57-57.png]()"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "QsPcCqORQjCi"
      },
      "source": [
        "Now, let's train a classifier which uses l2 regularization. Tune the hyperparameter lambda1 such that the val accuracy is higher than for unregularized model."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "ciBXMW_lvLc1"
      },
      "source": [
        "args = {'test_batch_size': 1000,\r\n",
        "        'epochs': 150,\r\n",
        "        'lr': 5e-3,\r\n",
        "        'momentum': 0.99,\r\n",
        "        'no_cuda': False,\r\n",
        "        'lambda':...\r\n",
        "        }\r\n",
        "\r\n",
        "acc_dict = {}\r\n",
        "model = Animal_Net()\r\n",
        "\r\n",
        "val_acc_l2reg, train_acc_l2reg,param_norm_l2reg,model ,_ = main(args,model,train_loader,val_loader,img_test_dataset,reg_function1=l2_reg)\r\n",
        "\r\n",
        "##Train and Test accuracy plot\r\n",
        "\r\n",
        "plt.plot(val_acc_l2reg,label='Val Accuracy L2 regularized',c='red',ls = 'dashed')\r\n",
        "plt.plot(train_acc_l2reg,label='Train Accuracy L2 regularized',c='red',ls = 'solid')\r\n",
        "plt.axhline(y=max(val_acc_l2reg),c = 'green',ls = 'dashed')\r\n",
        "plt.title('L2 Regularized Model')\r\n",
        "plt.ylabel('Accuracy (%)')\r\n",
        "plt.xlabel('Epoch')\r\n",
        "plt.legend()\r\n",
        "plt.show()\r\n",
        "print('maximum Validation Accuracy reached:%f'%max(val_acc_l2reg))"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "R_Cm6_P7erDt"
      },
      "source": [
        "What value of Lambda worked for L2 Regularization?"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "4UlpzdFCerDv",
        "cellView": "form"
      },
      "source": [
        "#@markdown Write down your discussion\n",
        "lambda2 = '' #@param {type:\"string\"}"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "GFz0QB0hd387"
      },
      "source": [
        "##L1+L2 / Elastic net regularization\n",
        "\n",
        "Elastic Net regularization uses both L1 and L2 weights for regression. The loss function becomes:\n",
        "\n",
        "\\begin{equation}\n",
        "LR=L+ λ_{1}∑|w^{(r)}_{ij}| + λ_{2}∑(w^{(r)}_{ij})^2\n",
        "\\end{equation}\n",
        "\n",
        "The weights update equation then becomes:\n",
        "\n",
        "\\begin{equation}\n",
        "w^{(r)}_{ij}←(1−ηλ_{2})w^{(r)}_{ij}−ηλ_{1}sgn(w^{(r)}_{ij})−η\\frac{\\partial L}{\\partial w_{ij}^{r}} \n",
        "\\end{equation}\n",
        "\n",
        "where, $\\eta$ is learning rate."
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "8fMhf6SFQ3jS"
      },
      "source": [
        "Now, let's train a classifier which uses both l1 and l2 regularization. Tune the hyperparameters lambda1 and lambda2 such that the val accuracy is higher or on par with other models."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "RcKdgtMeTLFB"
      },
      "source": [
        "args = {'epochs': 150,\n",
        "        'lr': 5e-3,\n",
        "        'momentum': 0.99,\n",
        "        'no_cuda': False,\n",
        "        'lambda1':...,\n",
        "        'lambda2':...\n",
        "        }\n",
        "\n",
        "acc_dict = {}\n",
        "model = Animal_Net()\n",
        "\n",
        "val_acc_l1l2reg, train_acc_l1l2reg,param_norm_l1l2reg,model ,_ = main(args,model,train_loader,val_loader,img_test_dataset,reg_function1=l1_reg,reg_function2=l2_reg)\n",
        "\n",
        "##Train and Test accuracy plot\n",
        "\n",
        "plt.plot(val_acc_l1l2reg,label='Val L1+L2',c='red',ls = 'dashed')\n",
        "plt.plot(train_acc_l1l2reg,label='Train L1+L2',c='red',ls = 'solid')\n",
        "plt.axhline(y=max(val_acc_l1l2reg),c = 'green',ls = 'dashed')\n",
        "plt.title('L1+L2 Regularized Model')\n",
        "plt.ylabel('Accuracy (%)')\n",
        "plt.xlabel('Epoch')\n",
        "plt.legend()\n",
        "plt.show()\n",
        "print('maximum Validation Accuracy reached:%f'%max(val_acc_l1l2reg))"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Gf7d4ghye11P"
      },
      "source": [
        "What values of Lambda worked for Elastic Net Regularization?"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "ltNWmda-e11Q",
        "cellView": "form"
      },
      "source": [
        "#@markdown Write down your discussion\n",
        "Elastic_net = 'ghjg' #@param {type:\"string\"}"
      ],
      "execution_count": 17,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "_Sb_0_vMbBuU",
        "cellView": "form"
      },
      "source": [
        "#@title Visualize all of them together (Run Me!)\r\n",
        "plt.plot(val_acc_l2reg,c='red',ls = 'dashed')\r\n",
        "plt.plot(train_acc_l2reg,label='L2 regularized',c='red',ls = 'solid')\r\n",
        "plt.axhline(y=max(val_acc_l2reg),c = 'red',ls = 'dashed')\r\n",
        "plt.plot(val_acc_l1reg,c='green',ls = 'dashed')\r\n",
        "plt.plot(train_acc_l1reg,label='L1 regularized',c='green',ls = 'solid')\r\n",
        "plt.axhline(y=max(val_acc_l1reg),c = 'green',ls = 'dashed')\r\n",
        "plt.plot(val_acc_unreg,c='blue',ls = 'dashed')\r\n",
        "plt.plot(train_acc_unreg,label='Unregularized',c='blue',ls = 'solid')\r\n",
        "plt.axhline(y=max(val_acc_unreg),c = 'blue',ls = 'dashed')\r\n",
        "plt.plot(val_acc_l1l2reg,c='orange',ls = 'dashed')\r\n",
        "plt.plot(train_acc_l1l2reg,label='L1+L2 regularized',c='orange',ls = 'solid')\r\n",
        "plt.axhline(y=max(val_acc_l1l2reg),c = 'orange',ls = 'dashed')\r\n",
        "\r\n",
        "plt.title('Unregularized Vs L1-Regularized vs L2-regularized Vs L1+L2 regularized')\r\n",
        "plt.xlabel('epoch')\r\n",
        "plt.ylabel('Accuracy(%)')\r\n",
        "plt.legend()\r\n",
        "plt.show()"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "_PgiiHQMW1lP"
      },
      "source": [
        "Now, let's visualize what these different regularization does to the parameters of the model. We observe the effect by doing a frobenius norm of the model parameters"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "HDWlKxzbz0zC",
        "cellView": "form"
      },
      "source": [
        "#@title Visualize Frobenious Norm of the Models (Train Me!)\r\n",
        "plt.plot(param_norm_unreg,label='Unregularized',c = 'blue')\r\n",
        "plt.plot(param_norm_l1reg,label = 'L1 Regularized', c='green')\r\n",
        "plt.plot(param_norm_l2reg,label='L2 Regularized',c='red')\r\n",
        "plt.plot(param_norm_l1l2reg,label='L1+L2 Regularized',c='orange')\r\n",
        "plt.title('Parameter Norm as a function of training Epoch')\r\n",
        "plt.xlabel('epoch')\r\n",
        "plt.ylabel('Parameter Norms')\r\n",
        "plt.legend()\r\n",
        "plt.show()"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Sb5LGL0hk5Ln"
      },
      "source": [
        "In the above plots, you should have seen that even after the model acheives 100% train accuracy the val accuraices are fluctuating, this would indicate that the model is still trying to learn something. Why whould this be the case?"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "0DtbL8VOkuLd",
        "cellView": "form"
      },
      "source": [
        "#@markdown Write down your discussion\n",
        "variation = 'ghghjghkl' #@param {type:\"string\"}"
      ],
      "execution_count": 18,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "FifeU3xZ2h0k"
      },
      "source": [
        "#Section 6: Dropout\n",
        "(Time Estimate: 105 min from start)"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "NCn6Gc9q38gv",
        "cellView": "form",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 519
        },
        "outputId": "bf1cb79e-d76a-4ee7-9e3a-9f3cea4fef07"
      },
      "source": [
        "#@title Video : Dropout\n",
        "try: t6;\n",
        "except NameError: t6=time.time()\n",
        "\n",
        "from IPython.display import YouTubeVideo\n",
        "video = YouTubeVideo(id=\"a4nX4T-3xsc\", width=854, height=480, fs=1)\n",
        "print(\"Video available at https://youtube.com/watch?v=\" + video.id)\n",
        "\n",
        "video"
      ],
      "execution_count": 19,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Video available at https://youtube.com/watch?v=a4nX4T-3xsc\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "execute_result",
          "data": {
            "text/html": [
              "\n",
              "        <iframe\n",
              "            width=\"854\"\n",
              "            height=\"480\"\n",
              "            src=\"https://www.youtube.com/embed/a4nX4T-3xsc?fs=1\"\n",
              "            frameborder=\"0\"\n",
              "            allowfullscreen\n",
              "        ></iframe>\n",
              "        "
            ],
            "text/plain": [
              "<IPython.lib.display.YouTubeVideo at 0x7facc45634a8>"
            ],
            "image/jpeg": "/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEABALDBoYFRgaGRgdHRsdHzAfHx8fGCUnJSclLzMyPC4nLCs3RVBFNzhLOisvR2FFS1VWW11bMkFlbWRYbFBaW1cBERISGRUZLhoXLVc2MDZXV1dXV1dXV1hXV2JXV1dXV1dXV1dXV2RkZFdXV2RkY1dXV1dXV11jXFdkV1djV2RXV//AABEIAWgB4AMBIgACEQEDEQH/xAAbAAEAAQUBAAAAAAAAAAAAAAAAAQMEBQYHAv/EAEgQAAIBAgEHCQYDBAkDBQEAAAABAgMEEQUSFCExkdITFkFRU2FxktEGF1SBk6EiMkI0UnKxBxUjJENzssHwM6LCJWJ0guE1/8QAGQEBAQEBAQEAAAAAAAAAAAAAAAECAwQF/8QAIBEBAQEAAgICAwEAAAAAAAAAABEBAiESMUGBEyIjA//aAAwDAQACEQMRAD8A5+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACsraTXRvGiy7t4FEFbRZd28aLLu3gUQVtFl3bxosu7eBRBW0WXdvGiy7t4FEFbRZd28aLLu3gUQVtFl3bxosu7eBRBW0WXdvGiy7t4FEFbRZd28aLLu3gUQVtFl3bxosu7eBRBW0WXdvGiy7t4FEFbRZd28aLLu3gUQVtFl3bxosu7eBRBW0WXdvGiy7t4FEFbRZd28aLLu3gUQVtFl3bxosu7eBRBW0WXdvGiy7t4FEFbRZd28aLLu3gUQVtFl3bxosu7eBRBW0WXdvGiy7t4FEFbRZd28aLLu3gUQVtFl3bxosu7eBRBW0WXdvKtvk+dSWanFPDHW2BaAyqyBW/ep75eh6Xs7W/ep+aXoBiAZjm5W/ep+aXoQvZ2t+9T80vQDEAzK9mq/wC/T80vQ9L2Xr/v0vNL0AwgM/H2RuH+uj5pcJXp+w11LZUoeefCBrINsX9Ht4/8W388+EP+j28X+Lb+efCS4NTBtL9grtf4lDzz4Tw/Ye6X+JQ88+Eo1kGycybrtKHnnwjmTddpQ88+EDWwbJzJuu0oeefCQ/Yu5X+JR80+EDGw2LwJIhsXgSGQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAL3JMcay/hZZGQyJ+0L+FgZpU30Pee4RKyiRCIaeWiFEqtEJEUiitCJ5iivTQFxbwMnQ1FhRReU2Z1V6pESkUVIpzuIr9S+TJFlepspSCuIvYwzWM7im0Qz0zyyo8tlOTPbZTkBzWGxeBJENi8CSsgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZHIX7Qv4WY4yWQV/eV/CwNlSCR6SCDSMDzhrKmBDRlUxRWgUolWIVc02XMJFnBlaMsCKtsp37T5OL/if+xZxnLDZqLC6qSeLTzW3i36Ftb15U5a5ScX+8xmO2dYz0ZvrLy1r46sdT2GGrNSpxx/V0Yk2H4G81SS25r1/NHWOfNsLPEj1jjsPLMuTwylIqSZSkwlc4hsXgSRDYvAkIAAAAAAAAAAAAAABIEAAAAAAJIAAAAAAAAAAAAAAAAAAEgQAAAAAAAAAABksgftK/hZjTJZA/aV/CwNoCA6RrSTyz0eZGWnpHtM8I9xKKsGTXrKEG28MdS8TzE83lPOpSXVr3Az32xNWOdDVhj1mPVpPOxnNyXeXkaqjFYlCrX6eha3h/IlejOOavpW3K0oYbYJreVrG0nCP4puXU30FK0u4Om817cN/eXcK23HUzpnpz552ydFpxxT1MmTKdpHCnFfPeepFjz15kynJnqR4ZIVzqGxeBJENi8CTIEkEgbFk2y
s45NldXNGdRqtyeEKjjq1YdKXSerSOSrmpGiqNxbzm82FR1M5Zz2J4t/wAi5yZZqvkSUHVp0v7zjn1JYR1Yajxa+ztK2lSurm8ouhGalHk8XnyjrUU/FdGOwK13KNlK2r1aM3jKnLDFbGtqfzTRb4as79PX0bzacnRjlDKF5dzpOcKceUjRwxc2lhCL8uOHWev6wyzn5zt5uHY6OuTzf3dmP3A1QJam+hbX0LxNkyhkGH9aW9GMXTpXKVTM2OC1ucO78v3PWU/aetSrzo2uZRoUZOnGCpxeObqbePegjWMRibNlu5VbJFnUVOFNyuJ50YLCOd+PFpdGL14d5Ry/+w5K/wAmX/iFa/iZinYU3kqpcYPlY11TTznhmvDVhs6S4yKv/S8qeFP+bJpf/wAKt/8AKX8ohGvral0vYg9Tweproe02PIMr3RnodGnTWc8+5lmpy6opy1ai+vYXFfJ11p0FytDNnSqpRxlFvXg1qa27OsDTcS8yXG2dZaXOcKOa3nQWvO6Oh6tv2M5km5jRyPXqOlCo43SzVPXFSzYYSa6cNuB69l7+pdZWjUrNOXIyjqiksFs1fMK1abSb16sdWO3A85661vMhkzLNe0z+RlFZ+GdnQUtmOG3xNprZcuLfJ/KXMou4uF/Y0+TUc2H78t+OHh3hGjt4bSZRawxTWOzFYGY9mncKdRWtvCpVwWFWaX9kteL16tf+3SbHk6N7WnO3v4xrUKlOTVRZjzZrDDCUdj27eoDU8i5OV1WnTz8zNpSq45uP5cNX3KGS9HlVhpM5QovHOlDW1q1dD6cOg2D2HypWTnQxXJxoTqpZqxz8Y9Pz2FH2dyjUusr2tWs05YSjqiksFCfR8wrX6qjnyzG3DOea3tccdTffgef0536evo3mfyJZ0ql3eVq6zqVsp1ZQ/ewlLBPu1Mjnjecpnf2fJ9jyazM393Hb8wjACOtpLW3sS1mw5csqVO7s6tBZtK5zKsYYflxlHFLu1ovPaTLta3vq8LdQpPGOfNQTnN5q2t9GGCw7gNS/2BsntJV0izsLucYqtUz4TcVhnZrwT+33NbABMGw5U/vWTba6/wASg9Hrd6/TJ/bzAa82XtaNrotJwnUdznPlINfgUdeDTw8OnrMrkt6Lky4uf8S4ej0fD9Ul/wB3lRTv1/6LY/58/wDyAwSWLwWt9S2kda6VtNpyVLKGiQ0SlToU+ms8yMqr68ZHvLkKtbJqrXdNRuaNZU8/NSc4NdOGprWtnUFamAAgAAAAAGRyD+0r+FmOMhkR/wB4X8LKNoTDespxb6/semNae8QzyzA5Wy+6NSdKFNNpa5OXS11fMzFZ+M09jT+ZWic10mXRgvkRpM+icl4SaLDXTs5La0vF4HieUKEfzV6S8akTmMpN7Xj4kGojcbmrCc55klKOc8GnitZYOo5J4ReHdrLDJzlyVTB9xdW+OOrWnt8TG465y6XVpCfQvlgZO1qyznGX6S2tqUkul/M8X1zyUVUlqbebh1o6ZjHLW10ZYQj4HpyNIp5bXTVqNd+H+xQ0mM7hVI1Z52KeCTeweTG8W8TkecS2pVs6EX0taypGZWGgw2LwJIhsXgSc2gAAbFGa/qGccVnaSnhjrw1dB59mbynONSwuH/Y19cJP9FToa6scN/ia/gANgyJcRsrq4triebCrF0ZVIS/K9ebUTXRr+WJVnkDKOd+G5zqfRV0xqGHW9eK3M1pIjNXUtwGWhdu1v6dR1ncqjP8AOpSeMcPxKOd4vuMllL2dderO4tK9CVCq3UxlVzXBvW1JYdZrUGlJZyxWKxSeGK6Vj0GZqW+Sqks9V69FPXyboObj3KSx/wBwLvLVGFPJFlCnUVVK4njNLCLl+POze7HFY9JWqZPlf5PstHlBzt4unUhKai1s1/b7mKy1lOlVp0Le3hKNvQxzc/8ANKT2yf33sxDQVtdvZxtsm5Rpyr0pVpKGdCEsVHXqWPS9uKWzUWdKa/qOrHFZ2lJ4Y68MI9BgM
F1DAI2itbSvsm2cLZxlK3TVWhnxUs7ong9T6fMerS2lbZLyhSq1IKbUWqSqKTjr6cNWL6l1GqtBRXUFZ6hNf1HXjis7S08MdeGENeBPsTNRyhFyaS5OetvBdBgMA1iEZ32SybTr13OtKPJ0UpZkpJZ8v0rX0atfyLnKuRry5rTrVKlti9iVysIxWyK1f8eJrLSe1EZi6luCtjyGuXydd2lKcY151FUSlLN5SGEcY4//AFervLv2UybVtLtuvKFDOpyXJurHOns14RbWC631mpNYkZq6luCM97EySu3GUlF1LedOOc8PxPNwX2ZXyDk6pZ5VtIV3CLwk9VRNJZk1rfR/+mttDBdQGeyLf0qV3d067wo3OfSnL93FvCXhre89cz7jO/6tDke25VZub+9h1933NfIzV1Lr2BWwZayhSrXlpCg8aNtmUoS68JLF+Gpbij7WyUspXDTTTcdaeK/JEwwSCM9lSaeScnJNYqVTFY61+J7TAjAASZz2WuIupWtarwpXVNwxexTSxjL+f2MEAM77U3EOUpWtJ40rWmoJrY5tJyl/L7i/knkayipLOVapisda/NrwMEMANryzZTyhRtKtqo1IU6Spyo58VKnJbdTaXVuPFxSdHIs6M6sJTjcL8EailmLV+Hq79WrWau0uoYLqCgACAAAAAAX+Rf2hfwssC7ybWjTq50ngsGtjKNoiz0nrMasr0f3v+2T/ANiJ5ZorWnKTw6IvXvNdIyrZouXJY3dZ/wDuw3JGaucuTlqglBb2a/XpzlOUnrxeOLaJ00tgVdHn1fdEqhLq+6IqlgCo6E+r7oaPPq+6Ar5PucxuL/LL7MyUZ5r1bDDaPPq+6LyznJPNqL8PXtwI1nLqa2S1uMUunAxXtPVUuRXT+J/LVh/uVKF5CD24rwe4xF5KpWqSnJdyWrZ1G81ncxZ4la0uHSqRmtqf26UeeQl1fdDkJdX3QRultlGnKCfKRwezGST3F1C4jjhnLF68MTRIU5r9P3RlsjV1T5R1JZreCSwbT79QrMWUNi8CSIbF4EmFAAABcWVjVuJ5lGnKpJLFqK2Lrb2Iz/s5ketRylQjc0HGMlPDOSlFvNfTrWIGsAuo2dSrVqxpU3JxcpNRw1RT2kVrGtTpQqzpyjTqPCEpLDO1Y6lt2AWwMhZZDuriGfSoTlDolqSfg21iWt1a1KM3CrCUJr9Mlg/HvAogu6WS7iapuFGclVx5PBY52G3d3nu+yNc20VKtQlCL1Z2prHqbTeAFiD3SpSnKMIRcpSeCjFYtvuRkK/s9e04Oc7aaitbaweHik8QMYCpb0ZVZxhTi5TlqjFbX0nmpBxlKMlhKLcZLqa2oCCC7eS7jPhTVGXKVIZ8I6sZR613GV9pcgSt5RnSozVFUoucnLFKbbx2vwA18AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAASCAAAAAAAAAAJIAAkgAAAADAYEQ2LwJIhsXgSAJIJA2WtWla5HtlRbhK6nKVScXhLBY/hx6Oj7j2JvqunQpOpJ05qTcZSbWKTaax2MoZNv7etZ6HdzlTUJ59GsouWa3jimvm9/RgXWRalhZXUKjupVpYNZ0aUowgmnre1yb2ausKj2Qlhe3j1aqNR61q/MjE2lepe3dvG4qzmqlWKknJ4YN60lsWrVqLv2eyjSoXFzOpPNjUpTjF5reLb1LUYW3qypyhOLwnBqUX1Na0BmvanKVWd5VpqcoU6MuTpwhJxikunBdJcTrSu8jTnWblVtaqjCo/zOMsPwt9P5vsiL12N9Pl3caLVlhytOVJyi5L9UWv8AncW+Vco0IWsLK0cpU1LPq1ZLB1JdGC6vRAXmULyrSyPk+NObgpuak4vBtJvVjtwPPsddzqV52lSTqUK9OSlCTbSaW1Y7Okr1o28slZOhcTnTznPMqxjnKLzn+aPSn3FG0uLPJ0KlSjcaTcyg4U8KbjCGPS8f+agPORHothf3UP8ArRmqEJYa4ptYteb7IxWTs
r16FxCqqs285ZylNtSWOtPEuMhZTp0o1qFypSt66wm4/mjJbJr/AJ0Iure0ydQqKtO9deMHnRpQoyUpNbFJvV/IC8q2kKPtFSjBYRlPlMF0OUJY/fF/M1rKf7Tc/wCdU/1svoZbcspxvakdlTOcV0QwzcF4L7l1lC1ydKrUrq9k4Tk5ujCjLlMW8XFN6lrfSBPtfVlCVnKEpRkrOODjJprb0o9+2txPSKUOUnmO2g3HPea3jLW1s6C09qr6jczoOg/wxt1Taaf4Wm/w4vbhjtLrL1a0uoQuI3ObVjQjDkHTk25Rx1Y7FtevZsA1sABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwGBENi8CSIbF4HoCASAIJAAAAAQSAL+7ynytpbW+ZhyDl+LOxzs59XQY8kAQSAAIJAEEgAQCQBAJAEAkAQCQBAJAEAkAQCQBAJAEAkAQCQBAJAEAkAQCQBAJAEAkAQCQBAJAEAkAQCQBAJAEBkkMC5pXcFGK0ai8EtbdXX/3HvTIfC0d9XiLKGxeBIF5pkPhaO+rxDTIfC0d9XiLMAXmmQ+Fo76vENMh8LR31eIswBeaZD4Wjvq8Q0yHwtHfV4izAF5pkPhaO+rxDTIfC0d9XiLMAXmmQ+Fo76vENMh8LR31eIswBeaZD4Wjvq8Q0yHwtHfV4izAF5pkPhaO+rxDTIfC0d9XiLMAXmmQ+Fo76vENMh8LR31eIswBeaZD4Wjvq8Q0yHwtHfV4izAF5pkPhaO+rxDTIfC0d9XiLMAXmmQ+Fo76vENMh8LR31eIswBeaZD4Wjvq8Q0yHwtHfV4izAF5pkPhaO+rxDTIfC0d9XiLMAXmmQ+Fo76vENMh8LR31eIswBeaZD4Wjvq8Q0yHwtHfV4izAF5pkPhaO+rxDTIfC0d9XiLMAXmmQ+Fo76vENMh8LR31eIswBeaZD4Wjvq8Q0yHwtHfV4izAF5pkPhaO+rxDTIfC0d9XiLMAXmmQ+Fo76vENMh8LR31eIswBeaZD4Wjvq8Q0yHwtHfV4izAF5pkPhaO+rxDTIfC0d9XiLMAXmmQ+Fo76vENMh8LR31eIswBeaZD4Wjvq8Q0yHwtHfV4izAF5pkPhaO+rxDTIfC0d9XiLMAXmmQ+Fo76vENMh8LR31eIswBeaZD4Wjvq8Q0yHwtHfV4izAF5pkPhaO+rxDTIfC0d9XiLMAXmmQ+Fo76vEQ7yGD/utHfV4i0DAiGxeBJENi8CQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABgMCIbF4EkQ2LwJAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAbAAp8uu8cuu8CoCny67xy67wKgKfLrvHLrvAqAp8uu89QqJvBAegAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwGBENi8CSIbF4EgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAiWx+BJEtj8AMj7NZBd7VUccFhnSfVHZs6WzMe0HshC0pKrCWfBNKSksGsdj1dGJhvZvLzsqqlhisM2S647dvQ8TbMq5XWUcmVZ0MVyU061N65Zq14+HT8mY35ezjuZvGSfPppejw/dRseQvY2F1RVapLMjJvMUVi3h0vE1yNeD2SNlyF7XK1o8jUg5xi/wOLSaT6HiTjm/L0f7eHj/ADl+mHyxkRWleVKSUtSlGS6YvY8Pky1o2Mak4wjBOU5KK8W8EXmV8rO7ryqywjqUYxXRFbF92eMjqVW6owoy/tM9NPDZhrbfcsCbnLNb4/j8P2l+mw3H9H0VQbjVxqpY4ZuEW+pdJo0aebUa6joeXPbenRlWoUlnVIfgz8VmqXTgunB/yOeRqZ1RvrN57fP5d8bsqqADTgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAYDAiGxeBJENi8CQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARLY/
AkiWx+AFoZf2Yytol3GUv8ApT/s6q6HB9OHdt3mIPVPDOWdsx1+AaZ7L2RJWlxUUISdB/jhNRbjmvYnLZq2bjFt4bTstrKlKhDk810nBKOGuObhsOMZSnB16qpf9NTlmfw4vD7HbjzmM7napGLk0optvYksW/BGxWFP+rLCrdyi43Nx/ZUIyWDiumWHRsx+S6y8/o2lTlK4xw5ZKOHXma8cPnhj8ir/AElypunbrFcspN4dKg1r++G4nPl5dGZHPn3630sqUPzfIplSh+b5HJpcAAMgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGAwIhsXgSRDYvAkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAES2PwJIlsfgBaAANMhkfLFW0r06kZSzYyxlDHVKL/MsOvAv/bDJsKVeNxR1290uVg1sTeuS++Pz7jAHSvYzJsLjJdNXKVWCrOdKMv0Zrww3pv5gYKo3kvJaivw3d7rk+mFJdHc9e9vqNUqVJSeMpNt7W3izZv6Q6EoZQUnNyVSmnFP9KWKzfDp+bNXFAqUPzfIplSh+b5AXAADIAAAAAAAAAAAAAAAAAAAAAAAAAAAAABgMCIbF4Ho6Jk/2YspW9GUqGLlTi28+e1pd5cc1bHsF9SfqFjmQOm81bHsF9SfqOatj2C+pP1BHMgdN5q2PYL6k/Uc1bHsF9SfqCOZA6bzVsewX1J+o5q2PYL6k/UEcyB03mrY9gvqT9RzVsewX1J+oI5kDpvNWx7BfUn6jmrY9gvqT9QRzIHTeatj2C+pP1HNWx7BfUn6gjmQOm81bHsF9SfqOatj2C+pP1BHMgdN5q2PYL6k/Uc1bHsF9SfqCOZA6bzVsewX1J+o5q2PYL6k/UEcyB03mrY9gvqT9RzVsewX1J+oI5kDpvNWx7BfUn6jmrY9gvqT9QRzINamdN5q2PYL6k/Uc1bHsF9SfqCOV8g+tDkH1o6pzVsewX1J+o5q2PYL6k/UK5XyD60ZjIuX7uxhKFKUJQk8c2pFtJ9awaaN85q2PYL6k/Uc1bHsF9SfqBzXKV1Wuq0q1aalN6tmCSWxJdRa8g+tHVOatj2C+pP1HNWx7BfUn6gcr5B9aPVKk08TqXNWx7BfUn6jmrY9gvqT9QjmQOm81bHsF9SfqOatj2C+pP1BHMgdN5q2PYL6k/Uc1bHsF9SfqCOZA6bzVsewX1J+o5q2PYL6k/UEcyB03mrY9gvqT9RzVsewX1J+oI5kDpvNWx7BfUn6jmrY9gvqT9QRzIHTeatj2C+pP1HNWx7BfUn6gjmQOm81bHsF9SfqOatj2C+pP1BHMgdN5q2PYL6k/Uc1bHsF9SfqCOZA6bzVsewX1J+o5q2PYL6k/UEcyB03mrY9gvqT9RzVsewX1J+oI5kDpvNWx7BfUn6jmrY9gvqT9QRzIHTeatj2C+pP1HNWx7BfUn6gjmQZ03mrY9gvqT9TzV9lrFRk+QWx/rn1eIIyOTP2W3/yof6UXRa5M/Zbf/Kh/pRdBQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADxW/JL+F/wAj2eK35Jfwv+QFDJn7Lb/5UP8ASi6OYUPb27p04QVO3whFRWMJ44JYa/xFT3h3nZW/knxAdLBzT3h3nZW/knxD3h3nZW/knxAdLBzT3h3nZW/knxD3h3nZW/knxAdLBzT3h3nZW/knxD3h3nZW/knxAdLBzT3h3nZW/knxD3h3nZW/knxAdLBzT3h3nZW/knxD3h3nZW/knxAdLBzT3h3nZW/knxD3h3nZW/knxAdLBzT3h3nZW/knxD3h3nZW/knxAdLBzT3h3nZW/knxD3h3nZW/knxAdLBzT3h3nZW/knxD3h3nZW/knxAdLBzT3h3nZW/knxD3h3nZW/knxAdLBzT3h3nZW/knxD3h3nZW/knxAdLBzT3h3nZW/knxD3h3nZW/knxAdLBzT3h3nZW/k
nxD3h3nZW/knxAdLBzT3h3nZW/knxD3h3nZW/knxAdLBzT3h3nZW/knxD3h3nZW/knxAdLBzT3h3nZW/knxD3h3nZW/knxAdLBzT3h3nZW/knxD3h3nZW/knxAdLBzT3h3nZW/knxD3h3nZW/knxAdLBzT3h3nZW/knxD3h3nZW/knxAdLBzT3h3nZW/knxD3h3nZW/knxAdLBzT3h3nZW/knxD3h3nZW/knxAdLBzT3h3nZW/knxD3h3nZW/knxAdLBzT3h3nZW/knxD3h3nZW/knxAdLBzT3h3nZW/knxD3h3nZW/knxAdLBzT3h3nZW/knxD3h3nZW/knxAdLBzT3h3nZW/knxD3h3nZW/knxAdLBzT3h3nZW/knxD3h3nZW/knxAdLBzT3h3nZW/knxD3h3nZW/knxAdLPFb8kv4X/I5v7w7zsrfyT4iJf0g3jTXJW+tYfknxAamAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD//Z\n"
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 19
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "zgsj1q_r-0Xp"
      },
      "source": [
        "\n",
        "Dropout involves injecting noise while computing each internal layer during forward propagation. The method is called dropout because we literally drop out some neurons during training. Throughout training, on each iteration, standard dropout consists of zeroing out some fraction of the nodes in each layer before calculating the subsequent layer.\n",
        "\n",
        "![Dropout](https://d2l.ai/_images/dropout2.svg)\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "d9irrZLt16eM"
      },
      "source": [
        "Now lets revisit the toy dataset that we generated above to visualize how the dropout stabilizes training on a noisy dataset. We will slightly modify the architecture we used above to add dropout layers."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "0k9J-Wjs28wz"
      },
      "source": [
        "##Network Class - 2D\n",
        "class Net(nn.Module):\n",
        "    def __init__(self):\n",
        "        super(Net, self).__init__()\n",
        "\n",
        "        self.fc1 = nn.Linear(1, 300)\n",
        "        self.fc2 = nn.Linear(300, 500)\n",
        "        self.fc3 = nn.Linear(500, 1)\n",
        "        self.dropout1 = nn.Dropout(0.4)\n",
        "        self.dropout2 = nn.Dropout(0.2)\n",
        "\n",
        "    def forward(self, x):\n",
        "        x = F.leaky_relu(self.dropout1(self.fc1(x)))\n",
        "        x = F.leaky_relu(self.dropout2(self.fc2(x)))\n",
        "        output = self.fc3(x)\n",
        "        return output"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "VFAHGr5s5I_R"
      },
      "source": [
        "#train the network on toy dataset\n",
        "model = Net()\n",
        "criterion = nn.MSELoss()\n",
        "optimizer = optim.Adam(model.parameters(),lr = 1e-4)\n",
        "max_epochs = 10000\n",
        "iters = 0\n",
        "\n",
        "running_predictions_dp = np.empty((40,(int)(max_epochs/500)))\n",
        "\n",
        "train_loss_dp = []\n",
        "test_loss_dp = []\n",
        "model_norm_dp = []\n",
        "\n",
        "for epoch in tqdm(range(max_epochs)):\n",
        "\n",
        "    #training\n",
        "    model_norm_dp.append(calculate_frobenius_norm(model))\n",
        "    model.train()\n",
        "    optimizer.zero_grad()\n",
        "    predictions = model(X)\n",
        "    loss = criterion(predictions,Y)\n",
        "    loss.backward()\n",
        "    optimizer.step()\n",
        "\n",
        "    train_loss_dp.append(loss.data)\n",
        "    model.eval()\n",
        "    Y_test = model(X_test)\n",
        "    loss = criterion(Y_test,2*X_test)\n",
        "    test_loss_dp.append(loss.data)\n",
        "\n",
        "    if(epoch % 500 == 0 or epoch == max_epochs):\n",
        "        running_predictions_dp[:,iters] = Y_test[:,0,0].detach().numpy()\n",
        "        iters += 1"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Dw-rmoOoUOnA"
      },
      "source": [
        "Now that we have finished training, let's see how the model has evolved over the training process."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "16zDPWsr5PoP",
        "cellView": "form"
      },
      "source": [
        "#@title Visulaization(Run Me!)\n",
        "fig = plt.figure()\n",
        "ax = plt.axes()\n",
        "def frame(i):\n",
        "    ax.clear()\n",
        "    ax.scatter(X[:,0,:].numpy(),Y[:,0,:].numpy())\n",
        "    plot = ax.plot(X_test[:,0,:].detach().numpy(),running_predictions_dp[:,i])\n",
        "    title = \"Epoch: \" + str(i * 500)\n",
        "    plt.title(title)\n",
        "    ax.set_xlabel(\"X axis\")\n",
        "    ax.set_ylabel(\"Y axis\")\n",
        "    return plot\n",
        "anim = animation.FuncAnimation(fig, frame, frames=range(20), blit=False, repeat=False, repeat_delay=10000)\n",
        "anim"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "F7IfS-MR5X5O",
        "cellView": "form"
      },
      "source": [
        "#@title Plot the train and test losses [You may have to run this twice]\n",
        "plt.plot(test_loss_dp,label='test_loss dropout',c = 'blue',ls='dashed')\n",
        "plt.plot(test_loss,label='test_loss',c = 'red',ls='dashed')\n",
        "plt.ylabel('loss')\n",
        "plt.xlabel('epochs')\n",
        "plt.title('loss vs epoch')\n",
        "plt.legend()\n",
        "plt.show()"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "UsV9CU1z5Zjc",
        "cellView": "form"
      },
      "source": [
        "#@title Plot model weights with epoch\n",
        "plt.plot(model_norm_dp,label = 'dropout')\n",
        "plt.plot(model_norm,label = 'no dropout')\n",
        "plt.ylabel('norm of the model')\n",
        "plt.xlabel('epochs')\n",
        "plt.legend()\n",
        "plt.title('Size of the model vs Epochs')\n",
        "plt.show()"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "aHhMJZMRhzUe"
      },
      "source": [
        "Do you think this performed better than the initial model?"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "cellView": "form",
        "id": "vaSGy-IZh84l"
      },
      "source": [
        "#@markdown Write down your discussion\n",
        "dp_diff = \"gghg\" #@param {type:\"string\"}"
      ],
      "execution_count": 20,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "KBK9lE8P7WOd"
      },
      "source": [
        "##Dropout Implementation Caveats: "
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "BwVYGYd57lVm"
      },
      "source": [
        "\n",
        "*  Dropout is used only during training, during testing the complete model weights are used and hence it is important to use model.eval() before testing the model. \n",
        "\n",
        "* Dropout reduces the capacity of the model during training and hence as a general practice wider networks are used when using dropout. If you are using a dropout with a random probability of 0.5 then you might want to double the number of hidden neurons in that layer."
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "m1WEkOApU_oo"
      },
      "source": [
        "Now, lets see how Dropout fares on the Animal Faces Dataset. We first modify the existing model to include dropouts and then train the model."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "46FM-qNM4q3-"
      },
      "source": [
        "##Network Class - Animal Faces\n",
        "class Animal_Net_Dropout(nn.Module):\n",
        "    def __init__(self):\n",
        "        torch.manual_seed(32)\n",
        "        super(Animal_Net_Dropout, self).__init__()\n",
        "        self.fc1 = nn.Linear(3*32*32, 248)\n",
        "        self.fc2 = nn.Linear(248, 210)\n",
        "        self.fc3 = nn.Linear(210, 3)\n",
        "        self.dropout1 = nn.Dropout(p = 0.5)\n",
        "        self.dropout2 = nn.Dropout(p = 0.3)\n",
        "\n",
        "    def forward(self, x):\n",
        "        x = x.view(x.shape[0],-1)\n",
        "        x = F.leaky_relu(self.dropout1(self.fc1(x)))\n",
        "        x =F.leaky_relu(self.dropout2(self.fc2(x)))\n",
        "        x = self.fc3(x)\n",
        "        output = F.log_softmax(x, dim=1)\n",
        "        return output"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "0qYd11zF4mr_"
      },
      "source": [
        "args = {'test_batch_size': 1000,\n",
        "        'epochs': 200,\n",
        "        'lr': 5e-3,\n",
        "        'batch_size': 32,\n",
        "        'momentum': 0.9,\n",
        "        'no_cuda': False,\n",
        "        'seed': 1,\n",
        "        'log_interval': 100\n",
        "        }\n",
        "\n",
        "acc_dict = {}\n",
        "model = Animal_Net_Dropout()\n",
        "\n",
        "val_acc_dropout, train_acc_dropout, _, model ,_ = main(args,model,train_loader,val_loader,img_test_dataset)\n",
        "\n",
        "##Train and Test accuracy plot\n",
        "\n",
        "plt.plot(val_acc_pure,label='Val',c='blue',ls = 'dashed')\n",
        "plt.plot(train_acc_pure,label='Train',c='blue',ls = 'solid')\n",
        "plt.plot(val_acc_dropout,label='Val - DP',c='red',ls = 'dashed')\n",
        "plt.plot(train_acc_dropout,label='Train - DP',c='red',ls = 'solid')\n",
        "plt.title('Dropout')\n",
        "plt.ylabel('Accuracy (%)')\n",
        "plt.xlabel('Epoch')\n",
        "plt.legend()\n",
        "plt.show()"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "QYf2EW344KeD"
      },
      "source": [
        "When do you think dropouts can perform bad and do you think their placement within a model matters?"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "Hv7Td-PbxkVZ",
        "cellView": "form"
      },
      "source": [
        "#@markdown Write down your discussion\n",
        "dropout = 'ggwp' #@param {type:\"string\"}"
      ],
      "execution_count": 21,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "l2uz-1Yw85Qb"
      },
      "source": [
        "#Section 7: Data Augmentation\r\n",
        "(Time Estimate: 125 min from start)"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "6e--CYCZ8nMp",
        "cellView": "form",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 519
        },
        "outputId": "eab083f1-32b6-46a2-9b98-896396d180c8"
      },
      "source": [
        "#@title Video : Data Augmentation\n",
        "try: t6;\n",
        "except NameError: t6=time.time()\n",
        "\n",
        "from IPython.display import YouTubeVideo\n",
        "video = YouTubeVideo(id=\"NvwRMX6v-S8\", width=854, height=480, fs=1)\n",
        "print(\"Video available at https://youtube.com/watch?v=\" + video.id)\n",
        "\n",
        "video"
      ],
      "execution_count": 22,
      "outputs": [
        {
          "output_type": "stream",
          "text": [
            "Video available at https://youtube.com/watch?v=NvwRMX6v-S8\n"
          ],
          "name": "stdout"
        },
        {
          "output_type": "execute_result",
          "data": {
            "text/html": [
              "\n",
              "        <iframe\n",
              "            width=\"854\"\n",
              "            height=\"480\"\n",
              "            src=\"https://www.youtube.com/embed/NvwRMX6v-S8?fs=1\"\n",
              "            frameborder=\"0\"\n",
              "            allowfullscreen\n",
              "        ></iframe>\n",
              "        "
            ],
            "text/plain": [
              "<IPython.lib.display.YouTubeVideo at 0x7facc45621d0>"
            ],
            "image/jpeg": "/9j/4AAQSkZJRgABAQAAAQABAAD/2wCEABALDBoYFhsaGRoeHRseHy0mIiIhIColJyUlLy0xMC0nLy01PVBCNTlLOS0tRWFFS1NWW11bMkFlbWRYbVBZW1cBERISGRYYLRsbL1c3NT1XV1dXV1dXV1dYV1dXV11aV1djV1djV11XV1dgX1dXXVdXV1dXY1dYV1dXXVddV11dV//AABEIAWgB4AMBIgACEQEDEQH/xAAbAAEAAgMBAQAAAAAAAAAAAAAABAUBAwYCB//EAEUQAAIBAgMEBQgHBgUEAwAAAAABAgMRBBIhBRMxURciQZLSMlJTYXGBkbEGMzRyc6GyFiNCwdHhFBVUYqMkQ6LxB2Pw/8QAGAEBAQEBAQAAAAAAAAAAAAAAAAECAwT/xAAeEQEBAQEAAwADAQAAAAAAAAAAARECEiExMkHwE//aAAwDAQACEQMRAD8A+fgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA6ih9AsbUhCcXRtOKkrzfBq67DZ0eY7nR778IHJg6zo8x3Oj334R0eY7nR778IHJg6zo8x3Oj334R0eY7nR778IHJg6zo8x3Oj334R0eY7nR778IHJg6zo8x3Oj334R0eY7nR778IHJg6zo8x3Oj334R0eY7nR778IHJg6zo8x3Oj334R0eY7nR778IHJg6zo8x3Oj334R0eY7nR778IHJg6zo8x3Oj334R0eY7nR778IHJg6zo8x3Oj334R0eY7nR778IHJg6zo8x3Oj334R0eY7nR778IHJg6zo8x3Oj334R0eY7nR778IHJg6zo8x3Oj334R0eY7nR778IHJg6zo8x3Oj334R0eY7nR778IHJg6zo8x3Oj334R0eY7nR778IHJg6zo8x3Oj334R0eY7nR778IHJg6zo8x3Oj334R0eY7nR778IHJg6zo8x3Oj334R0eY7nR778IHJg6zo8x3Oj334R0eY7nR778IHJg6zo8x3Oj334R0eY7nR778IHJg6zo8x3Oj334R0eY7nR778IHJg6zo8x3Oj334TRjfoPi6EM85Ust0tJN8fcBzQLVfR+t51P4v+h7X0credT+Mv6AU4Lj9m6/nU/jL+g/Zut51P4y/oBTguV9Ga/nU/jL+h6X0Wr+fS+MvCBSAvo/RLEP+Ol3peE3w+hGKlwnR70vCBzQOqX0Bxb/AO5Q70/CH9AMX6Sh3p+EmxccqDp39BMWv+5Q70/CeX9CMV6Sj3peEqOaB0n7FYrz6Pel4R+xWK8+j3peEDmwdH+xWJ9JR70vCeX9DcSv46Pel4QPqmyfstD8KH6USyJsn7LQ/Ch+lEsADAAyAAAAAAAAAAAAAAAAAAAMADIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAVP0mjfCv70fmWxV/SFf9M/vL5gceqb7H8TZTibUhCIHmxhI22MWMqJG2CPMUboIo30YlhRIVFEuDM1pLUjEma1IzcmDzI1yPbPDNRl4aPLPTPLKjyzVM2M1yAvNk/ZaH4UP0olEXZP2Wh+FD9KJZRUY3FYh4tUKEqUVud43UhKWua1tJI94TH1Y11h8TGCnKLlTnC+WaXlKz1TWnayNjsSqG0I1JxqODwzjeFOU9c97dVMzTlPFYqnWjTnClQhLI6iyupOStpF6qKS4vmZds9fPWJlPbWHlUVONVOTdk7PK5clK1m/Vc8YTbVOpXrUb23XBu+qteTd1pYo3OdSjRzTryr76m6lJ0rQg1UV7dXRLsd9SxnWnTxGNjCLdWcYypJxbjK1O3HhxQ1fCJ+G2xh6s1CFROUruOjWZLjlbVpe4k4nEwpQc6klCC4tuyOdpvPUwTjUr1W
qt556dlB7uS81ZdXaxbbcqVI0L07rrxzyjHNKML9aUVrdousXibI3YbalGrnyT1gryTTi0udmr2NjxtNU4VM6yVMqi9dc3k/G5S4eDniZ5J1akXhZRU6kbauXkqVlcjxxSnhMHRjTq7ynOgqidOSUMrSd21bs7Bq/5+/S8xG18PSqbudRKS1as3lT4OTWkfeJ7Xw8YQm6sVGpfI9etbkV+FxCw1XEwq06jlUqucJRpympxklZXSsmuGtiNsmm3HZjUXaMat9NI6O3sJq+Ezf74tP2gwmXNvla9npK8ezrK14+8k4vaFKjl3k7OXkpJylLnZLVlRUoy3O1Oo7zcsvVfW/cxWnPW54xEZ0sRCrKdSnTlh4QU4U1PLJNtxkrNq91r6hp4c34uHtOgqSrOrHdN2zX0ve1ny15niltjDzz5al8kc0llknl85K2q9aKmeGvRzwdWpvcXSlLPTy3tKKclFJaWXGxY1oP8AzClJJ2/w9RXtpfNCyuXU8eWMDt2lUw0a85KmtFJO+knwitNX7D3U23R3FWrCWbdrWLUk07aJq11f2FPhsTNYLDwjvIbuooV5Km81NdbVXXOyur2TNtGnKVTGuDq1IywsYwlOFnJ/vNFor8fzJrV4m1ZYfam9/wAM4OCjVvmUs2a6je0dNbPme6m2cNGo6cqqUk8r0eVS81y4J+q5XYapvXs+UVO0FKMrwksrVK2t1zI9OsqWBng50akq9pwyqnJqpKTdp5rWs7p3voNS8TV5itqUKMslSolPLmUbNyad+CXHg/gb8JioVoKpTkpQfBoq8Dh5QxqzJvLgqcXK2mZTldXN+woOMK101fE1Wrq2jk9S6zeZJ6WYAK5gAAAAAAAAAAAAAAAAAAAAAAABWbf+zv7yLMrdvL/p395Ac0kEjKQQCx5tqbDyZV6ijbE1o9xCpNNm51VFNvgiLBkbatVqCS4t/L/2RqfW9bTbeiSRsp46XbqUeFrSTtNpp8NErEmurySzST/2uxZHW5i/p1VJXRkrMDUaau730b5+ssjWOFYZ5Zlnlhl4ZqkbZGqQRfbJ+y0PwofpRLImyfstD8KH6USwrAMgDAMgDAMgDAMgCvxOy95KUlWrwU/KjGpZPS2l/J05WJeHoRpwjCCUYxVkl2JG0BdvxgGQEYBkAYBkAYBkAYBkAAAAAAAAAAAAAAAAAAAAAAAAAAAAK3bv2d/eRZFbt77O/vIDnEDCYvqKPRhi5hmGnpHtHhHpFG2LI+NnF9Xtt8DfEg7RhacZ842+BWufdVscM1Ntycr9j7PYWGIwW8knmceD0dmV1bENap6olYfH5tZS9VtPiJXbrJMWVskeLduZZUqilFSXBlTmzRLOl1YpG3DvG1s1tmWzwyY56xJmtnpnljE1f7J+y0PwofpRLImyfstD8KH6USyNAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAVn0g+zv7yLMq/pE/8Apn96PzA5tMX1NcW+f5Hq4Hu4PJz+2tt1aVV0qajZRV21d3ZMV0cZp8GvibInzXfS52G+l5z+LLg+mbyK4yS9rSIW08VScElVpuSd7Z43a4OyPnoTsXCXHSOSnJu7ST4c0TqdOMkv4ba3X87ldhYpKLevVWpcUcjVuywjpaj1sduqEr8V5L566G3ZX0ilWmoOPK7tb+ZG2vh8+HnbTKs3uXYczhq27mpJyVuXErnX1BT0GY4iltvnVqP22/kaViVPEKpGrPNdOyTb0Gs3l3MpGLkWlVvCLfFrU2RmVl1GyfstD8KH6USyJsn7LQ/Ch+lEsw2wVe1NuU8LOMJRlJtXeW2ivbt9jLQh47ZVDESjKrDM46J3a05O3FAS4TUkmuDV17GacfOcaM5U/KirrS97ate9G9KysuAaAq5bTaqTlpuVTbXNzUVN/wDi18DMdqZI2qK8oRW9leKs2k3ZXu7J/wDs9rY9PcxpXlljPNe+r14P1WeX2G2eAW8lOMnHM
05LLF3aSV1daaJL3AbcViFTipZXK8lFKNrtydlx9pHltJRTUoOM1PJlbWrcc173ta2pKrUVNJPS0oy05xd18jRW2fGblK7UnNTT0dmo5eD4q3PmBihtGM5RjZpuTjxTSklmtdPtWq9hr/zWLtli22nKzlGPVu0nq+2zt/I21cAp0nTlOV275klFp+qy93vFXZ8W4yg8jUVDRRayrgrNdmvxAUceqk4xhGTThGebRJKWa3be/VNdfGzp1aqyOcIU4z0srayvx48Fp6iTSw6jNzu23CMez+HM76feZiphFJ1Hd/vIKD9SWbVd5gaa+0owkk43Ty65o3tJ2uo3u+J7/wAfGy0d3UcMul01e7fqsr+yxrqbMTzdeSjJxclaOrjZJ3a/2o2rAx30qut5KzXZfROXtaSXuA1Yfacakoq1lPyHmi76X1Sd1pqe8Tj1CeRRzSUcz60Y2Tvbi9Xo/gMLgd01abcYq0YtR0Xttd2M18FmnnjJxk45XpGV0r24rirv4gap7UVpShCU4xhGo5JpLLJN6Xd72R6q7RUXNqEpQp2zyVrLRN6N3dk03/M2TwUZKorv95BQfDRJNXXxNdXZyk5rPJQqWzxVrS0S42urpJOwGyWNilN2fUqKD4at5dV6usjW9o63VOThvFTz3j5WbLe172vpcVNnXlL95JRlOM3FWtmjl7bXt1VoR54Oo55Yqap71T1cMnlZnb+LV9nDUDbh8fK6U4O0q06anpbSUracbWja/M3V8Q4VkuMd1OTWl24uNtX7WZWCjaKu+rVdTs4tydvZ1mMVgo1XeTfkShpyk07/APigItTal4NwXWjOnonGWaM5paNO1+KN0tpKOZThKM45erda5rqLTvbsfHkYls3NmcqknKWTVKKtklmVtOZmWzVLM5zlKcnHr2Scct3GytbtftuwNb2jmcFHR71RmrqWjjJ6Nez8j1S2rCTjpaM75ZZou9k3qk7q6T/sbVgvJzSbcZ5+EVwTVrJcNTzQwChopNwV0o5Y8H2XteyA1wx0pzo2hKMKjbTdtVlbXbp2M3V8W1NwjCU2o5pWsrJ3S4vVuz09R5o7Pyum3UnJU75IvLorW1drvQ91sHmm5xnKDcVGVrPMle3FaNXevrAix2rko0pTV3KlGcmpRXFa2i3dm1Y+SlXzQ6tNpJ5oq90nrd+u/sPL2THI4KclF0o05aRbairJ6rR6nurs9Sc3nkszjK1k7Sjaz4epaAbcHi1VUmlZxlleqavZPRrirNHnE41U6igouUmr+VGN+yyu9X6jFPCShK8ZvrVM87parLlslb1IzjcFvk4ubUWrOOWL96utGB5q7QUXN5JOFN2nNWsu16Xu7X1NuJxO7ypRcpTlljFW1dm+L4KyZqqbOUnJZ5KE3ecFa0tEnra6vZXPWPouUYuKlmhK6cHFNaNPytHo7WYGt7TSWsGp58mVuK1UVJu97WsxHaSllUYSlKTlGycdHFJu7va2q4Guhs+UouU24z3jmm8ravFRs9Mrul2EqGEs4Scm3DN2JXzepIDR/mkbQtF5pJvK5RjbK7O7btx5HultBTlBQhJ5o5m9LRV7O+vG/K5h7NXVcZNSjm1sndSlmaaa5m2OESd22/3eR8Fdc9O0CJPa3UqZYdaNOU49aMl1eN7PRq60NlTaajo4PMoqUlmirJ30V3q9DMNmJKznKSVJ0lpFWi7clx0MvZ2qlvHmyqMnli8yV7OzVk1cDbWxcY0lUSclLLZRtd5mkuPtKnbWLc6E47uUclRKTbVr6NWtx0aLmtQU4qLbSTi9P9rTXyK/a+AnOjUVJOUp1FJq6XBJdvsA5eLPSepKjsbFeh/84f1MT2Ti46rDyk/VOn/OSNekaGziduzvi6vtS+CR2GJ2btOWlPC5FzdSk3+ooq30N2nOcpPDttu93VpeIi45wyi//YnaX+m/5aXiH7E7S/03/LS8QFAYOg/YnaX+m/5aXiH7E7S/03/LS8QEHAY1JKE/c/5F5hZZe
PDmQf2J2l/pv+Wl4idT+jm1VTUP8Ne3B72lw5eURudfqom3NoJQdKD1l5XqXI51nRVPobtOTbeG1b9LS8R5f0K2l/pv+Wl4jTNuufJGBrOFWEouzuvhfUt/2J2l/pv+Wl4jK+hW0v8ATf8ALS8Q1FzDHU0rOcVr2tEiGIje2ZXevEof2M2km2sN8atLxFnsj6M46lvJVaOTRfx02rK+ukhrOO+2T9lofhQ/SiWRNk/ZaH4UP0olmWgAAAAAAAAAAAAAAAAAAAAAAAAwZAAAAAAAAAAAAAAAAAAAAAAAAAAAADBkAYBkAYBkAYBkAYBkAYBkAYBkAYBkAYPGI+rn91/I2GvEfVz+6/kBo2T9lofhQ/SiWRNk/ZaH4UP0olgAYOf+kNfFxq01QU8mX+CN7yvwf5AdCeKlSMU5SailxbdkhSbyrN5Vle3PtPTV+IGqliqc3aFSEnxtGSbsbHJXSurvguZTU6mSk0uq54mrG+ZQSWeb1lZ2WnyNTxNX91KC3k1vorVPRNa9mbT4gdAYbtqymqYluUUqr3e6Uozc1DNJt5nquyy07Llg3J4a87ZnT62XhfLrb1ASYyTSa1T4MxGSd7O9nZ+p8ihnipRpxyzcd3Sp6OaV20npG3WXZx9hsq1cksQ41WqirLLTuus8sNLcXfgBdmIyT4NPW2nNcSo/xFR1ZddRkquVRc0llutMtru61ua1WlHLG+WEqlZt5lC8lUdlmtybfrsBeHneRy5rrLzvp8SqjXk3TVWtki4OSlGStN5rLrWs+rZ27bnqEmtnNp3aoyadl69bAWpkqKs6jlUaqzjlxEIJK1lGShfS3+5mJ1pRvTzy+ucVKU1HTIpWcrPnoBcApKGLlJUlUrOEGqnXTSzSjPKk5Wtw19Z4hi6ko0lKo1Fxk82dQzNTa427FZ2AvgUlXFTjklOqn+7i3GnNKV7vrKLXXuraerQm7SquKhFNxc5WvmyLg3rKzt7gJxgp8LVnVdKLqSSarXcWrvJUjGOtuXahSxDk6aq1nBbu6ldRzyUmnd+pJaesC3jJNJp3T7UeiJsr7NR+4vkSwAAAAAAAAAAAAAAAAAAAHmpK0W+SuejXiH+7n91/ICNRx2aVnFL3m2tXlFXUU/eUE6jjWnLNKypqyjfVvQ84jHzVJ2lHNJ9skpW9evH1Ix5Iuf8AMJeYu9/Yx/mMvMXe/scBicVVjUsqtRp8Hnkeo4mt2znbnnZdax3v+Yy8xd7+w/zGXmLvf2OEWNqaXqT7zPENqTjU1nPLfjmZdR3r2nLzF3v7Hp7Rat1Fb739jhtq1Ku7VSnVnbttJnvAYmpUws81SblrbrviuBR2r2jLzF3v7G2GMbTbitPX/Y4/ZtOrUTTqTzcF1pHUYXCSUOtJ3a5lElYz1ae0zLGJLhcpMXQlSoyi6km0rp31faV+xMVOplbk5dbW7diYOkltL/b+f9iwKpU03dFqgMgAAa8R9XP7r+RsNeI+rn91/IDRsn7LQ/Ch+lEsibJ+y0PwofpRLAGDIAwDIA8uKtaysZUVyNOIqONrNJO92037EbKcm4ptWbWq5AZcE+KXwMkbaTkqFRxeV5XZkF4qVNtZlo5K8m2l+8hHnykwLZwXJfAZVe9lcj4Ku5qd3GWWdlKOikrJ6avhe3uIiq1XVcVUjdVZJJxekciaTV9QLPKr3sr8w4pqzSsVlHaVScoaRV1B2dlfMrtq8r8+x8GI42tZSbp2cYytkfBys1fN+YFm4p9i0M2RX47Eypzbj5kePBXnbM9V8yLia9WUcymo5adSStqm4ta6St28Ne0C6sjDinxSZBVebnFxlFqc8l7Nq0YuTaV+Ld17iPDaE1GPWgtIdWV3KeZ2bTv/AFAsa+Hz2tKULebb5NNHqlh4xgoJdVcL6+/XtKuWJqxpy68dY1JJuLvHLNLV310Zbxd0ndPTiuDAOK5LQy4p6NXMgDFkYcVyR6AGDIAAAAAAAAAAAAAAAAAAAADTivqp/cfyNxqxP
1c/uv5Aczua1Ss4x0iorW/ak9LFRRi3UdO9pKV1aKzPK9Y6+r5F1LaO5quLaUZRWuVtp+5rkVle6qPJKMovXPbV8ePxZm4NO36cZQVWN7rRt6aP1dnBlHRru3Wdy/23QUMIr3U6k7pduU5ynBt2gm328hBb0KCcbydl6nYh4rH0VK0YuXOxmlhn/FVWvYjGBpxheMks93x7UNV4jteKWXI4x5dhtwnk2g+q3expnhllk521k7ezsPGx6mSsot9WV7eoJrr/AKNw1belmdR2FZsqgnTUlon+ZZqPYbg5L6S4t53Fu0UrP3lVsfHQjUpU4xk1mWttLlh9McI41qc39XLt7M2mhA2PRUMQm+DkrfAg7GhUvw0LUpYwunrYukBkAADXiPq5/dfyNhrxH1c/uv5AaNk/ZaH4UP0olkTZP2Wh+FD9KJYAAAAABgyaMRGTtZSa1uouzv2dq9ZspJ5Vm4219oHo8VIRl1XzT+DuvzR4xae7llbT5pXfHkV7pVXFzSnGSp9XrPVqTte+r0to+eoFpmirK610SuZdlqVGIwcpZm1O+WpbK7cal1w5okbQpOWVxU3+7mrK/Fx0ugJ9kZsVdWnWjpFzcOq3q2+Es1ra8VHgN3WfGVTRU7W04zea655bAWlhYg/vI0H5TlGp7ZOCqfn1CPVqzc7PfJNTcVHSWmXK/wA38dQLVWWmnsNU8PCbUnd2tpmeXR3WnDiQnGpZSmpayUZ5PKyxi+FtbZ2+HYa4wrRiklUTy/u7cM+aV8/Zwy8dOIFvYFXJYjM11subInp5Lebee5Wj7bmclW3Gbc88Xd3UbzSi7dlotv3AWSknbXjw9ZlsralKqruN9JvLe2WMVTeX2K5pp3qTyp1cq3cnmeqb3mZ3+H8gLi5iMk9U7r1FWqVTNZ7xuUXBNttJZ2rv15bO4q06qdoqUYXnbLfyr9V6dlvdzAtFJXtdXXZ2mblWsLJVlNqb/fXbTdrOkle3K+h6rQmq0nFVLuUMrXkNaZr+6/H3AWYIOAnOTlmbap9RO/ltcZfC3vuednupnefMk4ptSvpK7uk3/LQCwBVreZHffZrreW4Wvrk93LW3rPM51owlZVHeFRQ0vK9+pfk+PH36gWrYTKmbqTlNR3jeeafmZMr05Xvb1+42UI1VVSeZJNW0bjkyrTle/vv6gLIJlbWnN1pKLqXU4Wt5CWma/uvx9x4Ua6yqKaSjqtF19ctvVwv7vWBatmSppQqvK3n0lF2ad08srvX2r1ErZ2fK1PNo9HK+uiu7PVagTAAAAAA1111J/dfyNh5n5L9gHLbRorNzvFXNezcPDMp1NILydPKa5+pE/aNO83FaNrtJeCpQjRs1GTX+0xb7xLXLfTGrmqxa4ZDm8PaKu21L8joPpC5Vq9orSOmnYiHRpxprrK5VioqTfu9Rtp4yTVpWklzXyZvxEKEpNRUk/UZo7PvLRNL1vUK2U6MKls1KrZ8pXRb7I+jzq1FJ093Ri+295epE7YVGUHwVvYdJvdPWaiazFKKSSsl2cjcmVGNxNaGqSmuSeq9xTS2/N3vFxs7asNTnXV4ijCpFxmlJPmrnN1tkOhXhKNGMo5vL5e4l7JxtSq2+EV2vt9hcyk2tCpZioqSdmX6KivBLVluiIyAABrxH1c/uv5Gw14j6uf3X8gNGyfstD8KH6USyJsn7LQ/Ch+lEsAAAAAAw2lxBqr0nJprK7X0krrX/APfM9045YpXvZWuB6MOaSvfQ1YunKdOUY2u128PXzIC2fNfwwkuv1ZPRZrWeitfR9naBYuvC0XfSVreu/A2XRWQwE1KN4wdpQlnv1kopJxWnqfb2m3EYSUq2eKjqrNt37GuFrp69jAm5lzRidWKtd8Xb3vsK+GzcrTSgmt3ql5vlfE84bZ9SLTeX+C9rauLbbskuYFlTqxlGM0+rJJp8NHqjOl1wv2EOOCbp4eElFqlbMnqnaDj82jR/l00krQekUpX1p5Xfq6f0AsozT4evjpwPS
aK1YCanGSy3T1d76Z5S4NaO0uKa9fA97NwUqT6zu1FRumut62lFa+1viwJ4MgDB5hTjFWjFRXqVj2AMAyAMAyAPMYpKySS5LQyZAGAZAGEkDIAxZAyAMAyAAAAAAAeKjtFvkmezXX8iX3X8gOW2ti6jrrLfyVw/PQzhdp1JStdJWtaxmvQvVndzhLKrO1ovjdX/ADEKSs1GWZxu9I8F6+1Hm6/JmxT7SqyjOSutXe5WVVUqaaJdup0eJwKqL19ppjgYwVkjrGlLg8Hl4RbfMu8ItVm095lUWYdK3bb2FF3RqxitDE8VbtZTKbXaeKlSXMuosq+KXr+JQYtwlWjd8eOpmVSctLmmGCUpXd2+Y1qOjwVWEYpLgWWHxSuv6lFhKWWPFk7CU7zj7S6ixksz9RaIj0oJeskFGQAANeI+rn91/I2GvEfVz+6/kBo2T9lofhQ/SiWRNk/ZaH4UP0olgAAAAAGupVUePbwsm38Ee4u6uuDPFSlms7tNdq9fZqeoRUUkuCVgPRgxNNrR2fO1zzCMk9ZXXK1u1/ysvcBsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAPMldNc0ejzJ2TfJAVmPoyck4QWeKspPRJfzPU9muWSUms8Vq0vK5Xua8djIRfkNtxt7E+Nj3Wx8HDqu0uXI43NpsesVFKmpSjld7e0gVYKWqR7xMnOMbO7avbkesOupqb5uxKgSViHXLWvC5W1olRHbSNE56+o2VY2IuJrRje7/uRW+MiRSK3C4yErarV29/ItqLuXFSKEGyfSag4+010VGxCrVJxxFNXvBy+BvB0WGrdaxNKFVbMvkQZAAA14j6uf3X8jYa8R9XP7r+QGjZP2Wh+FD9KJZE2T9lofhQ/SiWAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA8VV1ZexnswBX1MLGVrp6e0ytn07Wy/G9+ZPBjwiYiSowSdkaI0l5r/MsgakwxWTw0X/C/wAzX/gIea/zLcFMUGI2bT45JP4kBfRqhU61SnN8lmmjrgWXByL+imGj1oUpRktV1p/1J+H2fCyeSS+JfgW6YpqmFja2SX5kJ4CKeZQndO68pnTAiucnCfmy7rOjQAGQAANeI+rn91/I2GvEfVz+6/kBo2T9lofhQ/SiWRNk/ZaH4UP0olgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGCipbYnmruSVqO9eW1pVVGbisrfZFJJvm1w7b0rZYnDNTvC8YObct03DNdqpra17t356ga/8yq75wyU8sVNyvNrK4wpyte2q671PMNrVbqUqKUXCMmnPWKlOUVJaa3STtoTqlDD1VeUaU07u7UZJ6ZZP4aM8YZYXLlpqko5nC0VFLMndxt7Xf3gR6e2r6ulJU7+W7pKObK27pJatPi9L8jViNtyVOUlTyJ0pTpym31nlco2VrPRXavf1dpNhHDShKeWmo1Y3m2ks0Zedfsd3xNksPQk3JwpttZW7J3TXk/B/mBDW24tJqDtO6pa6zkpqGVq3Vd3wetk72sy1IUqFCc4VM0dJtqzjZzayZvXKza9/sPVbEUcJSgpdWCWWKSb0jFv8oxb9wEwGudaMZQi3rN2j62k5fJM1wxtNt9a2VtNvRaOz1frAkAjrGUnJx3kbxipPrLSL4S9mhs38NFnj1vJ1Wvs5gbAAAAAA14j6uf3X8jYa8R9XP7r+QGjZP2Wh+FD9KJZTbNrS/wANQ1/7UP0ok7+XnAWAK/fy84b+XnAWAK/fy84b+XnAWAK/fy5jfy84CwBX7+XnDfy84CwBX7+XnDfy84CwBX7+XnDfy84CwBX7+XnDfy84CwBX7+XnDfy84CwBX7+XnDfy84CwBX7+XnDfy84CwBX7+XnDfy84CwBX7+XnDfy84CwBX7+XnDfy84CwMEDfy845LbWMbxlem8TiYVN1F0IUqkoxlKzbTS7Xb3hZN
uO8B8qltOToU8mMxjxDm0476plyvyba8eB0f0exa/xdanTxOIrRhSSlvZuaVRStLLfs9Ya64z9x2Rkr9/Lzhv5cwwsAV+/lzG/lzAsAV+/lzG/lzAsAV+/lzG/lzAsAV+/lzG/lzAnlRX2NKc6jzU4qopJ5INZ82izq9pW52v7CRv5cykrYqbq19zOspRhJSu5vNLTyIvRZVfVdr0vqBZ4nY8pSqOE4RzxqRacLpRmoXtZrW8PzM/5RJTUoyhG1RyTUXfLKSk4vWz4W1XJ9hW06k7rd1a25jWjlvKTunHrXcus4p83xvyRu2PiG3VlCdTdu2WNSU5S0vebz6q/L1X0bA3w2DKMUlVvaUZda98yTja6d8tnoux343PctgppRz5YbpxcYp6VMriqibd9Iyat6lyJO/l5zG/l5zAj09kyhaa3bqRu0mpZXLKoxerduHZ2G7auzZYjJaai4Xaum8s9HGolfirdumrPW+l5zG+l5zAVNnbyNBVGp7p3le7zvI434+u5ohsRJrWOVTzZcv/2byxv38vOY38vOYEZbFad4yh2NJwurxqSmk9eFpW9qTENh2Wso3zQlpDg413WaXJday9hJ38vOY38vOYEyhnyLPbP25eBsK/fS85jfy85gWAK/fy85jfy85gWBrxH1c/uv5EPfy85nitWlklq/JfyAjbN+zUPwofpRBx9OtCpXq04tqVKCTi25KUHJ2yJNtO6WnY2WuzKa/wANQ0/7UP0olbpcgOXngcbbNGplnOznqrp2lovVFyS9aXb2y9q4StNt0lGWfDzpO8suVytaXDVcS93UeQ3S5AczicPiqcXJVJuKveMdXlU6eVRSi2nlz8+J4pUcfOMJKbinTlpJ2ldxnlumtGm4dnZ7jqd0uQ3UeQFc8JeVKTqVL009M2k7q3XXaU2M2LXnWqOMo7qctI3t1KiSrr29VNe1nVbpchulyA5iWE2hlkt8r5uxpadbWPLjDT1e28vZmFr06tV1ZKUJylKNnazcrttc3+VrF5ulyG6XIDn9gbOnhlOMklC0cusXJtXu3KMVdcLX14luSd0uQ3S5ARgSd0uQ3S5ARgSd0uQ3S5ARgSd0uQ3S5ARgSd0uQ3S5ARgSd0uQ3S5ARgSd0uQ3S5ARgSd0uQ3S5ARgSd0uQ3S5ARz539L6rp7UVSNrwVOSvwutUfTN0uRDxOxcLWnnq0Kc5WteSu7AcLLF7rNtNUYZK0VTp08vVhW/ja9mRu/bm9p7/wDjz66v+HH9R2j2DhHBQ/w8Mid1Gzyp87czbhNk4eg26NGFNtWbirXQ0ZBJ3S5DdLkBGBJ3S5DdLkBGBJ3S5DdLkBGBJ3S5DdLkBGBJ3S5DdLkBGMkjdLkN0uQEcEjdLkN0uQEYEndLkN0uQEYEndLkN0uQEYEndLkN0uQEYEndLkN0uQEYEndLkN0uQEYEndLkN0uQEY81fIl91/Il7pcjxWprJLT+F/IDXsv7NQ/Ch+lEoi7L+zUPwofpRKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAa63kS+6/kbDXW8iX3X8gNOy/s1D8KH6USj5hQ+nuLp04QVOhaEVFXjO9krecbOkPGejw/dn4wPpYPmnSHjPR4fuz8Y6Q8Z6PD92fjA+lg+adIeM9Hh+7PxjpDxno8P3Z+MD6WD5p0h4z0eH7s/GOkPGejw/dn4wPpYPmnSHjPR4fuz8Y6Q8Z6PD92fjA+lg+adIeM9Hh+7PxjpDxno8P3Z+MD6WD5p0h4z0eH7s/GOkPGejw/dn4wPpYPmnSHjPR4fuz8Y6Q8Z6PD92fjA+lg+adIeM9Hh+7PxjpDxno8P3Z+MD6WD5p0h4z0eH7s/GOkPGejw/dn4wPpYPmnSHjPR4fuz8Y6Q8Z6PD92fjA+lg+adIeM9Hh+7PxjpDxno8P3Z+MD6WD5p0h4z0eH7s/GOkPGejw/dn4wPpYPmn
SHjPR4fuz8Y6Q8Z6PD92fjA+lg+adIeM9Hh+7PxjpDxno8P3Z+MD6WD5p0h4z0eH7s/GOkPGejw/dn4wPpYPmnSHjPR4fuz8Y6Q8Z6PD92fjA+lg+adIeM9Hh+7PxjpDxno8P3Z+MD6WD5p0h4z0eH7s/GOkPGejw/dn4wPpYPmnSHjPR4fuz8Y6Q8Z6PD92fjA+lg+adIeM9Hh+7PxjpDxno8P3Z+MD6WD5p0h4z0eH7s/GOkPGejw/dn4wPpYPmnSHjPR4fuz8Y6Q8Z6PD92fjA+lg+adIeM9Hh+7PxjpDxno8P3Z+MD6WD5p0h4z0eH7s/GOkPGejw/dn4wPpYPmnSHjPR4fuz8Y6Q8Z6PD92fjA+lg+adIeM9Hh+7PxjpDxno8P3Z+MD6WD5p0h4z0eH7s/GOkPGejw/dn4wPpYPmnSHjPR4fuz8Y6Q8Z6PD92fjA+lmut5Evuv5HzjpDxno8P3Z+MxL/5BxjTW7w+qt5M/GByYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAP/9k=\n"
          },
          "metadata": {
            "tags": []
          },
          "execution_count": 22
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "_AIkE-h63IlZ"
      },
      "source": [
        "While training alpha-zero you were introduced to data augmentation. There data augmentation was used as a tool to increase the number of training samples. Now we will explore the effects of Data Augmentation on regularization. Here regularization is acheived by adding noise into training data after every epoch.\n",
        "\n",
        "Pytorch's torchvision module provides a few inbuilt data augmentation techniques which we can use on image datasets. Some of the techniques we most frequently use are:\n",
        "\n",
        "\n",
        "*   Random Crop\n",
        "*   Random Rotate\n",
        "*   Vertical Flip\n",
        "*   Horizontal Flip\n",
        "\n"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "K8upaZ4Mm8Pg",
        "cellView": "form"
      },
      "source": [
        "#@title Data Loader without Data Augmentation\r\n",
        "train_transform = transforms.Compose([\r\n",
        "     transforms.ToTensor(),\r\n",
        "     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))    \r\n",
        "     ])\r\n",
        "data_path = pathlib.Path('.')/'afhq' # using pathlib to be compatible with all OS's\r\n",
        "img_dataset = ImageFolder(data_path/'train', transform=train_transform)\r\n",
        "\r\n",
        "#Splitting dataset\r\n",
        "img_train_data, img_val_data,_ = torch.utils.data.random_split(img_dataset, [250,100,14280])\r\n",
        "\r\n",
        "#Creating train_loader and Val_loader\r\n",
        "train_loader = torch.utils.data.DataLoader(img_train_data,batch_size=batch_size,worker_init_fn=seed_worker)\r\n",
        "val_loader = torch.utils.data.DataLoader(img_val_data,batch_size=1000,worker_init_fn=seed_worker)"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "dkth2fG1pVAF"
      },
      "source": [
        "Define a DataLoader using [torchvision.transforms](https://pytorch.org/docs/stable/torchvision/transforms.html) which randomly augments the data for us. "
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "xX7jk4FEbbU3"
      },
      "source": [
        "##Data Augmentation using transforms\r\n",
        "new_transforms = transforms.Compose([\r\n",
        "                                     transforms.RandomHorizontalFlip(p=0.1),\r\n",
        "                                     transforms.RandomVerticalFlip(p=0.1),\r\n",
        "                                     transforms.ToTensor(),\r\n",
        "                                     transforms.Normalize((0.5,0.5,0.5),(0.5,0.5,0.5))\r\n",
        "])\r\n",
        "\r\n",
        "data_path = pathlib.Path('.')/'afhq' # using pathlib to be compatible with all OS's\r\n",
        "img_dataset = ImageFolder(data_path/'train', transform=new_transforms)\r\n",
        "#Splitting dataset\r\n",
        "new_train_data, _,_ = torch.utils.data.random_split(img_dataset, [250,100,14280])\r\n",
        "\r\n",
        "#Creating train_loader and Val_loader\r\n",
        "new_train_loader = torch.utils.data.DataLoader(new_train_data,batch_size=batch_size,worker_init_fn=seed_worker)"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "6cW8KjkpvnqS"
      },
      "source": [
        "args = {'epochs': 250,\n",
        "        'lr': 1e-3,\n",
        "        'momentum': 0.99,\n",
        "        'no_cuda': False,\n",
        "        }\n",
        "\n",
        "\n",
        "acc_dict = {}\n",
        "model = Animal_Net()\n",
        "\n",
        "val_acc_dataaug, train_acc_dataaug, param_norm_datadug, _ ,_ = main(args,model,new_train_loader,val_loader,img_test_dataset)\n",
        "model = Animal_Net()\n",
        "val_acc_pure, train_acc_pure, param_norm_pure,_,_ = main(args,model,train_loader,val_loader,img_test_dataset)\n",
        "\n",
        "\n",
        "##Train and Test accuracy plot\n",
        "\n",
        "plt.plot(val_acc_pure,label='Val Accuracy Pure',c='red',ls = 'dashed')\n",
        "plt.plot(train_acc_pure,label='Train Accuracy Pure',c='red',ls = 'solid')\n",
        "\n",
        "plt.plot(val_acc_dataaug,label='Val Accuracy data augment',c='blue',ls = 'dashed')\n",
        "plt.plot(train_acc_dataaug,label='Train Accuracy data augment',c='blue',ls = 'solid')\n",
        "plt.axhline(y=max(val_acc_pure),c = 'red',ls = 'dashed')\n",
        "plt.axhline(y=max(val_acc_dataaug),c = 'blue',ls = 'dashed')\n",
        "plt.title('Data Augmentation')\n",
        "plt.ylabel('Accuracy (%)')\n",
        "plt.xlabel('Epoch')\n",
        "plt.legend()\n",
        "plt.show()"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "YVRtpSq6k2HH"
      },
      "source": [
        "plt.plot(param_norm_pure,c='red',label = 'Without Augmentation')\r\n",
        "plt.plot(param_norm_datadug,c='blue',label='With Augmentation')\r\n",
        "plt.title('Norm of parameters as a function of training epoch')\r\n",
        "plt.xlabel('epoch')\r\n",
        "plt.ylabel('Norm of model parameters')\r\n",
        "plt.legend()\r\n",
        "plt.show()"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "9BU0igCDpPwa"
      },
      "source": [
        "Can you think of more ways of augmenting train data?"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "UEtOQTCq1aPR",
        "cellView": "form"
      },
      "source": [
        "#@markdown Write down your discussion\n",
        "data_augment = 'kjkjkjkjkjkjkjkjk' #@param {type:\"string\"}"
      ],
      "execution_count": 23,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "VtRxB698CTfG"
      },
      "source": [
        "---\n",
        "# Wrap up"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "P5-HZSWcCbr3"
      },
      "source": [
        "## Submit responses"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "FCJJf7OFk8SU",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 421
        },
        "cellView": "form",
        "collapsed": true,
        "outputId": "06e09f06-4783-47db-896d-f30c54cccf15"
      },
      "source": [
        "#@markdown #Run Cell to Show Airtable Form\n",
        "#@markdown ##**Confirm your answers and then click \"Submit\"**\n",
        "\n",
        "import time\n",
        "import numpy as np\n",
        "import urllib.parse\n",
        "from IPython.display import IFrame\n",
        "def prefill_form(src, fields: dict):\n",
        "  '''\n",
        "  src: the original src url to embed the form\n",
        "  fields: a dictionary of field:value pairs,\n",
        "  e.g. {\"pennkey\": my_pennkey, \"location\": my_location}\n",
        "  '''\n",
        "  prefill_fields = {}\n",
        "  for key in fields:\n",
        "      new_key = 'prefill_' + key\n",
        "      prefill_fields[new_key] = fields[key]\n",
        "  prefills = urllib.parse.urlencode(prefill_fields)\n",
        "  src = src + prefills\n",
        "  return src\n",
        "\n",
        "#autofill time if it is not present\n",
        "try: t0;\n",
        "except NameError: t0 = time.time()\n",
        "try: t1;\n",
        "except NameError: t1 = time.time()\n",
        "try: t2;\n",
        "except NameError: t2 = time.time()\n",
        "try: t3;\n",
        "except NameError: t3 = time.time()\n",
        "try: t4;\n",
        "except NameError: t4 = time.time()\n",
        "try: t5;\n",
        "except NameError: t5 = time.time()\n",
        "try: t6;\n",
        "except NameError: t6 = time.time()\n",
        "try: t7;\n",
        "except NameError: t7 = time.time()\n",
        "\n",
        "#autofill fields if they are not present\n",
        "#a missing pennkey and pod will result in an Airtable warning\n",
        "#which is easily fixed user-side.\n",
        "try: pennkey;\n",
        "except NameError: pennkey = \"\"\n",
        "try: my_pod;\n",
        "except NameError: my_pod = \"\"\n",
        "try: learning_from_previous_week;\n",
        "except NameError: learning_from_previous_week = \"\"\n",
        "try: loss_trend;\n",
        "except NameError: loss_trend = \"\"\n",
        "try: expected_accuracies;\n",
        "except NameError: expected_accuracies = \"\"\n",
        "try: memorize_or_generalize;\n",
        "except NameError: memorize_or_generalize = \"\"\n",
        "try: early_stopping;\n",
        "except NameError: early_stopping = \"\"\n",
        "try: l1_l2_assumption;\n",
        "except NameError: l1_l2_assumption = \"\"\n",
        "try: variation;\n",
        "except NameError: variation = \"\"\n",
        "try: train_generalize;\n",
        "except NameError: train_generalize = \"\"\n",
        "try: dropout;\n",
        "except NameError: dropout = \"\"\n",
        "try: lambda1;\n",
        "except NameError: lambda1 = \"\"\n",
        "try: lambda2;\n",
        "except NameError: lambda2 = \"\"\n",
        "try: Elastic_net;\n",
        "except NameError: Elastic_net = \"\"\n",
        "try: data_augment;\n",
        "except NameError: data_augment = \"\"\n",
        "try: dp_diff;\n",
        "except NameError: dp_diff = \"\"\n",
        "\n",
        "times = [(t-t0) for t in [t1,t2,t3,t4,t5,t6,t7]]\n",
        "\n",
        "fields = {\"pennkey\": pennkey,\n",
        "          \"my_pod\": my_pod,\n",
        "          \"learning_from_previous_week\":learning_from_previous_week,\n",
        "          \"loss_trend\": loss_trend,\n",
        "          \"expected_accuracies\": expected_accuracies,\n",
        "          \"memorize_or_generalize\": memorize_or_generalize,\n",
        "          \"early_stopping\": early_stopping,\n",
        "          \"l1_l2_assumption\": l1_l2_assumption,\n",
        "          \"variation\": variation,\n",
        "          \"train_generalize\": train_generalize,\n",
        "          \"dropout\": dropout,\n",
        "          \"lambda1\":lambda1,\n",
        "          \"lambda2\":lambda2,\n",
        "          \"Elastic_net\":Elastic_net,\n",
        "          \"data_augment\":data_augment,\n",
        "          \"dp_diff\": dp_diff,\n",
        "          \"cumulative_times\": times\n",
        "        }\n",
        "\n",
        "src = \"https://airtable.com/embed/shrkmCe3MYALyzZuG?\"\n",
        "\n",
        "#now instead of the original source url, we do: src = prefill_form(src, fields)\n",
        "display(IFrame(src = prefill_form(src, fields), width = 800, height = 400))\n"
      ],
      "execution_count": 25,
      "outputs": [
        {
          "output_type": "display_data",
          "data": {
            "text/html": [
              "\n",
              "        <iframe\n",
              "            width=\"800\"\n",
              "            height=\"400\"\n",
              "            src=\"https://airtable.com/embed/shrkmCe3MYALyzZuG?prefill_pennkey=&prefill_my_pod=discreet-reindeer&prefill_learning_from_previous_week=%25+%5B%5D%5B%5D%5B%22+%22&prefill_loss_trend=436565%25&prefill_expected_accuracies=vhvhjhjg&prefill_memorize_or_generalize=%5B%5D%5B%5D%5B%5D%5B%5D%5B%5D%5B%5D%5B%5D%5B%5D%5B&prefill_early_stopping=%25%22+%22&prefill_l1_l2_assumption=kkk&prefill_variation=ghghjghkl&prefill_train_generalize=&prefill_dropout=ggwp&prefill_lambda1=jjj&prefill_lambda2=&prefill_Elastic_net=ghjg&prefill_data_augment=kjkjkjkjkjkjkjkjk&prefill_dp_diff=gghg&prefill_cumulative_times=%5B2.765655517578125e-05%2C+4.601478576660156e-05%2C+7.009506225585938e-05%2C+8.7738037109375e-05%2C+0.00010466575622558594%2C+0.00012135505676269531%2C+0.00013828277587890625%5D\"\n",
              "            frameborder=\"0\"\n",
              "            allowfullscreen\n",
              "        ></iframe>\n",
              "        "
            ],
            "text/plain": [
              "<IPython.lib.display.IFrame at 0x7facc40f5d68>"
            ]
          },
          "metadata": {
            "tags": []
          }
        }
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "HKn5d3CCC05w"
      },
      "source": [
        "## Feedback\n",
        "How could this session have been better? How happy are you in your group? How do you feel right now?\n",
        "\n",
        "Feel free to use the embedded form below or use this link:\n",
        "<a target=\"_blank\" rel=\"noopener noreferrer\" href=\"https://airtable.com/shrNSJ5ECXhNhsYss\">https://airtable.com/shrNSJ5ECXhNhsYss</a>"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "HIvhG6VZ8zez"
      },
      "source": [
        "feedback_form_url = \"https://airtable.com/embed/shrNSJ5ECXhNhsYss?backgroundColor=red\"\n",
        "display(IFrame(src=feedback_form_url, width=800, height=400))"
      ],
      "execution_count": null,
      "outputs": []
    }
  ]
}