{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "name": "W9_Tutorial2.ipynb",
      "provenance": [],
      "collapsed_sections": [],
      "toc_visible": true
    },
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    },
    "accelerator": "GPU"
  },
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "view-in-github"
      },
      "source": [
        "<a href=\"https://colab.research.google.com/github/CIS-522/course-content/blob/main/tutorials/W09_RNN/W9_Tutorial2.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "V0jMtXo2lrqU"
      },
      "source": [
        "# CIS-522 Week 9 Part 2\n",
        "# Introduction to Long-Short-Term-Memory Networks (LSTNs) and their applications\n",
        "\n",
        "__Instructor__: Lyle Ungar\n",
        "\n",
        "__Content creators:__ Anushree Hede, Pooja Consul\n",
        "\n",
        "---"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "cellView": "form",
        "id": "a5NQBmTeSs5S"
      },
      "source": [
        "#@markdown What is your Pennkey and pod? (text, not numbers, e.g. bfranklin)\n",
        "my_pennkey = 'anuhede' #@param {type:\"string\"}\n",
        "my_pod = 'euclidean-wombat' #@param ['Select', 'euclidean-wombat', 'sublime-newt', 'buoyant-unicorn', 'lackadaisical-manatee','indelible-stingray','superfluous-lyrebird','discreet-reindeer','quizzical-goldfish','astute-jellyfish','ubiquitous-cheetah','nonchalant-crocodile','fashionable-lemur','spiffy-eagle','electric-emu','quotidian-lion']\n"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "cellView": "form",
        "id": "8K-Gq7zu1mJw"
      },
      "source": [
        "# @title Week 9 Slides\n",
        "from IPython.display import HTML\n",
        "HTML('<iframe src=\"https://docs.google.com/presentation/d/1TAE6fukLA_Pc6lAZgEOgt-fvg9p2uvEveM-PQVf6D7g/embed?start=false&loop=false&delayms=3000\" frameborder=\"0\" width=\"960\" height=\"569\" allowfullscreen=\"true\" mozallowfullscreen=\"true\" webkitallowfullscreen=\"true\"></iframe>')"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "UfitwfVdTERX"
      },
      "source": [
        "---\n",
        "# Setup"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "UR8y41zOT4jz"
      },
      "source": [
        "# imports\n",
        "\n",
        "!pip install --upgrade gensim\n",
        "!pip install torchtext==0.4.0\n",
        "!pip install unidecode\n",
        "!pip install d2l\n",
        "\n",
        "import re\n",
        "import os\n",
        "import sys\n",
        "import math\n",
        "import time\n",
        "import nltk\n",
        "import torch\n",
        "import random\n",
        "import string\n",
        "import unidecode\n",
        "import collections\n",
        "\n",
        "import numpy as np\n",
        "import pandas as pd\n",
        "import torch.nn as nn\n",
        "import matplotlib.cm as cm\n",
        "import matplotlib.pyplot as plt\n",
        "import matplotlib.ticker as ticker\n",
        "\n",
        "\n",
        "from gensim.models import Word2Vec\n",
        "from nltk.corpus import brown\n",
        "from sklearn.manifold import TSNE\n",
        "from torch.autograd import Variable\n",
        "from torchtext import data, datasets\n",
        "from torchtext.vocab import Vectors\n",
        "\n",
        "from IPython.display import Image, YouTubeVideo\n",
        "from torch.nn import functional as F\n",
        "from d2l import torch as d2l\n",
        "\n",
        "nltk.download('punkt')\n",
        "nltk.download('averaged_perceptron_tagger')\n",
        "nltk.download('brown')\n",
        "nltk.download('webtext')\n",
        "\n",
        "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "3ZEdeLHKe-05",
        "cellView": "form"
      },
      "source": [
        "#@title Seeds\n",
        "seed = 522\n",
        "random.seed(seed)\n",
        "torch.manual_seed(seed)\n",
        "torch.cuda.manual_seed_all(seed)\n",
        "torch.cuda.manual_seed(seed)\n",
        "np.random.seed(seed)\n",
        "torch.backends.cudnn.deterministic = True\n",
        "torch.backends.cudnn.benchmark = False\n",
        "def seed_worker(worker_id):\n",
        "    worker_seed = torch.initial_seed() % 2**32\n",
        "    np.random.seed(worker_seed)\n",
        "    random.seed(worker_seed)"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "2nk8O0txk01r",
        "cellView": "form"
      },
      "source": [
        "# @title Figure Settings\n",
        "import ipywidgets as widgets\n",
        "%matplotlib inline \n",
        "fig_w, fig_h = (8, 6)\n",
        "plt.rcParams.update({'figure.figsize': (fig_w, fig_h)})\n",
        "%config InlineBackend.figure_format = 'retina'\n",
        "SMALL_SIZE = 12\n",
        "\n",
        "\n",
        "plt.style.use(\"https://raw.githubusercontent.com/NeuromatchAcademy/\"\n",
        "              \"course-content/master/nma.mplstyle\")\n",
        "\n",
        "# plt.rcParams.update(plt.rcParamsDefault)\n",
        "# plt.rc('font', size=SMALL_SIZE)          # controls default text sizes\n",
        "# plt.rc('axes', titlesize=SMALL_SIZE)     # fontsize of the axes title\n",
        "# plt.rc('axes', labelsize=SMALL_SIZE)    # fontsize of the x and y labels\n",
        "# plt.rc('xtick', labelsize=SMALL_SIZE)    # fontsize of the tick labels\n",
        "# plt.rc('ytick', labelsize=SMALL_SIZE)    # fontsize of the tick labels\n",
        "# plt.rc('legend', fontsize=SMALL_SIZE)    # legend fontsize\n",
        "# plt.rc('figure', titlesize=SMALL_SIZE)  # fontsize of the figure title"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "MljsVg8gp1qr",
        "cellView": "form"
      },
      "source": [
        "#@title Necessary code from Tutorial 1\n",
        "#@markdown (Running this will take a while)\n",
        "batch_size = 32 \n",
        "\n",
        "def tokenize(x):\n",
        "    x = x.split()\n",
        "    for i in range(len(x)):\n",
        "        x[i] = x[i].lower().replace('\\n', '')\n",
        "        x[i] = re.sub(r'[^a-z0-9]+', ' ', x[i])\n",
        "        x[i] = re.sub(r'https?:/\\/\\S+', ' ', x[i])\n",
        "        x[i] = x[i].strip()\n",
        "    return x\n",
        "\n",
        "def load_dataset(sentence_length = 50):\n",
        "    TEXT = data.Field(sequential=True, tokenize=tokenize, lower=True,\n",
        "                      include_lengths=True, batch_first=True, fix_length=sentence_length)\n",
        "    LABEL = data.LabelField(dtype=torch.float)\n",
        "\n",
        "    train_data, test_data = datasets.IMDB.splits(TEXT, LABEL)\n",
        "\n",
        "    # If no specific vector embeddings are specified,\n",
        "    # Torchtext initializes random vector embeddings\n",
        "    # which would get updated during training through backpropagation.\n",
        "    TEXT.build_vocab(train_data)\n",
        "    LABEL.build_vocab(train_data)\n",
        "\n",
        "    train_data, valid_data = train_data.split(split_ratio=0.7, random_state = random.seed(seed))\n",
        "    train_iter, valid_iter, test_iter = data.BucketIterator.splits((train_data, valid_data, test_data),\n",
        "                                                                   batch_size=batch_size, sort_key=lambda x: len(x.text),\n",
        "                                                                   repeat=False, shuffle=True)\n",
        "    vocab_size = len(TEXT.vocab)\n",
        "\n",
        "    return TEXT, vocab_size, train_iter, valid_iter, test_iter\n",
        "\n",
        "\n",
        "def train(model, device, train_iter, valid_iter, epochs, learning_rate):\n",
        "    criterion = nn.CrossEntropyLoss()\n",
        "    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n",
        "    \n",
        "    train_loss, validation_loss = [], []\n",
        "    train_acc, validation_acc = [], []\n",
        "\n",
        "    for epoch in range(epochs):\n",
        "      #train\n",
        "      model.train()\n",
        "      running_loss = 0.\n",
        "      correct, total = 0, 0 \n",
        "      steps = 0\n",
        "\n",
        "      for idx, batch in enumerate(train_iter):\n",
        "        text = batch.text[0]\n",
        "        # print(type(text), text.shape)\n",
        "        target = batch.label\n",
        "        target = torch.autograd.Variable(target).long()\n",
        "        text, target = text.to(device), target.to(device)\n",
        "\n",
        "        # add micro for coding training loop\n",
        "        optimizer.zero_grad()\n",
        "        output = model(text)\n",
        " \n",
        "        loss = criterion(output, target)\n",
        "        loss.backward()\n",
        "        optimizer.step()\n",
        "        steps += 1\n",
        "        running_loss += loss.item()\n",
        "\n",
        "        # get accuracy \n",
        "        _, predicted = torch.max(output, 1)\n",
        "        total += target.size(0)\n",
        "        correct += (predicted == target).sum().item()\n",
        "        \n",
        "        # if steps % 100 == 0:\n",
        "        #     print (f'Epoch: {epoch+1}, Idx: {idx+1}, Training Loss: {loss.item():.4f}, Training Accuracy: {100*(predicted == target).sum().item()/len(batch): .2f}%')\n",
        "\n",
        "      train_loss.append(running_loss/len(train_iter))\n",
        "      train_acc.append(correct/total)\n",
        "\n",
        "      print(f'Epoch: {epoch + 1},  Training Loss: {running_loss/len(train_iter):.4f}, Training Accuracy: {100*correct/total: .2f}%')\n",
        "\n",
        "      # evaluate on validation data\n",
        "      model.eval()\n",
        "      running_loss = 0.\n",
        "      correct, total = 0, 0 \n",
        "\n",
        "      with torch.no_grad():\n",
        "        for idx, batch in enumerate(valid_iter):\n",
        "            text = batch.text[0]\n",
        "            target = batch.label\n",
        "            target = torch.autograd.Variable(target).long()\n",
        "            text, target = text.to(device), target.to(device)\n",
        "\n",
        "            optimizer.zero_grad()\n",
        "            output = model(text)\n",
        "    \n",
        "            loss = criterion(output, target)\n",
        "            running_loss += loss.item()\n",
        "\n",
        "            # get accuracy \n",
        "            _, predicted = torch.max(output, 1)\n",
        "            total += target.size(0)\n",
        "            correct += (predicted == target).sum().item()\n",
        "\n",
        "      validation_loss.append(running_loss/len(valid_iter))\n",
        "      validation_acc.append(correct/total)\n",
        "\n",
        "      print (f'Validation Loss: {running_loss/len(valid_iter):.4f}, Validation Accuracy: {100*correct/total: .2f}%')\n",
        "  \n",
        "    return train_loss, train_acc, validation_loss, validation_acc\n",
        "\n",
        "def test(model,  device, test_iter):\n",
        "  model.eval()\n",
        "  correct = 0\n",
        "  total = 0\n",
        "  with torch.no_grad():\n",
        "    for idx, batch in enumerate(test_iter):\n",
        "        text = batch.text[0]\n",
        "        target = batch.label\n",
        "        target = torch.autograd.Variable(target).long()\n",
        "        text, target = text.to(device), target.to(device)\n",
        "\n",
        "        outputs = model(text)\n",
        "        _, predicted = torch.max(outputs, 1)\n",
        "        total += target.size(0)\n",
        "        correct += (predicted == target).sum().item()\n",
        "\n",
        "    acc = 100 * correct / total\n",
        "    return acc\n",
        "\n",
        "def plot_train_val(x, train, val, train_label, val_label, title):\n",
        "  plt.plot(x, train, label=train_label)\n",
        "  plt.plot(x, val, label=val_label)\n",
        "  plt.legend(loc='lower right')\n",
        "  plt.xlabel('epoch')\n",
        "  plt.title(title)\n",
        "  plt.show()\n",
        "\n",
        "def count_parameters(model):\n",
        "    parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)\n",
        "    return parameters\n",
        "\n",
        "def init_weights(m):\n",
        "    if type(m) in (nn.Linear, nn.Conv1d, nn.GRU):\n",
        "        nn.init.xavier_uniform_(m.weight)\n",
        "\n",
        "def grad_clipping(net, theta):  \n",
        "    \"\"\"Clip the gradient.\"\"\"\n",
        "    params = [p for p in net.parameters() if p.requires_grad]\n",
        "\n",
        "    norm = torch.sqrt(sum(torch.sum((p.grad ** 2)) for p in params))\n",
        "    \n",
        "    if norm > theta:\n",
        "        for param in params:\n",
        "            param.grad[:] *= theta / norm\n",
        "\n",
        "TEXT, vocab_size, train_iter, valid_iter, test_iter = load_dataset()\n"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "ODNNWNQTF0Xb"
      },
      "source": [
        "---\n",
        "# Section 1: Long-Short-Term-Memory Networks (LSTMs)"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "8OMaVtnw2MA5",
        "cellView": "form"
      },
      "source": [
        "#@title Video: LSTMs\n",
        "import time\n",
        "try: t0;\n",
        "except NameError: t0=time.time()\n",
        "\n",
        "from IPython.display import YouTubeVideo\n",
        "video = YouTubeVideo(id=\"8vUJMpYRdjk\", width=854, height=480, fs=1)\n",
        "print(\"Video available at https://youtube.com/watch?v=\" + video.id)\n",
        "\n",
        "video"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "EzQJ4RT999Q_"
      },
      "source": [
        "## Section 1.1: Architecture\n",
        "\n",
        "The core idea behind an LSTM is the cell state $C_t$ that runs along all the LSTM units in a layer, and gets updated along the way. These updates are possible through \"gates\". Gates are made out of a sigmoid neural net layer and a pointwise multiplication operation. \n",
        "\n",
        "Each LSTM unit performs the following distinct steps using the input $x_t$, current cell state $C_t$ and previous hidden state $h_{t-1}$:\n",
        "\n",
        "* Forget Gate: *Should I throw away information from this cell?*\n",
        "$$f_t = \\sigma (W_f . [h_{t-1}, x_t] + b_f)$$ \n",
        "\n",
        "* Input Gate:\n",
        "    * *Should I add new values to this cell?*\n",
        "$$i_t = \\sigma (W_i . [h_{t-1}, x_t] + b_i)$$\n",
        "    * *What new candidate values should I store?*\n",
        "$$\\tilde{C}_t = tanh (W_C . [h_{t-1}, x_t] + b_C)$$ \n",
        "\n",
        "* Update cell state: *Forget things from the past and add new things from the candidates*\n",
        "$$C_t = (f_t * C_{t-1}) + (i_t * \\tilde{C}_t)$$ \n",
        "\n",
        "* Output Gate: \n",
        "    * *What information should I output?*\n",
        "$$o_t = \\sigma (W_o . [h_{t-1}, x_t] + b_o)$$ \n",
        "    * *How much of the cell state should I store in the hidden state?*\n",
        "$$h_t = o_t * tanh(C_t)$$ \n",
        "\n",
        "The architecture can be summarized by the diagram below:\n",
        "![image.png]()"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "2ddQlFPtF89W"
      },
      "source": [
        "Since the candidate memory cell ensures that the value range is between  −1  and  1  by using the  $tanh$  function, why does the hidden state need to use the  $tanh$  function again to ensure that the output value range is between  −1  and  1 ?"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "cellView": "form",
        "id": "M476xI8nevwU"
      },
      "source": [
        "lstm_range = '1' #@param {type: \"string\"}"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "6jVPCakpGKdD"
      },
      "source": [
        "**to_remove**\n",
        "\n",
        "The two inputs to the update operation of $C_t$ can have the maximum value of $1$. Hence, the sum of these inputs could result in a value $>1$; which is undesirable. Thus, before $C_t$ is passed to the output gate, it is passed through a $tanh$ to ensure that the range of $H_t$ lies between $[-1, 1]$."
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "o5EHHzS7pdRE"
      },
      "source": [
        "### Exercise 1\n",
        "\n",
        "It is now your turn to build an LSTM network in PyTorch. Feel free to refer to the documentation here: https://pytorch.org/docs/stable/generated/torch.nn.LSTM.html#torch.nn.LSTM . \n",
        "\n",
        "* Once again we will use `nn.Embedding`. You are given the `vocab_size` and the `embed_size`.\n",
        "* Add the [`LSTM`](https://pytorch.org/docs/stable/generated/torch.nn.LSTM.html#torch.nn.LSTM) layers. \n",
        "* Define a dropout layer of 0.5. \n",
        "* Determine the size of inputs and outputs to the fully-connected layer.\n",
        "* Pay special attention to the shapes of your inputs and outputs as you write the forward function.\n",
        "\n"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "lkouuOq6XwHx"
      },
      "source": [
        "class LSTM(nn.Module):\n",
        "  def __init__(self, layers, output_size, hidden_size, vocab_size, embed_size):\n",
        "    super(LSTM, self).__init__()\n",
        "\n",
        "    self.output_size = output_size\n",
        "    self.hidden_size = hidden_size\n",
        "    self.vocab_size = vocab_size\n",
        "    self.embed_size = embed_size\n",
        "    self.n_layers = layers\n",
        "\n",
        "    ####################################################################\n",
        "    # Fill in missing code below (...),\n",
        "    # then remove or comment the line below to test your function\n",
        "    raise NotImplementedError(\"LSTM Init\")\n",
        "    ####################################################################\n",
        "    self.word_embeddings = ...\n",
        "    self.dropout = ...\n",
        "    self.lstm = ...\n",
        "    self.fc = ...\n",
        "\n",
        "  def forward(self, input_sentences):\n",
        "    \"\"\"Hint: Make sure the shapes of your tensors match the requirement\"\"\"\n",
        "    \n",
        "    ####################################################################\n",
        "    # Fill in missing code below (...),\n",
        "    # then remove or comment the line below to test your function\n",
        "    raise NotImplementedError(\"LSTM Forward\")\n",
        "    ####################################################################\n",
        "\n",
        "    # Embeddings \n",
        "    # `input` shape: (`num_steps`, `batch_size`, `num_hiddens`)\n",
        "    input = ...\n",
        "    \n",
        "    # Initialize a random hidden state and cell state for this sequence\n",
        "    hidden = (torch.randn(self.n_layers, input.shape[1], self.hidden_size).to(device),\n",
        "            torch.randn(self.n_layers, input.shape[1], self.hidden_size).to(device))\n",
        "    \n",
        "    # Dropout for regularization\n",
        "    input = self.dropout(input)\n",
        "    \n",
        "    # LSTM \n",
        "    output, hidden = ...\n",
        "    \n",
        "    # Pick the hidden state (not cell state) and reshape it for the linear layer\n",
        "    h_n = ...\n",
        "\n",
        "    # Linear \n",
        "    logits = self.fc(h_n)\n",
        "    return logits\n",
        "\n",
        "# # Uncomment to run\n",
        "# sampleLSTM = LSTM(3, 10, 100, 1000, 300)\n",
        "# print(sampleLSTM)"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "H2CsZpnXjnbO"
      },
      "source": [
        "Sample Output\n",
        "\n",
        "```\n",
        "LSTM(\n",
        "  (word_embeddings): Embedding(1000, 300)\n",
        "  (dropout): Dropout(p=0.5, inplace=False)\n",
        "  (lstm): LSTM(300, 100, num_layers=3)\n",
        "  (fc): Linear(in_features=300, out_features=10, bias=True)\n",
        ")\n",
        "```\n",
        "\n"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "lQ0d_ZgAwmJv"
      },
      "source": [
        "# Solution - to remove\n",
        "class LSTM(nn.Module):\n",
        "  def __init__(self, layers, output_size, hidden_size, vocab_size, embed_size):\n",
        "    super(LSTM, self).__init__()\n",
        "\n",
        "    self.output_size = output_size\n",
        "    self.hidden_size = hidden_size\n",
        "    self.vocab_size = vocab_size\n",
        "    self.embed_size = embed_size\n",
        "    self.n_layers = layers\n",
        "\n",
        "    self.word_embeddings = nn.Embedding(vocab_size, embed_size)\n",
        "    self.dropout = nn.Dropout(0.5)\n",
        "    self.lstm = nn.LSTM(embed_size, hidden_size, num_layers=self.n_layers)\n",
        "    self.fc = nn.Linear(self.n_layers*self.hidden_size, output_size)\n",
        "\n",
        "  def forward(self, input_sentences):\n",
        "    input = self.word_embeddings(input_sentences).permute(1, 0, 2)\n",
        "    hidden = (torch.randn(self.n_layers, input.shape[1], self.hidden_size).to(device),\n",
        "            torch.randn(self.n_layers, input.shape[1], self.hidden_size).to(device))\n",
        "    input = self.dropout(input)\n",
        "    output, hidden = self.lstm(input, hidden)\n",
        "    h_n = hidden[0].permute(1, 0, 2)\n",
        "    h_n = h_n.contiguous().view(h_n.shape[0], -1)\n",
        "    logits = self.fc(h_n)\n",
        "    return logits"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "cxZw94BWfpaA"
      },
      "source": [
        "The cell below will take 1-2 minutes to run. "
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "XuO32iAI3-n3"
      },
      "source": [
        "# Hyperparameters\n",
        "learning_rate = 0.0003\n",
        "output_size = 2\n",
        "hidden_size = 16\n",
        "embedding_length = 100\n",
        "epochs = 20\n",
        "layers = 2\n",
        "\n",
        "# Model, training, testing\n",
        "lstm_model = LSTM(layers, output_size, hidden_size, vocab_size, embedding_length)\n",
        "lstm_model.to(device)\n",
        "lstm_train_loss, lstm_train_acc, lstm_validation_loss, lstm_validation_acc = train(lstm_model, device, train_iter, valid_iter, epochs, learning_rate)\n",
        "test_accuracy = test(lstm_model, device, test_iter)\n",
        "print('Test Accuracy: ',  test_accuracy, '%\\n')\n",
        "\n",
        "# Plotting accuracy curve\n",
        "with plt.xkcd():\n",
        "    plot_train_val(np.arange(0,epochs), lstm_train_acc, lstm_validation_acc,\n",
        "                   'training_accuracy', 'validation_accuracy', 'LSTM on IMDB text classification')"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "mnE-iVHTiGDR"
      },
      "source": [
        "## Section 1.2: Gated Recurrent Units (GRU)\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "axsQZRN8-FMm"
      },
      "source": [
        "\n",
        "The GRU architecture looks very similar to the LSTM, and is often used as an alternative to the traditional LSTM. It also contains some variations that reduce it's complexity. For example, it combines the forget and input gates into a single “update gate”; it contains a \"hidden state\" but not a \"cell state\". In the next section we will be using GRUs as the choice of recurrent unit in our models, but you can always swap out the GRU for an LSTM later on (make sure that you take care of input and output dimensions in this case). Here is a description of the parts of the GRU:\n",
        "\n",
        "* Reset Gate: *How much of the previous hidden state should I remember?*\n",
        "$$r_t = \\sigma (W_r . [h_{t-1}, x_t])$$\n",
        "\n",
        "* Update Gate: \n",
        "    * *How much of the new state is just a different from the old state?*\n",
        "$$z_t = \\sigma (W_z . [h_{t-1}, x_t])$$\n",
        "    * *What new candidate values should I store?*\n",
        "$$\\tilde{h}_t = tanh (W . [r_t * h_{t-1}, x_t ])$$\n",
        "\n",
        "* Update hidden state: *Deciding how much of the old hidden state to keep and discard*\n",
        "$$h_t = ((1-z_t)*h_{t-1} ) + (z_t * \\tilde{h}_t) $$\n",
        "\n",
        "\n",
        "Here is what the architecture looks like:\n",
        "\n",
        "![image.png]()"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "An_ticnpZRRP"
      },
      "source": [
        "## Section 1.3: biLSTMs\n"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "gnA9iiU_2_Uw",
        "cellView": "form"
      },
      "source": [
        "#@title Video: biLSTMs\n",
        "try: t1;\n",
        "except NameError: t1=time.time()\n",
        "\n",
        "video = YouTubeVideo(id=\"7oYePBOq4ec\", width=854, height=480, fs=1)\n",
        "print(\"Video available at https://youtube.com/watch?v=\" + video.id)\n",
        "\n",
        "video"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "rXVeMe6JKFml"
      },
      "source": [
        "#### Exercise 2\n",
        "\n",
        "Let's apply the knowledge to write a bi-LSTM using PyTorch.\n",
        "\n",
        "* Use an Embedding layer\n",
        "* Dropout of 0.5\n",
        "* Add 2 LSTM layers\n",
        "* Linear layer "
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "sfhd1ifldtd_"
      },
      "source": [
        "# Exercise\n",
        "class biLSTM(nn.Module):\n",
        "  def __init__(self, output_size, hidden_size, vocab_size, embed_size):\n",
        "    super(biLSTM, self).__init__()\n",
        "\n",
        "    self.output_size = output_size\n",
        "    self.hidden_size = hidden_size\n",
        "    self.vocab_size = vocab_size\n",
        "    self.embed_size = embed_size\n",
        "\n",
        "    ####################################################################\n",
        "    # Fill in missing code below (...)\n",
        "    raise NotImplementedError(\"biLSTM\")\n",
        "    ####################################################################\n",
        "    self.word_embeddings = ...\n",
        "    self.dropout = ...\n",
        "    self.bilstm = ...\n",
        "    self.fc = ...\n",
        "\n",
        "  def forward(self, input_sentences):\n",
        "    \n",
        "    input = self.word_embeddings(input_sentences).permute(1, 0, 2)\n",
        "    hidden = (torch.randn(4, input.shape[1], self.hidden_size).to(device),\n",
        "            torch.randn(4, input.shape[1], self.hidden_size).to(device))\n",
        "    input = self.dropout(input)\n",
        "    \n",
        "    output, hidden = self.bilstm(input, hidden)\n",
        "\n",
        "    h_n = hidden[0].permute(1, 0, 2)\n",
        "    h_n = h_n.contiguous().view(h_n.shape[0], -1)\n",
        "    logits = self.fc(h_n)\n",
        "    return logits\n",
        "\n",
        "# # Uncomment to run\n",
        "# sampleBiLSTM = biLSTM(10, 100, 1000, 300)\n",
        "# print(sampleBiLSTM)"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "q8ZTdKPJASZc"
      },
      "source": [
        "Sample output:\n",
        "\n",
        "```\n",
        "biLSTM(\n",
        "  (word_embeddings): Embedding(1000, 300)\n",
        "  (dropout): Dropout(p=0.5, inplace=False)\n",
        "  (bilstm): LSTM(300, 100, num_layers=2, bidirectional=True)\n",
        "  (fc): Linear(in_features=400, out_features=10, bias=True)\n",
        ")\n",
        "```\n",
        "\n"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "erWXW6TXWPWR"
      },
      "source": [
        "# solution to_remove\n",
        "class biLSTM(nn.Module):\n",
        "  def __init__(self, output_size, hidden_size, vocab_size, embed_size):\n",
        "    super(biLSTM, self).__init__()\n",
        "\n",
        "    self.output_size = output_size\n",
        "    self.hidden_size = hidden_size\n",
        "    self.vocab_size = vocab_size\n",
        "    self.embed_size = embed_size\n",
        "\n",
        "    self.word_embeddings = nn.Embedding(vocab_size, embed_size)\n",
        "    self.dropout = nn.Dropout(0.5)\n",
        "    self.bilstm = nn.LSTM(embed_size, hidden_size, num_layers=2, bidirectional=True)\n",
        "    self.fc = nn.Linear(4*hidden_size, output_size)\n",
        "\n",
        "  def forward(self, input_sentences):\n",
        "\n",
        "    input = self.word_embeddings(input_sentences).permute(1, 0, 2)\n",
        "    hidden = (torch.randn(4, input.shape[1], self.hidden_size).to(device),\n",
        "            torch.randn(4, input.shape[1], self.hidden_size).to(device))\n",
        "    input = self.dropout(input)\n",
        "    \n",
        "    output, hidden = self.bilstm(input, hidden)\n",
        "\n",
        "    h_n = hidden[0].permute(1, 0, 2)\n",
        "    h_n = h_n.contiguous().view(h_n.shape[0], -1)\n",
        "    logits = self.fc(h_n)\n",
        "    return logits"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "gpeQoeGhff0F"
      },
      "source": [
        "The cell below will take 1-2 minutes to run. "
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "_YId_EgB8RTQ"
      },
      "source": [
        "# Hyperparameters\n",
        "learning_rate = 0.0003\n",
        "batch_size = 32\n",
        "output_size = 2\n",
        "hidden_size = 16\n",
        "embedding_length = 100\n",
        "epochs = 20\n",
        "\n",
        "# Model, training and testing \n",
        "biLSTM_model = biLSTM(output_size, hidden_size, vocab_size, embedding_length)\n",
        "biLSTM_model.to(device)\n",
        "biLSTM_train_loss, biLSTM_train_acc, biLSTM_validation_loss, biLSTM_validation_acc = train(biLSTM_model, device, train_iter, valid_iter, epochs, learning_rate)\n",
        "test_accuracy = test(biLSTM_model, device, test_iter)\n",
        "print('Test Accuracy: ',  test_accuracy, '%\\n')\n",
        "\n",
        "# Plot accuracy curve\n",
        "with plt.xkcd():\n",
        "    plot_train_val(np.arange(0,epochs), biLSTM_train_acc, biLSTM_validation_acc,\n",
        "                   'training_accuracy', 'validation_accuracy', 'biLSTM on IMDB text classification')"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "21tBewIICkSF"
      },
      "source": [
        "*Estimated time: 40 minutes since start*"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "zn8Zb2vln4-5"
      },
      "source": [
        "---\n",
        "# Section 2: Applications of RNNs in  NLP\n",
        "\n",
        "### **Important!** \n",
        "\n",
        "In this section, you will use your knowledge of recurrent neural networks and build some interesting NLP applications! For the remainder of the tutorial we will be switching from word-level models to character-level models; which means the text will be tokenized at the character level. We do this in the interest of simplifying our task by limiting our vocabulary (which is now a set of characters instead of words).   "
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "jHPT0KGyadMP"
      },
      "source": [
        "\n",
        "## Section 2.1: Text Generation and Language Modelling\n"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "cYbSwtof2avn",
        "cellView": "form"
      },
      "source": [
        "#@title Video: RNN applications and Language Models\n",
        "try: t2;\n",
        "except NameError: t2=time.time()\n",
        "\n",
        "video = YouTubeVideo(id=\"pVZ65jRalAU\", width=854, height=480, fs=1)\n",
        "print(\"Video available at https://youtube.com/watch?v=\" + video.id)\n",
        "\n",
        "video"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "MFN2hbsxV8Uu"
      },
      "source": [
        "\n",
        "\n",
        "Sources: https://pytorch.org/tutorials/intermediate/char_rnn_generation_tutorial.html;  \n",
        "https://d2l.ai/chapter_recurrent-neural-networks/rnn.html#rnn-based-character-level-language-models\n",
        "  \n",
        "The first application we will discuss in this section is that of text generation using neural language models. In linguistic theory, a language model is a probability distribution over sequences of words. The task for the model is posed as follows: given a history or context of words, can you predict the next word in the sequence? \n",
        "\n",
        "Recurrent neural networks are a natural choice for this task, since they have the ability to capture information from past observations through the hidden state. Andrej Karpathy's [blog post](https://karpathy.github.io/2015/05/21/rnn-effectiveness/) is an excellent read for developing intuition for this task. For this exercise, you will train a character-level text generation model, give it an initial \"start\" string, and watch how it generates characters! \n",
        "\n",
        "At any given time step, the model takes one character and a hidden state as input. We convert the output of the model to a probability distribution over the possible characters, and pick a character from this distribution as the next prediction. This \"generated\" character is then passed to the model as the input in the next time step. This process is repeated for a fixed number of time steps. (In real generation, there is often a special \"stop character\" that determines when to stop generating.)\n",
        "\n",
        "![image.png]()\n",
        "\n",
        "(Language models are generally evaluated using a metric known as [perplexity](https://towardsdatascience.com/perplexity-intuition-and-derivation-105dd481c8f3), which is 2 raised to the entropy of the language model's probability distribution, measured in bits.)\n",
        "\n"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "VU_QkjcrUnWi",
        "cellView": "form"
      },
      "source": [
        "#@markdown ### Preparing the input (run me)\n",
        "\n",
        "! wget http://www.gutenberg.org/files/1522/1522-0.txt\n",
        "\n",
        "# Read the input training file - Julius Caesar \n",
        "# unidecode strips accents; lowercasing + the regex keep only a-z and spaces\n",
        "file = unidecode.unidecode(open('1522-0.txt').read()).lower() \n",
        "file = re.sub(r'[^a-z]+', ' ', file)\n",
        "file_len = len(file)\n",
        "print('file_len =', file_len)\n",
        "\n",
        "# Print a random chunk from the training data \n",
        "def random_chunk(chunk_len):\n",
        "    \"\"\"Return a random substring of `file` of length chunk_len + 1.\"\"\"\n",
        "    start_index = random.randint(0, file_len - chunk_len)\n",
        "    end_index = start_index + chunk_len + 1\n",
        "    return file[start_index:end_index]\n",
        "\n",
        "chunk_len = 100\n",
        "print(random_chunk(chunk_len))\n",
        "\n",
        "# Get a random chunk from the training data, \n",
        "# Convert its first n-1 chars into input char tensor\n",
        "# Convert its last n-1 chars into target char tensor\n",
        "def random_training_set():    \n",
        "    \"\"\"Return an (input, target) pair of char tensors shifted by one position.\"\"\"\n",
        "    chunk = random_chunk(chunk_len)\n",
        "    inp = char_tensor(chunk[:-1])\n",
        "    target = char_tensor(chunk[1:])\n",
        "    return inp, target\n",
        "\n",
        "# Character vocabulary: lowercase letters plus space (27 symbols).\n",
        "# (string.printable, kept commented out below, would give the full printable set.)\n",
        "# all_characters = string.printable\n",
        "all_characters = string.ascii_lowercase\n",
        "all_characters += ' '\n",
        "n_characters = len(all_characters)\n",
        "\n",
        "# Turn string into list of longs\n",
        "# NOTE(review): the parameter name shadows the stdlib `string` module;\n",
        "# harmless here since the module is not used inside the function.\n",
        "def char_tensor(string):\n",
        "    \"\"\"Map each character of `string` to its index in all_characters.\"\"\"\n",
        "    tensor = torch.zeros(len(string)).long()\n",
        "    for c in range(len(string)):\n",
        "        tensor[c] = all_characters.index(string[c])\n",
        "    return tensor"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "0kwt3-xCpkM7"
      },
      "source": [
        "# Count what follows each 'i' in the corpus:\n",
        "#   v - a vowel, s - a space, o - any other letter\n",
        "v = 0\n",
        "o = 0\n",
        "s = 0\n",
        "# Stop one character short of the end so file[i + 1] is always valid\n",
        "# (the original loop raised IndexError when the text ended in 'i').\n",
        "for i, c in enumerate(file[:-1]):\n",
        "    if c == 'i':\n",
        "        if file[i + 1] in 'aeiou':\n",
        "            v += 1\n",
        "        elif file[i + 1] == ' ':\n",
        "            s += 1\n",
        "        else:\n",
        "            o += 1\n",
        "\n",
        "print(v, o, s)"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "zSi1yVe9WGkF"
      },
      "source": [
        "### Network"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "RPeZi7cq_tNP"
      },
      "source": [
        "class GenerationRNN(nn.Module):\n",
        "    \"\"\"Character-level GRU language model: one character in, logits out.\n",
        "\n",
        "    input_size:  vocabulary size (number of characters)\n",
        "    hidden_size: shared embedding / GRU hidden dimension\n",
        "    output_size: number of output classes (characters)\n",
        "    n_layers:    number of stacked GRU layers\n",
        "    \"\"\"\n",
        "    def __init__(self, input_size, hidden_size, output_size, n_layers=1):\n",
        "        super(GenerationRNN, self).__init__()\n",
        "        self.input_size = input_size\n",
        "        self.hidden_size = hidden_size\n",
        "        self.output_size = output_size\n",
        "        self.n_layers = n_layers\n",
        "        \n",
        "        self.encoder = nn.Embedding(input_size, hidden_size)\n",
        "        self.gru = nn.GRU(hidden_size, hidden_size, n_layers)\n",
        "        self.decoder = nn.Linear(hidden_size, output_size)\n",
        "    \n",
        "    def forward(self, input, hidden):\n",
        "        # One step: embed the character, advance the GRU, decode the new\n",
        "        # hidden state into character logits.\n",
        "        input = self.encoder(input.view(1, -1))\n",
        "        output, hidden = self.gru(input, hidden)\n",
        "        # NOTE(review): with n_layers > 1, hidden.view(1, -1) would have\n",
        "        # n_layers*hidden_size features and not match self.decoder -- fine\n",
        "        # for the default n_layers=1 used in this tutorial.\n",
        "        output = self.decoder(hidden.view(1, -1))\n",
        "        return output, hidden\n",
        "\n",
        "    def init_hidden(self):\n",
        "        # All-zeros initial state: (n_layers, batch=1, hidden_size)\n",
        "        return torch.zeros(self.n_layers, 1, self.hidden_size)"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "z4H6bluzWPFi"
      },
      "source": [
        "### Generate"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "80Vo1whF1116"
      },
      "source": [
        "#### Exercise 3\n",
        "\n",
        "Now that you know what the network looks like, write the function below that:\n",
        "* Takes in a `prime_str` and builds up a hidden state \n",
        "* Uses the built up hidden state to iteratively generate `predict_len` number of characters from the model\n",
        "* To predict the next state, softmax the output from the model and pick the character with the maximum probability "
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "-rueqVibACZa"
      },
      "source": [
        "def evaluate(net, prime_str, predict_len):\n",
        "    \"\"\"Prime the net on prime_str, then greedily generate predict_len characters.\"\"\"\n",
        "    hidden = net.init_hidden()\n",
        "    predicted = prime_str\n",
        "\n",
        "    # \"Building up\" the hidden state\n",
        "    for p in range(len(prime_str) - 1):\n",
        "        inp = char_tensor(prime_str[p])\n",
        "        _, hidden = net(inp, hidden)\n",
        "    \n",
        "    # Tensorize the last character\n",
        "    inp = char_tensor(prime_str[-1])\n",
        "    \n",
        "    # For every index to predict\n",
        "    for p in range(predict_len):\n",
        "        ####################################################################\n",
        "        # Fill in missing code below (...),\n",
        "        # then remove or comment the line below to test your function\n",
        "        raise NotImplementedError(\"Generation\")\n",
        "        ####################################################################\n",
        "\n",
        "        # Pass the inputs to the model\n",
        "        output, hidden = ...\n",
        "        \n",
        "        # Pick the character with the highest probability \n",
        "        top_i = ...\n",
        "\n",
        "        # Add predicted character to string and use as next input\n",
        "        predicted_char = all_characters[top_i]\n",
        "        predicted += predicted_char\n",
        "        inp = char_tensor(predicted_char)\n",
        "\n",
        "    return predicted\n",
        "\n",
        "# # Uncomment to run\n",
        "# sampleDecoder = GenerationRNN(27, 100, 27, 1)\n",
        "# text = evaluate(sampleDecoder, 'hi', 10)\n",
        "# if text.startswith('hi') and len(text) == 12:\n",
        "#     print('Success!')\n",
        "# else:\n",
        "#     print('Need to change.')"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "3kl7MH0V2n55"
      },
      "source": [
        "# Solution - to remove\n",
        "def evaluate(net, prime_str, predict_len):\n",
        "    \"\"\"Prime the net on prime_str, then greedily generate predict_len characters.\n",
        "\n",
        "    Always picks the argmax character (no sampling), so the output is\n",
        "    deterministic given the model weights.\n",
        "    \"\"\"\n",
        "    hidden = net.init_hidden()\n",
        "    predicted = prime_str\n",
        "\n",
        "    # \"Building up\" the hidden state\n",
        "    for p in range(len(prime_str) - 1):\n",
        "        inp = char_tensor(prime_str[p])\n",
        "        _, hidden = net(inp, hidden)\n",
        "    \n",
        "    # Tensorize the last character\n",
        "    inp = char_tensor(prime_str[-1])\n",
        "    \n",
        "    # For every index to predict\n",
        "    for p in range(predict_len):\n",
        "        \n",
        "        # Pass the inputs to the model\n",
        "        output, hidden = net(inp, hidden)\n",
        "        \n",
        "        # Pick the character with the highest probability \n",
        "        # (softmax is monotonic, so argmax over raw logits would give the same index)\n",
        "        top_i = torch.argmax(torch.softmax(output, dim=1))\n",
        "\n",
        "        # Add predicted character to string and use as next input\n",
        "        predicted_char = all_characters[top_i]\n",
        "        predicted += predicted_char\n",
        "        inp = char_tensor(predicted_char)\n",
        "\n",
        "    return predicted"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "4ZKyuqzCAJpC",
        "cellView": "form"
      },
      "source": [
        "#@title Train\n",
        "\n",
        "# Single training step\n",
        "def train(inp, target):\n",
        "    \"\"\"Run one chunk through `decoder`, backprop, and return the per-char loss.\"\"\"\n",
        "    # Initialize hidden state, zero the gradients of decoder \n",
        "    hidden = decoder.init_hidden()\n",
        "    decoder.zero_grad()\n",
        "    loss = 0\n",
        "    # For each character in our chunk (except last), compute the hidden and output\n",
        "    # Using each output, compute the loss with the corresponding target \n",
        "    for c in range(chunk_len):\n",
        "        output, hidden = decoder(inp[c], hidden)\n",
        "        loss += criterion(output, target[c].unsqueeze(0))\n",
        "    \n",
        "    # Backpropagate, clip gradient and optimize\n",
        "    loss.backward()\n",
        "    grad_clipping(decoder, 1)\n",
        "    decoder_optimizer.step()\n",
        "\n",
        "    # Return average loss\n",
        "    return loss.data.item() / chunk_len\n",
        "\n",
        "# Training hyperparameters (an \"epoch\" here is one random chunk)\n",
        "n_epochs = 3000\n",
        "print_every = 500\n",
        "plot_every = 10\n",
        "hidden_size = 100\n",
        "n_layers = 1\n",
        "lr = 0.0005\n",
        "\n",
        "# Create model, optimizer and loss function\n",
        "decoder = GenerationRNN(n_characters, hidden_size, n_characters, n_layers)\n",
        "decoder_optimizer = torch.optim.Adam(decoder.parameters(), lr=lr)\n",
        "criterion = nn.CrossEntropyLoss()\n",
        "\n",
        "all_losses = []\n",
        "loss_avg = 0\n",
        "\n",
        "# For every epoch\n",
        "for epoch in range(1, n_epochs + 1):\n",
        "    # Get a random (input, target) pair from training set and perform one training iteration    \n",
        "    loss = train(*random_training_set())       \n",
        "    loss_avg += loss\n",
        "    \n",
        "    if epoch % print_every == 0:\n",
        "        text = evaluate(decoder, 'th', 50)\n",
        "        print('Epoch '+ str(epoch) + ' --------------------\\n\\t' + text)\n",
        "        \n",
        "    if epoch % plot_every == 0:\n",
        "        all_losses.append(loss_avg / plot_every)\n",
        "        loss_avg = 0\n",
        "\n",
        "print('\\n')\n",
        "with plt.xkcd():\n",
        "    plt.figure()\n",
        "    plt.plot(all_losses)\n",
        "    plt.xlabel('Epochs')\n",
        "    plt.ylabel('Loss')\n",
        "    plt.title('Training loss for text generation')"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "-7zaVRIQbcxc"
      },
      "source": [
        "What do you observe about the output of the text generation model? Why do you think this is happening?"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "3JcK_jVgcDC5",
        "cellView": "form"
      },
      "source": [
        "generation_output = '2' #@param {type: \"string\"}"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "JBDtDCCUbrhY"
      },
      "source": [
        "**Solution**\n",
        "\n",
        "We notice that the same set of characters are generated repeatedly, the model always picks the best choice, and doesn't explore or sample across different options of characters at each step.  "
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "drOj5o3TMbXO"
      },
      "source": [
        "### Improve Generation"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "4eIPn3jy3qCt"
      },
      "source": [
        "#### Exercise 4\n",
        "\n",
        "Choosing the character with the highest probability at each time step did not allow us to fully explore the variability in language. For this, we must let the model \"explore\" other character choices as well. One of the ways to do this is to sample from a probability distribution. \n",
        "\n",
        "Implement the function to generate text again, but this time we cast the output to a probability distribution. Your task is to sample a character from this distribution. Use this function: https://pytorch.org/docs/stable/generated/torch.multinomial.html "
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "Wsnv8LdebAyS"
      },
      "source": [
        "def evaluateMultinomial(net, prime_str, predict_len, temperature=0.8):\n",
        "    \"\"\"Prime the net on prime_str, then sample predict_len characters.\n",
        "\n",
        "    Lower `temperature` sharpens the distribution (closer to greedy);\n",
        "    higher values make sampling closer to uniform.\n",
        "    \"\"\"\n",
        "    hidden = net.init_hidden()\n",
        "    predicted = prime_str\n",
        "\n",
        "    # \"Building up\" the hidden state\n",
        "    for p in range(len(prime_str) - 1):\n",
        "        inp = char_tensor(prime_str[p])\n",
        "        _, hidden = net(inp, hidden)\n",
        "    \n",
        "    # Tensorize the last character\n",
        "    inp = char_tensor(prime_str[-1])\n",
        "    \n",
        "    # For every index to predict\n",
        "    for p in range(predict_len):\n",
        "\n",
        "        ####################################################################\n",
        "        # Fill in missing code below (...),\n",
        "        # then remove or comment the line below to test your function\n",
        "        raise NotImplementedError(\"Generation Improve\")\n",
        "        ####################################################################\n",
        "\n",
        "        # Pass the character + previous hidden state to the model\n",
        "        output, hidden = ...\n",
        "        \n",
        "        # Sample from the network as a multinomial distribution\n",
        "        output = output.data.view(-1).div(temperature).exp()\n",
        "        top_i = ...\n",
        "        \n",
        "        # Add predicted character to string and use as next input\n",
        "        predicted_char = all_characters[top_i]\n",
        "        predicted += predicted_char\n",
        "        inp = char_tensor(predicted_char)\n",
        "\n",
        "    return predicted\n",
        "\n",
        "# # Uncomment to run\n",
        "# sampleDecoder = GenerationRNN(27, 100, 27, 1)\n",
        "# text = evaluateMultinomial(sampleDecoder, 'hi', 10)\n",
        "# if text.startswith('hi') and len(text) == 12:\n",
        "#     print('Success!')\n",
        "# else:\n",
        "#     print('Need to change.')"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "C4j99LqJ3U0A"
      },
      "source": [
        "# Solution - to remove\n",
        "def evaluateMultinomial(net, prime_str, predict_len, temperature=0.8):\n",
        "    \"\"\"Prime the net on prime_str, then sample predict_len characters.\n",
        "\n",
        "    Applies exp(logits / temperature) and samples via torch.multinomial,\n",
        "    so generation explores beyond the single most likely character.\n",
        "    \"\"\"\n",
        "    hidden = net.init_hidden()\n",
        "    predicted = prime_str\n",
        "\n",
        "    # \"Building up\" the hidden state\n",
        "    for p in range(len(prime_str) - 1):\n",
        "        inp = char_tensor(prime_str[p])\n",
        "        _, hidden = net(inp, hidden)\n",
        "    \n",
        "    # Tensorize the last character\n",
        "    inp = char_tensor(prime_str[-1])\n",
        "    \n",
        "    # For every index to predict\n",
        "    for p in range(predict_len):\n",
        "\n",
        "        # Pass the character + previous hidden state to the model\n",
        "        output, hidden = net(inp, hidden)\n",
        "        \n",
        "        # Sample from the network as a multinomial distribution\n",
        "        # (unnormalized weights are fine: multinomial normalizes internally)\n",
        "        output_dist = output.data.view(-1).div(temperature).exp()\n",
        "        top_i = torch.multinomial(output_dist, 1)[0]\n",
        "        \n",
        "        # Add predicted character to string and use as next input\n",
        "        predicted_char = all_characters[top_i]\n",
        "        predicted += predicted_char\n",
        "        inp = char_tensor(predicted_char)\n",
        "\n",
        "    return predicted"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "QmBiRdDegaj6",
        "cellView": "form"
      },
      "source": [
        "#@title Re-Train\n",
        "# Single training step\n",
        "def train(inp, target):\n",
        "    \"\"\"Run one chunk through `decoder`, backprop, and return the per-char loss.\"\"\"\n",
        "    # Initialize hidden state, zero the gradients of decoder \n",
        "    hidden = decoder.init_hidden()\n",
        "    decoder.zero_grad()\n",
        "    loss = 0\n",
        "\n",
        "    # For each character in our chunk (except last), compute the hidden and output\n",
        "    # Using each output, compute the loss with the corresponding target \n",
        "    for c in range(chunk_len):\n",
        "        output, hidden = decoder(inp[c], hidden)\n",
        "        loss += criterion(output, target[c].unsqueeze(0))\n",
        "    \n",
        "    # Backpropagate, clip gradient and optimize\n",
        "    loss.backward()\n",
        "    grad_clipping(decoder, 1)\n",
        "    decoder_optimizer.step()\n",
        "\n",
        "    # Return average loss\n",
        "    return loss.data.item() / chunk_len\n",
        "\n",
        "# Training hyperparameters (an \"epoch\" here is one random chunk)\n",
        "n_epochs = 3000\n",
        "print_every = 500\n",
        "plot_every = 10\n",
        "hidden_size = 100\n",
        "n_layers = 1\n",
        "lr = 0.0005\n",
        "\n",
        "# Create model, optimizer and loss function\n",
        "decoder = GenerationRNN(n_characters, hidden_size, n_characters, n_layers)\n",
        "decoder_optimizer = torch.optim.Adam(decoder.parameters(), lr=lr)\n",
        "criterion = nn.CrossEntropyLoss()\n",
        "\n",
        "all_losses = []\n",
        "loss_avg = 0\n",
        "\n",
        "# For every epoch\n",
        "for epoch in range(1, n_epochs + 1):\n",
        "    # Get a random (input, target) pair from training set and perform one training iteration    \n",
        "    loss = train(*random_training_set())       \n",
        "    loss_avg += loss\n",
        "    \n",
        "    if epoch % print_every == 0:\n",
        "        # Sample with multinomial sampling this time (see evaluateMultinomial)\n",
        "        text = evaluateMultinomial(decoder, 'th', 50)\n",
        "        print('Epoch '+ str(epoch) + ' --------------------\\n\\t' + text)\n",
        "\n",
        "        \n",
        "    if epoch % plot_every == 0:\n",
        "        all_losses.append(loss_avg / plot_every)\n",
        "        loss_avg = 0\n",
        "\n",
        "with plt.xkcd():\n",
        "    plt.figure()\n",
        "    plt.plot(all_losses)\n",
        "    plt.xlabel('Epochs')\n",
        "    plt.ylabel('Loss')\n",
        "    plt.title('Training loss for text generation')"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "GcnPSVabNC7m"
      },
      "source": [
        "Is there an improvement in the quality of the generated text? Can you infer what the original text used for training could be? Why do you think generation is a difficult task?"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "cellView": "form",
        "id": "CAXA-YLuh8rO"
      },
      "source": [
        "generation_quality = '3' #@param {type: \"string\"}"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "2yhm2xUlhxff"
      },
      "source": [
        "**Solution**  \n",
        "Yes, the multinomial distribution over the model output does a better job at sampling over characters than simply choosing the best one at each step.   \n",
        "The original text was Julius Caesar, Shakespeare.  \n",
        "Generation is a difficult task because our model chooses the next character prediction out of a large number of possibilities, i.e., the probability is distributed over all the characters in the vocabulary. A significant amount of tuning is required for such models to produce flawless text.       "
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "dcPWI_PADVbJ"
      },
      "source": [
        "*Estimated time: 60 minutes since start*"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "wv5KIe5Gag6w"
      },
      "source": [
        "## Section 2.2: Sequence Tagging\n",
        "\n"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "Dzu0yzZL2nZ9",
        "cellView": "form"
      },
      "source": [
        "#@title Video: Tagging\n",
        "try: t3;\n",
        "except NameError: t3=time.time()\n",
        "\n",
        "video = YouTubeVideo(id=\"OYhW9HndZkA\", width=854, height=480, fs=1)\n",
        "print(\"Video available at https://youtube.com/watch?v=\" + video.id)\n",
        "\n",
        "video"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "5eA2fYQEZgjr"
      },
      "source": [
        "\n",
        "Sequence tagging is a task in machine learning whose goal is to assign a label (or category) to each unit of a sequence processed in a model. In natural language processing, parts-of-speech tagging and named entity recognition are some popular tagging tasks. \n",
        "\n",
        "Formally, we study tagging as a different task from language modelling or text generation, but there are similarities between them. Instead of choosing the next best  character from the output model, we pass the hidden state of each recurrent unit from the model to a fully connected layer and assign it a label.    "
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "sCls1luhZgkh",
        "cellView": "form"
      },
      "source": [
        "#@title Dataloader\n",
        "class TaggingDataset(torch.utils.data.Dataset):\n",
        "    'Characterizes a dataset for PyTorch'\n",
        "\n",
        "    def __init__(self, raw_data):\n",
        "        'Initialization'\n",
        "        self.data = raw_data\n",
        "        # Uppercase vowels kept for generality, though `file` was lowercased\n",
        "        self.vowels = ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U']\n",
        "\n",
        "    def __len__(self):\n",
        "        'Denotes the total number of samples'\n",
        "        return len(self.data)\n",
        "\n",
        "    def __getitem__(self, index):\n",
        "        'Generates one sample of data'\n",
        "        # Select sample\n",
        "        inp = char_tensor(self.data[index][:-1])\n",
        "        # Tag encoding: 0 = vowel, 1 = space, 2 = other.\n",
        "        # Targets are shifted by one: the label at position i is the tag\n",
        "        # of the NEXT character relative to inp[i].\n",
        "        target = []\n",
        "        for c in self.data[index][1:]:\n",
        "            if c in self.vowels:\n",
        "                target.append(0)\n",
        "            elif c == ' ':\n",
        "                target.append(1)\n",
        "            else:\n",
        "                target.append(2)\n",
        "        target = torch.tensor(target)\n",
        "        # print(self.data[index][:-1], target)\n",
        "        return inp, target\n",
        "\n",
        "# Data loaders\n",
        "raw_data = []\n",
        "for i in range(100):\n",
        "    raw_data.append(random_chunk(chunk_len))\n",
        "dataset = TaggingDataset(raw_data)\n",
        "dataloader = torch.utils.data.DataLoader(dataset, batch_size=1)"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "kOz10ITGCUvt"
      },
      "source": [
        "### Network"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "t5_a7nZJ6dP3"
      },
      "source": [
        "#### Exercise 5\n",
        "\n",
        "For this exercise, you will train a character-level sequence labelling model on the same dataset as the previous exercise: the model takes in a character and a hidden state vector as input, and predicts whether the **next** character in the sequence will be a vowel (V), space (S) or other (O). Much of the code for this task is identical to the code for text generation, so you will be making the most important tweaks in the text generation pipeline that will give you a tagging pipeline. Note that since we are reducing the prediction space to just three classes, the task becomes much simpler.\n",
        "\n",
        "Structurally, the tagging model looks similar to the text generation one, except the Linear layer must map to the number of tags or labels defined for your task. Functionally, the hidden state (instead of the output) must be mapped to the Linear layer to perform the classification of the required unit. Use a GRU for the recurrent unit. "
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "QAviF-gSZgki",
        "cellView": "code"
      },
      "source": [
        "class TaggingRNN(nn.Module):\n",
        "    def __init__(self, input_size, hidden_size, output_size, n_layers=1):\n",
        "        super(TaggingRNN, self).__init__()\n",
        "        self.input_size = input_size\n",
        "        self.hidden_size = hidden_size\n",
        "        self.output_size = output_size\n",
        "        self.n_layers = n_layers\n",
        "        \n",
        "        ####################################################################\n",
        "        # Fill in missing code below (...),\n",
        "        # then remove or comment the line below to test your function\n",
        "        raise NotImplementedError(\"Tagging Init\")\n",
        "        ####################################################################\n",
        "\n",
        "        self.embedding = ...\n",
        "        self.gru = ...\n",
        "        self.linear = ...\n",
        "    \n",
        "    def forward(self, input, hidden):\n",
        "        \"\"\" Be sure to check the shapes of the input and hidden states \"\"\"\n",
        "        ####################################################################\n",
        "        # Fill in missing code below (...),\n",
        "        # then remove or comment the line below to test your function\n",
        "        raise NotImplementedError(\"Tagging Forward\")\n",
        "        ####################################################################\n",
        "\n",
        "        input = ...\n",
        "        output, hidden = ...\n",
        "        out = ...\n",
        "        return out, hidden\n",
        "\n",
        "    def init_hidden(self):\n",
        "        return torch.zeros(self.n_layers, 1, self.hidden_size)\n",
        "\n",
        "# # Uncomment to run\n",
        "# sampleTagger = TaggingRNN(20, 100, 10, 2)\n",
        "# print(sampleTagger)"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "sMO5w5v5jJny"
      },
      "source": [
        "Sample Output:\n",
        "\n",
        "```\n",
        "TaggingRNN(\n",
        "  (embedding): Embedding(20, 100)\n",
        "  (gru): GRU(100, 100, num_layers=2)\n",
        "  (linear): Linear(in_features=100, out_features=3, bias=True)\n",
        ")\n",
        "```\n",
        "\n"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "3-NBqvFh7Ul3"
      },
      "source": [
        "# Solution - to remove\n",
        "class TaggingRNN(nn.Module):\n",
        "    \"\"\"Character-level GRU tagger: classifies the next char as Vowel/Space/Other.\"\"\"\n",
        "    def __init__(self, input_size, hidden_size, output_size, n_layers=1):\n",
        "        super(TaggingRNN, self).__init__()\n",
        "        self.input_size = input_size\n",
        "        self.hidden_size = hidden_size\n",
        "        self.output_size = output_size\n",
        "        self.n_layers = n_layers\n",
        "        \n",
        "        self.embedding = nn.Embedding(input_size, hidden_size)\n",
        "        self.gru = nn.GRU(hidden_size, hidden_size, n_layers)\n",
        "        # NOTE(review): out_features is hard-coded to 3 (the V/S/O tag set,\n",
        "        # matching the sample output above); the `output_size` argument is\n",
        "        # stored but unused -- pass it here if the tag set ever changes.\n",
        "        self.linear = nn.Linear(hidden_size, 3)\n",
        "    \n",
        "    def forward(self, input, hidden):\n",
        "        # Classify from the hidden state rather than the GRU output sequence\n",
        "        input = self.embedding(input.view(1, -1)) \n",
        "        output, hidden = self.gru(input.view(1, 1, -1), hidden)\n",
        "        out = self.linear(hidden)\n",
        "        return out, hidden\n",
        "\n",
        "    def init_hidden(self):\n",
        "        # All-zeros initial state: (n_layers, batch=1, hidden_size)\n",
        "        return torch.zeros(self.n_layers, 1, self.hidden_size)"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "_J5VUu2vYvzR"
      },
      "source": [
        "### Evaluation\n",
        "\n",
        "Here is a function that performs the Vowel/Space/Other tagging task for each character of the given string. You don't need to code anything, but notice the similarities between it and the evaluation code for the text generation task.  "
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "dhp4V-0DZgki",
        "cellView": "code"
      },
      "source": [
        "def evaluate(prime_str, predict_len):\n",
        "    \"\"\"Tag the final `predict_len` characters of `prime_str` as Vowel/Space/Other,\n",
        "    using the earlier characters only to build up the tagger's hidden state.\"\"\"\n",
        "    labels = ['Vowel', 'Space', 'Other']\n",
        "\n",
        "    hidden = tagger.init_hidden()\n",
        "    prime_input = char_tensor(prime_str)\n",
        "    build_up = len(prime_str) - predict_len\n",
        "    predicted = prime_str[:build_up]\n",
        "\n",
        "    # Use priming string to \"build up\" hidden state\n",
        "    for p in range(build_up):\n",
        "        _, hidden = tagger(prime_input[p], hidden)\n",
        "\n",
        "    column_width = len('milan_is_a_nice_place')+2\n",
        "    \n",
        "    print('Text'.ljust(column_width) + 'Next Character is ...\\n-------------------------')\n",
        "    \n",
        "    # For each character remaining to be tagged \n",
        "    for p in range(0, predict_len):\n",
        "\n",
        "        # Get its input tensor \n",
        "        inp = prime_input[build_up + p]\n",
        "        next_char = prime_str[build_up + p]\n",
        "        predicted += next_char\n",
        "\n",
        "        # Pass the input and the previous hidden state to the model\n",
        "        out, hidden = tagger(inp, hidden)\n",
        "        \n",
        "        # Softmax the output and find the best tag\n",
        "        softmax = torch.softmax(out[0], dim=1, dtype=torch.float)\n",
        "        i = torch.argmax(softmax)\n",
        "        label = labels[i]\n",
        "\n",
        "        text = predicted.replace(' ', '_')\n",
        "        print(text.ljust(column_width) + label)"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "cH1XJEh0ZBpk"
      },
      "source": [
        "### Training process"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "UScx0UkyZgkj",
        "cellView": "form"
      },
      "source": [
        "#@title Train\n",
        "# Single training step\n",
        "def train(inp, target):\n",
        "    # Initialize hidden state, zero the gradients of decoder \n",
        "    hidden = tagger.init_hidden()\n",
        "    tagger.zero_grad()\n",
        "    loss = 0\n",
        "\n",
        "    # For each character in our chunk (except last), compute the hidden and ouput\n",
        "    # Using each output, compute the loss with the corresponding target \n",
        "    for c in range(chunk_len):\n",
        "        \n",
        "        out, hidden = tagger(inp[c], hidden)\n",
        "        loss += criterion(out[0], target[c].unsqueeze(0))\n",
        "        \n",
        "    # Backpropagate and optimize\n",
        "    loss.backward()\n",
        "    optimizer.step()\n",
        "\n",
        "    # Return average loss\n",
        "    return loss.data.item() / chunk_len\n",
        "\n",
        "n_epochs = 12\n",
        "print_every = 5\n",
        "plot_every = 1\n",
        "hidden_size = 100\n",
        "n_layers = 1\n",
        "lr = 0.005\n",
        "\n",
        "# Create model, optimizer and loss function\n",
        "tagger = TaggingRNN(n_characters, hidden_size, n_characters, n_layers)\n",
        "optimizer = torch.optim.Adam(tagger.parameters(), lr=lr)\n",
        "criterion = nn.CrossEntropyLoss()\n",
        "\n",
        "all_losses = []\n",
        "loss_avg = 0\n",
        "\n",
        "\n",
        "# For every epoch\n",
        "for epoch in range(1, n_epochs + 1):\n",
        "\n",
        "    for i, (input, target) in enumerate(dataloader):\n",
        "        loss = train(input[0], target[0])\n",
        "        loss_avg += loss\n",
        "\n",
        "    if epoch % print_every == 0:\n",
        "        print('\\n------ Epoch %d' % epoch)\n",
        "        evaluate('milan is a nice place', 15)\n",
        "    \n",
        "    if epoch % plot_every == 0:\n",
        "        all_losses.append(loss_avg / plot_every)\n",
        "        loss_avg = 0\n",
        "\n",
        "with plt.xkcd():\n",
        "    plt.figure()\n",
        "    plt.plot(all_losses)\n",
        "    plt.xlabel('Epochs')\n",
        "    plt.ylabel('Loss')\n",
        "    plt.title('Training loss for sequence labelling')"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "KlGWhtHwceJ4"
      },
      "source": [
        "Where is the model making mistakes? Why is there a difference between the true next character and the Next Character tag? \n",
        "\n",
        "*Hint: Observe the input to the network at each time step in the `evaluate` function*"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "p5xfdP60x5zR",
        "cellView": "form"
      },
      "source": [
        "seq_label_mistakes = '4' #@param {type: \"string\"}"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "-rUtyv25x3Pi"
      },
      "source": [
        "How would one evaluate a sequence labelling model? Can you come up with a simple metric?"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "bvZqYObtc1zF",
        "cellView": "form"
      },
      "source": [
        "seq_label_evaluate = '5' #@param {type: \"string\"}"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "FaGtNpRUD_IX"
      },
      "source": [
        "*Estimated time: 80 minutes since start*"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Ke6ANal_YUI3"
      },
      "source": [
        "## Section 2.3: Sequence to Sequence \n",
        "\n"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "0FlARbqG2zXo",
        "cellView": "form"
      },
      "source": [
        "#@title Video: Seq2Seq Modelling\n",
        "try: t4;\n",
        "except NameError: t4=time.time()\n",
        "\n",
        "video = YouTubeVideo(id=\"DebMnQ7PYg0\", width=854, height=480, fs=1)\n",
        "print(\"Video available at https://youtube.com/watch?v=\" + video.id)\n",
        "\n",
        "video"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "3npf6-Jq4L5g"
      },
      "source": [
        "Sources: [d2l.ai on encoders](https://d2l.ai/chapter_recurrent-modern/encoder-decoder.html) ; [d2l.ai on seq2seq](https://d2l.ai/chapter_recurrent-modern/seq2seq.html) ; [Jalammar's blog](https://jalammar.github.io/visualizing-neural-machine-translation-mechanics-of-seq2seq-models-with-attention/ )\n",
        "\n",
        "Sequence-to-sequence models take in a sequence of items (words, characters, etc.) as input and produce another sequence of items as output. The \n",
        "simplest seq2seq models are composed of two parts — the encoder and the decoder — connected by the context (\"state\" in the figure). The encoder and decoder usually consist of recurrent units that we've seen before (RNNs, GRUs or LSTMs). A high-level schematic of the architecture is as follows:\n",
        "\n",
        "![The encoder-decoder architecture](https://d2l.ai/_images/encoder-decoder.svg)\n",
        "\n",
        "The encoder's recurrent unit processes the input one item at a time. Once the entire sequence is processed, the final hidden state vector produced is known as a context vector. The size of the context vector is defined while setting up the model, and is equal to the number of hidden states used in the encoder RNN. The encoder then passes the context to the decoder. The decoder's recurrent unit uses the context to produce the items for the output sequence one by one.   \n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "ThYoNqMDXs4t"
      },
      "source": [
        "One of the most popular applications of seq2seq models is \"machine translation\": the task of taking in a sentence in one language (the source) and producing its translation in another language (the target); with words in both languages being the sequence units. This is a supervised learning task, and requires the dataset to have \"parallel sentences\"; i.e., each sentence in the source language must be labelled with its translation in the target language. \n",
        "\n",
        "[Here is an intuitive visualization for understanding seq2seq models for machine translation from English to French](https://i.imgur.com/HJ6t8up.mp4)\n",
        "\n",
        "Since the vocabulary of an entire language is very large, training such models to give meaningful performance requires significant time and resources. In this section, we will train a seq2seq model to perform machine translation from English to [Pig-Latin](https://en.wikipedia.org/wiki/Pig_Latin). We will modify the task to perform character-level machine translation, so that vocabulary size does not grow exponentially.   \n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "wjOCGYjDoqaD"
      },
      "source": [
        "### Setup"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "RVN0flAcxvZT",
        "cellView": "form"
      },
      "source": [
        "#@title Preprocessing\n",
        "! wget https://sourceforge.net/projects/wordlist/files/speller/2020.12.07/wordlist-en_US-large-2020.12.07.zip\n",
        "\n",
        "! unzip wordlist-en_US-large-2020.12.07.zip\n",
        "\n",
        "def t(word):\n",
        "    \"\"\"Return the first two characters of `word` (candidate consonant cluster).\"\"\"\n",
        "    # Parameter renamed from `str` to avoid shadowing the built-in type.\n",
        "    return word[0]+word[1]\n",
        "def pig_latinize(word):\n",
        "    \"\"\"Translate a single lowercase word into Pig Latin.\n",
        "\n",
        "    Vowel-initial words get 'ay' appended; words starting with one of the\n",
        "    listed two-letter consonant clusters move that cluster to the end before\n",
        "    'ay'; non-alphabetic tokens are returned unchanged; otherwise the first\n",
        "    letter moves to the end before 'ay'.\n",
        "    \"\"\"\n",
        "    # Two-letter consonant clusters that move as a unit.\n",
        "    lst = ['sh', 'gl', 'ch', 'ph', 'tr', 'br', 'fr', 'bl', 'gr', 'st', 'sl', 'cl', 'pl', 'fl']\n",
        "    # sentence = input('Type what you would like translated into pig-latin and press ENTER: ')\n",
        "    # sentence = sentence.split()\n",
        "    i = word\n",
        "    if i[0] in ['a', 'e', 'i', 'o', 'u']:\n",
        "        word = i+'ay'\n",
        "    elif t(i) in lst:\n",
        "        word = i[2:]+i[:2]+'ay'\n",
        "    elif i.isalpha() == False:\n",
        "        word = i\n",
        "    else:\n",
        "        word = i[1:]+i[0]+'ay'\n",
        "    return word\n",
        "\n",
        "def read_data():\n",
        "    \"\"\"Load the word list and return 'english piglatin' training lines.\n",
        "\n",
        "    Keeps alphabetic words of length 3-5 (after stripping, lowercasing and\n",
        "    ASCII-folding with unidecode).\n",
        "    \"\"\"\n",
        "    # data_folder = 'scowl-2020.12.07/final'\n",
        "    word_list = []\n",
        "    # for file_ in os.listdir(data_folder):\n",
        "    #     if 'american-words' in file_:\n",
        "    with open('en_US-large.txt') as f:\n",
        "        word_list.extend(f.readlines())\n",
        "\n",
        "    clean_wordlist = [unidecode.unidecode(w.strip().lower()) for w in word_list if w.strip().isalpha() and len(w.strip())>2 and len(w.strip())<6]\n",
        "\n",
        "    data = []\n",
        "    for word in clean_wordlist:\n",
        "        data.append(word + ' ' + pig_latinize(word))\n",
        "\n",
        "    return data\n",
        "\n",
        "def tokenize_nmt(text, num_examples=None):\n",
        "    \"\"\"Tokenize the English / Pig-Latin dataset at the character level.\n",
        "\n",
        "    Each line of `text` is 'source target'; returns per-example character\n",
        "    lists plus the sorted source and target vocabularies (including the\n",
        "    special tokens <eos>, <bos> and <pad>).\n",
        "    \"\"\"\n",
        "    source, target = [], []\n",
        "    source_char_set = set()\n",
        "    target_char_set = set()\n",
        "    for i, line in enumerate(text):\n",
        "        if num_examples and i > num_examples:\n",
        "            break\n",
        "        parts = line.split(' ')\n",
        "        # parts = line.split('\\t')\n",
        "        if len(parts) == 2:\n",
        "            src_txt, tgt_txt = parts\n",
        "            cur_src, cur_tgt = [], []\n",
        "            for c in src_txt:\n",
        "                cur_src.append(c)\n",
        "                source_char_set.add(c)\n",
        "            for c in tgt_txt:\n",
        "                cur_tgt.append(c)\n",
        "                target_char_set.add(c)\n",
        "            source.append(cur_src)\n",
        "            target.append(cur_tgt)\n",
        "\n",
        "    special_tokens = ['<eos>', '<bos>', '<pad>']\n",
        "    for tok in special_tokens:\n",
        "        source_char_set.add(tok)\n",
        "        target_char_set.add(tok)    \n",
        "    return source, target, sorted(list(source_char_set)), sorted(list(target_char_set)) "
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "8QJYIXgwBBjB"
      },
      "source": [
        "The following cell retrieves about 29,000 random English words and their Pig Latin translations. We then tokenize each word and its translation; in this case, our tokens are characters. We also create vocabularies for the source and target languages; and a two-way mapping for each (index to token and token to index). Finally, we pick the value for `NUM_STEPS` as the size of the largest sequence in either language. This value would mark the maximum size of the sequence accepted by our models.  "
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "lvw_b22Rh1qT"
      },
      "source": [
        "data = read_data()\n",
        "\n",
        "source, target, source_vocab, target_vocab = tokenize_nmt(data)\n",
        "\n",
        "source_idx2token = dict((i, char) for i, char in enumerate(source_vocab))\n",
        "target_idx2token = dict((i, char) for i, char in enumerate(target_vocab))\n",
        "source_vocab = dict((char, i) for i, char in enumerate(source_vocab))\n",
        "target_vocab = dict((char, i) for i, char in enumerate(target_vocab))\n",
        "\n",
        "NUM_STEPS = max(max([len(s) for s in source]), max([len(t) for t in target]))"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "origin_pos": 12,
        "id": "ve74aqidfOua"
      },
      "source": [
        "### Padding\n",
        "\n",
        "The input sequences to our model can vary in length, so it is often convenient to have a consistent length among all inputs to the model (this is not required by recurrent models, but makes it easier to control minibatch size). If our defined maximum sequence length is $M$ and a given input sequence is shorter than that, then we pad it with the special padding token `<pad>` until its length becomes $M$.\n",
        "\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "P6AINYCVDp6l"
      },
      "source": [
        "#### Exercise 6\n",
        "\n",
        "Implement a function below that performs truncation and padding based on a list of tokens (characters) `line` given to it.  "
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "origin_pos": 15,
        "tab": [
          "pytorch"
        ],
        "id": "R6x0WGjpfOue"
      },
      "source": [
        "def truncate_pad(line, num_steps, padding_token):\n",
        "    \"\"\"Truncate or pad sequences.\"\"\"\n",
        "    ####################################################################\n",
        "    # Fill in missing code below (...),\n",
        "    # then remove or comment the line below to test your function\n",
        "    raise NotImplementedError(\"Truncate and Pad\")\n",
        "    ####################################################################\n",
        "    \n",
        "    # Truncate\n",
        "    if ...:\n",
        "        return ...  \n",
        "    \n",
        "    # Pad\n",
        "    number_of_pad_tokens = ...\n",
        "    padding_list = ...\n",
        "\n",
        "    return line + padding_list\n",
        "\n",
        "# # Uncomment to run\n",
        "# word = ['e', 't', 'u', 'i', 's']\n",
        "# input = [source_vocab[c] for c in word]\n",
        "# print('Input test: ', word)\n",
        "# o1 = truncate_pad(input, 10, source_vocab['<pad>'])\n",
        "# x = [source_idx2token[i] for i in o1]\n",
        "# print(x)\n",
        "# o1 = truncate_pad(input, 1, source_vocab['<pad>'])\n",
        "# x = [source_idx2token[i] for i in o1]\n",
        "# print(x)\n",
        "# o1 = truncate_pad(input, 5, source_vocab['<pad>'])\n",
        "# x = [source_idx2token[i] for i in o1]\n",
        "# print(x)"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "StczcLzBYN6G"
      },
      "source": [
        "Sample Output:\n",
        "\n",
        "\n",
        "\n",
        "```\n",
        "Input test:  ['e', 't', 'u', 'i', 's']\n",
        "['e', 't', 'u', 'i', 's', '<pad>', '<pad>', '<pad>', '<pad>', '<pad>']\n",
        "['e']\n",
        "['e', 't', 'u', 'i', 's']\n",
        "```\n",
        "\n"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "iCaOuiPBD7b5"
      },
      "source": [
        "# Solution - to remove\n",
        "def truncate_pad(line, num_steps, padding_token):\n",
        "    \"\"\"Truncate or pad `line` (a list of token ids) to exactly `num_steps` items.\n",
        "\n",
        "    Sequences longer than `num_steps` are truncated; shorter ones are padded\n",
        "    with `padding_token`.\n",
        "    \"\"\"\n",
        "    # Truncate\n",
        "    if len(line) > num_steps:\n",
        "        return line[:num_steps]\n",
        "\n",
        "    # Pad\n",
        "    number_of_pad_tokens = num_steps - len(line)\n",
        "    padding_list = [padding_token] * number_of_pad_tokens\n",
        "\n",
        "    return line + padding_list"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "origin_pos": 17,
        "tab": [
          "pytorch"
        ],
        "id": "KbgiRfyIfOuj",
        "cellView": "form"
      },
      "source": [
        "#@title Dataloaders\n",
        "\n",
        "def build_array(data, vocab, num_steps):\n",
        "    \"\"\"Transform text sequences of machine translation into minibatches.\n",
        "\n",
        "    Appends <eos> to each sequence, truncates/pads it to `num_steps`, and\n",
        "    also records each sequence's valid (non-<pad>) length.\n",
        "    \"\"\"\n",
        "    complete_data, lengths = [], []\n",
        "    for lines in data:\n",
        "        lines = [vocab[l] for l in lines]\n",
        "        lines.append(vocab['<eos>'])\n",
        "        array = torch.tensor(truncate_pad(lines, num_steps, vocab['<pad>']))\n",
        "        valid_len = (array != vocab['<pad>']).type(torch.int32).sum(0)\n",
        "        complete_data.append(array)\n",
        "        lengths.append(valid_len.item())\n",
        "    return torch.stack(complete_data), torch.tensor(lengths)\n",
        "\n",
        "class MTDataset(torch.utils.data.Dataset):\n",
        "    'Characterizes a dataset for PyTorch'\n",
        "\n",
        "    def __init__(self, source, target):\n",
        "        'Initialization'\n",
        "        # NOTE(review): relies on the module-level `source_vocab`, `target_vocab`\n",
        "        # and `NUM_STEPS` defined in earlier cells.\n",
        "        source_data, source_lens = build_array(source, source_vocab, NUM_STEPS)        \n",
        "        target_data, target_lens = build_array(target, target_vocab, NUM_STEPS)\n",
        "        self.source = source_data\n",
        "        self.source_len = source_lens\n",
        "        self.target = target_data\n",
        "        self.target_len = target_lens\n",
        "\n",
        "    def __len__(self):\n",
        "        'Denotes the total number of samples'\n",
        "        return len(self.source)\n",
        "\n",
        "    def __getitem__(self, index):\n",
        "        'Generates one sample of data'\n",
        "        # Select sample\n",
        "        return self.source[index], self.source_len[index], self.target[index], self.target_len[index]"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "46yWeKnsgVtQ"
      },
      "source": [
        "### Encoder and Decoder Architecture\n"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "cARmwF5bFx65"
      },
      "source": [
        "#### Exercise 7\n",
        "\n",
        "Implement the forward functions for the Encoder and Decoder of the seq2seq model as directed. \n",
        "\n",
        "The Encoder model is very similar to what you have seen so far:\n",
        "* Get the embedding of the input `X`\n",
        "* Pass `X` through the recurrent unit. You can define your own initial hidden state, or omit it (in which case a tensor of zeros is used). \n",
        "* There is no linear layer \n",
        "\n",
        "You will notice that the Decoder also contains an embedding layer; something that wasn't mentioned during the initial explanation of the seq2seq model. This is because we want to apply \"teacher forcing\" to this problem. Teacher forcing is a strategy for training RNNs that uses model output from a prior time step as an input. Specifically, \n",
        "* Get the embedding of `X` (which is actually output from the previous time step)\n",
        "* Concatenate it with the previous hidden state\n",
        "* To the recurrent unit of the decoder pass this concatenation as \"input\"; and pass the previous hidden state as \"hidden\"\n",
        "* Pass the output of the recurrent unit through the linear layer     \n",
        "\n",
        "\n"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "kU4cY7gtgZgo"
      },
      "source": [
        "class Encoder(nn.Module):\n",
        "    \"\"\"The RNN encoder for sequence to sequence learning.\"\"\"\n",
        "    def __init__(self, vocab_size, embed_size, num_hiddens, num_layers,\n",
        "                 dropout=0):\n",
        "        super(Encoder, self).__init__()\n",
        "        \n",
        "        self.embedding = nn.Embedding(vocab_size, embed_size)\n",
        "        self.gru = nn.GRU(embed_size, num_hiddens, num_layers,\n",
        "                          dropout=dropout)\n",
        "\n",
        "    def forward(self, X):\n",
        "        \"\"\"Hint: always make sure your sizes are correct\"\"\"\n",
        "        ####################################################################\n",
        "        # Fill in missing code below (...),\n",
        "        # then remove or comment the line below to test your function\n",
        "        raise NotImplementedError(\"Encoder Forward\")\n",
        "        ####################################################################\n",
        "        \n",
        "        # The output `X` shape: (`batch_size`, `num_steps`, `embed_size`)\n",
        "        \n",
        "        # Embedding layer + reshape\n",
        "        X = ...\n",
        "\n",
        "        # Recurrent unit\n",
        "        \n",
        "        # `output` shape: (`num_steps`, `batch_size`, `num_hiddens`)\n",
        "        # `state` shape: (`num_layers`, `batch_size`, `num_hiddens`)\n",
        "        return output, state\n",
        "\n",
        "\n",
        "class Decoder(nn.Module):\n",
        "    \"\"\"The RNN decoder for sequence to sequence learning.\"\"\"\n",
        "    def __init__(self, vocab_size, embed_size, num_hiddens, num_layers,\n",
        "                 dropout=0):\n",
        "        super(Decoder, self).__init__()\n",
        "        self.embedding = nn.Embedding(vocab_size, embed_size)\n",
        "        \n",
        "        # GRU input = embedded token concatenated with the context vector\n",
        "        self.gru = nn.GRU(embed_size + num_hiddens, num_hiddens, num_layers,\n",
        "                          dropout=dropout)\n",
        "        self.dense = nn.Linear(num_hiddens, vocab_size)\n",
        "        self.dropout = nn.Dropout(0.25)\n",
        "        \n",
        "    def init_state(self, enc_outputs):\n",
        "        # The encoder returns (output, state); decoding starts from its final state\n",
        "        return enc_outputs[1]\n",
        "\n",
        "    def forward(self, X, state):\n",
        "        \"\"\"Hint: always make sure your sizes are correct\"\"\"\n",
        "        ####################################################################\n",
        "        # Fill in missing code below (...),\n",
        "        # then remove or comment the line below to test your function\n",
        "        raise NotImplementedError(\"Decoder Forward\")\n",
        "        ####################################################################\n",
        "\n",
        "\n",
        "        # The output `X` shape: (`num_steps`, `batch_size`, `embed_size`)\n",
        "        X = ...\n",
        "        # Broadcast `context` so it has the same `num_steps` as `X`\n",
        "        context = ...\n",
        "        \n",
        "        # Concatenate X and context \n",
        "        X_and_context = ...\n",
        "        \n",
        "        # Recurrent unit (the module is named `self.gru`, as defined in __init__)\n",
        "        output, state = self.gru(X_and_context, state)\n",
        "        \n",
        "        # Linear layer\n",
        "        output = ...\n",
        "\n",
        "        # `output` shape: (`batch_size`, `num_steps`, `vocab_size`)\n",
        "        # `state` shape: (`num_layers`, `batch_size`, `num_hiddens`)\n",
        "        return output, state\n",
        "\n",
        "# # Uncomment to run\n",
        "# encoder = Encoder(1000, 300, 100, 2, 0.1)\n",
        "# decoder = Decoder(1000, 300, 100, 2, 0.1)\n",
        "# print(encoder)\n",
        "# print(decoder)"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "tSGF3zG6at6j"
      },
      "source": [
        "Sample Output:\n",
        "\n",
        "```\n",
        "Encoder(\n",
        "  (embedding): Embedding(1000, 300)\n",
        "  (gru): GRU(300, 100, num_layers=2, dropout=0.1)\n",
        ")\n",
        "Decoder(\n",
        "  (embedding): Embedding(1000, 300)\n",
        "  (gru): GRU(400, 100, num_layers=2, dropout=0.1)\n",
        "  (dense): Linear(in_features=100, out_features=1000, bias=True)\n",
        "  (dropout): Dropout(p=0.25, inplace=False)\n",
        ")\n",
        "```\n",
        "\n"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "tSUthmnPtzgS"
      },
      "source": [
        "# Solution - to remove\n",
        "class Encoder(nn.Module):\n",
        "    \"\"\"The RNN encoder for sequence to sequence learning.\"\"\"\n",
        "    def __init__(self, vocab_size, embed_size, num_hiddens, num_layers,\n",
        "                 dropout=0):\n",
        "        super(Encoder, self).__init__()\n",
        "        # Embedding layer\n",
        "        self.embedding = nn.Embedding(vocab_size, embed_size)\n",
        "        # No output head: the encoder's final hidden state serves as the\n",
        "        # context that is handed to the decoder.\n",
        "        self.gru = nn.GRU(embed_size, num_hiddens, num_layers,\n",
        "                          dropout=dropout)\n",
        "\n",
        "    def forward(self, X):\n",
        "        \"\"\"Hint: always make sure your sizes are correct\"\"\"\n",
        "        # The output `X` shape: (`batch_size`, `num_steps`, `embed_size`)\n",
        "        X = self.embedding(X)\n",
        "\n",
        "        # In RNN models, the first axis corresponds to time steps\n",
        "        X = X.permute(1, 0, 2)\n",
        "        # When state is not mentioned, it defaults to zeros\n",
        "        output, state = self.gru(X)\n",
        "        # `output` shape: (`num_steps`, `batch_size`, `num_hiddens`)\n",
        "        # `state` shape: (`num_layers`, `batch_size`, `num_hiddens`)\n",
        "        return output, state\n",
        "\n",
        "\n",
        "class Decoder(nn.Module):\n",
        "    \"\"\"The RNN decoder for sequence to sequence learning.\"\"\"\n",
        "    def __init__(self, vocab_size, embed_size, num_hiddens, num_layers,\n",
        "                 dropout=0):\n",
        "        super(Decoder, self).__init__()\n",
        "        self.embedding = nn.Embedding(vocab_size, embed_size)\n",
        "        \n",
        "        # GRU input = embedded token concatenated with the context vector.\n",
        "        self.gru = nn.GRU(embed_size + num_hiddens, num_hiddens, num_layers,\n",
        "                          dropout=dropout)\n",
        "        self.dense = nn.Linear(num_hiddens, vocab_size)\n",
        "        # NOTE(review): this dropout module is defined but never applied in forward().\n",
        "        self.dropout = nn.Dropout(0.25)\n",
        "\n",
        "    def init_state(self, enc_outputs):\n",
        "        # Encoder returns (output, state); start decoding from its final state.\n",
        "        return enc_outputs[1]\n",
        "\n",
        "    def forward(self, X, state):\n",
        "        \"\"\"Hint: always make sure your sizes are correct\"\"\"\n",
        "        # The output `X` shape: (`num_steps`, `batch_size`, `embed_size`)\n",
        "        X = self.embedding(X).permute(1, 0, 2)\n",
        "        # Broadcast `context` (last-layer final state) so it has the same `num_steps` as `X`\n",
        "        context = state[-1].repeat(X.shape[0], 1, 1)\n",
        "        X_and_context = torch.cat((X, context), 2)\n",
        "        output, state = self.gru(X_and_context, state)\n",
        "        output = self.dense(output).permute(1, 0, 2)\n",
        "        # `output` shape: (`batch_size`, `num_steps`, `vocab_size`)\n",
        "        # `state` shape: (`num_layers`, `batch_size`, `num_hiddens`)\n",
        "        return output, state"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "TceYwj1cGUas"
      },
      "source": [
        "class EncoderDecoder(nn.Module):\n",
        "    \"\"\"The base class for the encoder-decoder architecture.\"\"\"\n",
        "    def __init__(self, encoder, decoder):\n",
        "        super(EncoderDecoder, self).__init__()\n",
        "        self.encoder = encoder\n",
        "        self.decoder = decoder\n",
        "\n",
        "    def forward(self, enc_X, dec_X):\n",
        "        # Encode the source, seed the decoder state from the encoder's\n",
        "        # outputs, then decode `dec_X` from that state.\n",
        "        enc_outputs = self.encoder(enc_X)\n",
        "        dec_state = self.decoder.init_state(enc_outputs)\n",
        "        return self.decoder(dec_X, dec_state)"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "wvGL0TXOlSC_"
      },
      "source": [
        "### Training"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "XIHjicCUgrOT",
        "cellView": "form"
      },
      "source": [
        "#@title Masked Loss Function\n",
        "def sequence_mask(X, valid_len, value=0):\n",
        "    \"\"\"Mask irrelevant entries in sequences.\n",
        "\n",
        "    NOTE: mutates `X` in place (entries at positions >= `valid_len` are\n",
        "    overwritten with `value`) and also returns it.\n",
        "    \"\"\"\n",
        "    maxlen = X.size(1)\n",
        "    # Compare a [1, maxlen] position ramp against [batch, 1] lengths to build the mask.\n",
        "    mask = torch.arange((maxlen), dtype=torch.float32,\n",
        "                        device=X.device)[None, :] < valid_len[:, None]\n",
        "    X[~mask] = value\n",
        "    return X\n",
        "\n",
        "class MaskedSoftmaxCELoss(nn.CrossEntropyLoss):\n",
        "    \"\"\"The softmax cross-entropy loss with masks.\n",
        "\n",
        "    Positions beyond `valid_len` get zero weight, so padding does not\n",
        "    contribute to the per-sequence loss.\n",
        "    \"\"\"\n",
        "    # `pred` shape: (`batch_size`, `num_steps`, `vocab_size`)\n",
        "    # `label` shape: (`batch_size`, `num_steps`)\n",
        "    # `valid_len` shape: (`batch_size`,)\n",
        "    def forward(self, pred, label, valid_len):\n",
        "        weights = torch.ones_like(label)\n",
        "        weights = sequence_mask(weights, valid_len)\n",
        "        # Keep per-element losses so they can be masked before averaging.\n",
        "        self.reduction='none'\n",
        "        unweighted_loss = super(MaskedSoftmaxCELoss, self).forward(\n",
        "            pred.permute(0, 2, 1), label)\n",
        "        weighted_loss = (unweighted_loss * weights).mean(dim=1)\n",
        "        return weighted_loss"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "origin_pos": 32,
        "tab": [
          "pytorch"
        ],
        "id": "XGk5edV6g7E7",
        "cellView": "form"
      },
      "source": [
        "#@title Train\n",
        "def train_seq2seq(net, data_loader, lr, num_epochs, tgt_vocab, device):\n",
        "    \"\"\"Train a model for sequence to sequence.\n",
        "\n",
        "    Args:\n",
        "        net: an EncoderDecoder instance.\n",
        "        data_loader: yields (X, X_valid_len, Y, Y_valid_len) batches.\n",
        "        lr: Adam learning rate.\n",
        "        num_epochs: number of passes over the data.\n",
        "        tgt_vocab: target vocabulary (used for the <bos> token).\n",
        "        device: torch device to train on.\n",
        "    \"\"\"\n",
        "    def xavier_init_weights(m):\n",
        "        # Xavier-init linear layers and every weight matrix of the GRUs.\n",
        "        if type(m) == nn.Linear:\n",
        "            nn.init.xavier_uniform_(m.weight)\n",
        "        if type(m) == nn.GRU:\n",
        "            for param in m._flat_weights_names:\n",
        "                if \"weight\" in param:\n",
        "                    nn.init.xavier_uniform_(m._parameters[param])\n",
        "    net.apply(xavier_init_weights)\n",
        "    net.to(device)\n",
        "    optimizer = torch.optim.Adam(net.parameters(), lr=lr)\n",
        "    loss = MaskedSoftmaxCELoss()\n",
        "\n",
        "    net.train()\n",
        "    animator = d2l.Animator(xlabel='epoch', ylabel='loss',\n",
        "                            xlim=[10, num_epochs])\n",
        "    for epoch in range(num_epochs):\n",
        "        timer = d2l.Timer()\n",
        "        metric = d2l.Accumulator(2)  # Sum of training loss, no. of tokens\n",
        "        # BUG FIX: iterate over the `data_loader` argument instead of\n",
        "        # rebuilding a DataLoader from the globals `source`/`target`.\n",
        "        for batch in data_loader:\n",
        "            X, X_valid_len, Y, Y_valid_len = [x.to(device) for x in batch]\n",
        "            bos = torch.tensor([tgt_vocab['<bos>']] * Y.shape[0],\n",
        "                               device=device).reshape(-1, 1)\n",
        "\n",
        "            dec_input = torch.cat([bos, Y[:, :-1]], 1)  # Teacher forcing\n",
        "            Y_hat, _ = net(X, dec_input)\n",
        "            l = loss(Y_hat, Y, Y_valid_len)\n",
        "            # BUG FIX: clear stale gradients before backward; otherwise\n",
        "            # they accumulate across batches and corrupt every update\n",
        "            # after the first one.\n",
        "            optimizer.zero_grad()\n",
        "            l.sum().backward()  # Make the loss scalar for `backward`\n",
        "            # Clip to max norm 1 to avoid exploding RNN gradients.\n",
        "            grad_clipping(net, 1)\n",
        "            num_tokens = Y_valid_len.sum()\n",
        "            optimizer.step()\n",
        "            with torch.no_grad():\n",
        "                metric.add(l.sum(), num_tokens)\n",
        "\n",
        "        if (epoch + 1) % 10 == 0:\n",
        "            animator.add(epoch + 1, (metric[0] / metric[1],))\n",
        "    print(f'loss {metric[0] / metric[1]:.3f}, {metric[1] / timer.stop():.1f} '\n",
        "          f'tokens/sec on {str(device)}')"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "_AFgnQs8B032"
      },
      "source": [
        "The cell below takes about 10 minutes to run."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "hEwHHZM69cQO"
      },
      "source": [
        "# Hyperparameters: a deliberately small model so this cell finishes\n",
        "# in roughly 10 minutes.\n",
        "embed_size, num_hiddens, num_layers, dropout = 32, 32, 2, 0.1\n",
        "\n",
        "# Train on GPU when Colab provides one, otherwise fall back to CPU.\n",
        "lr, num_epochs, device = 0.005, 150, torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
        "\n",
        "dataset = MTDataset(source, target)\n",
        "dataloader = torch.utils.data.DataLoader(dataset, batch_size=64)\n",
        "\n",
        "# Encoder and decoder use the same embedding/hidden sizes; only their\n",
        "# vocabularies differ (source vs. target characters).\n",
        "encoder = Encoder(\n",
        "    len(source_vocab), embed_size, num_hiddens, num_layers, dropout)\n",
        "decoder = Decoder(\n",
        "    len(target_vocab), embed_size, num_hiddens, num_layers, dropout)\n",
        "net = EncoderDecoder(encoder, decoder)\n",
        "train_seq2seq(net, dataloader, lr, num_epochs, target_vocab, device)"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "XeqT8Qk6nr_P"
      },
      "source": [
        "### Evaluation\n",
        "\n",
        "How do we know that we have obtained a good translation? BLEU (Bilingual Evaluation Understudy) is a metric that was developed specifically for this purpose. If you're curious, you can check out the details of the metric [here](https://d2l.ai/chapter_recurrent-modern/seq2seq.html?highlight=bleu#evaluation-of-predicted-sequences). For now, all you need to know is that a BLEU score lies between 0 and 1; the closer you are to 1, the better your translation is."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "origin_pos": 39,
        "tab": [
          "pytorch"
        ],
        "id": "53e59NY1g7FA",
        "cellView": "form"
      },
      "source": [
        "#@title Compute BLEU\n",
        "def bleu(pred_seq, label_seq, k):  #@save\n",
        "    \"\"\"Compute the BLEU score of `pred_seq` against `label_seq`.\n",
        "\n",
        "    Character-level BLEU: clipped n-gram precision for n = 1..k combined\n",
        "    with a brevity penalty. Returns a float in [0, 1]; 1 is a perfect\n",
        "    match.\n",
        "    \"\"\"\n",
        "    pred_tokens, label_tokens = [c for c in pred_seq], [c for c in label_seq]\n",
        "    len_pred, len_label = len(pred_tokens), len(label_tokens)\n",
        "    # BUG FIX: an empty prediction previously raised ZeroDivisionError.\n",
        "    if len_pred == 0:\n",
        "        return 0.0\n",
        "    # Brevity penalty: penalize predictions shorter than the reference.\n",
        "    score = math.exp(min(0, 1 - len_label / len_pred))\n",
        "    for n in range(1, k + 1):\n",
        "        # BUG FIX: when no n-grams of this order exist the precision is\n",
        "        # zero (the old code divided by a non-positive count).\n",
        "        if n > len_pred:\n",
        "            return 0.0\n",
        "        num_matches, label_subs = 0, collections.defaultdict(int)\n",
        "        # Count reference n-grams; matches below consume these counts,\n",
        "        # which is what clips repeated predicted n-grams.\n",
        "        for i in range(len_label - n + 1):\n",
        "            label_subs[''.join(label_tokens[i: i + n])] += 1\n",
        "        for i in range(len_pred - n + 1):\n",
        "            if label_subs[''.join(pred_tokens[i: i + n])] > 0:\n",
        "                num_matches += 1\n",
        "                label_subs[''.join(pred_tokens[i: i + n])] -= 1\n",
        "        # Geometric weighting: order-n precision enters with exponent 0.5**n.\n",
        "        score *= math.pow(num_matches / (len_pred - n + 1), math.pow(0.5, n))\n",
        "    return score"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Hwpllf_QG1QF"
      },
      "source": [
        "#### Exercise 8\n",
        "\n",
        "Implement a function to make a translation of a given word. "
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "origin_pos": 37,
        "tab": [
          "pytorch"
        ],
        "id": "YQIrj4vAg7E-"
      },
      "source": [
        "def predict_seq2seq(net, src_sentence, device):\n",
        "    \"\"\"Predict for sequence to sequence.\n",
        "\n",
        "    Greedily decode `src_sentence` with the trained `net`, returning a\n",
        "    list of target-vocab token indices (without the <eos> token).\n",
        "    \"\"\"\n",
        "\n",
        "    ####################################################################\n",
        "    # Fill in missing code below (...),\n",
        "    # then remove or comment the line below to test your function\n",
        "    raise NotImplementedError(\"Seq2seq prediction\")\n",
        "    ####################################################################\n",
        "\n",
        "    # Set `net` to eval mode for inference\n",
        "    net.eval()\n",
        "\n",
        "    # Get source language tokens \n",
        "    src_tokens = [source_vocab[c] for c in src_sentence.lower()] + [source_vocab['<eos>']]\n",
        "    # Truncate/pad the tokens\n",
        "    # (hint: `truncate_pad` with NUM_STEPS and the source <pad> token)\n",
        "    src_tokens = ...\n",
        "    # Create encoder tensor of the right size \n",
        "    enc_X = torch.unsqueeze(torch.tensor(src_tokens, dtype=torch.long, device=device), dim=0)\n",
        "    \n",
        "    # Pass the source sentence tensor to the encoder\n",
        "    enc_outputs = ...\n",
        "\n",
        "    # Get the encoder hidden state to pass to the decoder\n",
        "    dec_state = ...\n",
        "    \n",
        "    # Create an input with the <BOS> tag to be passed to the decoder (part of teacher forcing)\n",
        "    dec_X = torch.unsqueeze(torch.tensor([target_vocab['<bos>']], dtype=torch.long, device=device), dim=0)\n",
        "    output_seq = []\n",
        "    \n",
        "    # Predict characters for a maximum of the max sequence length\n",
        "    for _ in range(NUM_STEPS):\n",
        "\n",
        "        # Pass the decoder input + hidden state to the decoder\n",
        "        Y, dec_state = ...\n",
        "\n",
        "        # We use the token with the highest prediction likelihood as the input\n",
        "        # of the decoder at the next time step\n",
        "        dec_X = ...\n",
        "        pred = dec_X.squeeze(dim=0).type(torch.int32).item()\n",
        "        \n",
        "        # Once the end-of-sequence token is predicted, the generation of the\n",
        "        # output sequence is complete\n",
        "        # (hint: stop decoding here)\n",
        "        if pred == target_vocab['<eos>']:\n",
        "            ...\n",
        "        \n",
        "        output_seq.append(pred)\n",
        "    return output_seq"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "S1j0mV9RG-cO"
      },
      "source": [
        "# Solution - to remove\n",
        "def predict_seq2seq(net, src_sentence):\n",
        "    \"\"\"Greedily decode the translation of `src_sentence` with `net`.\n",
        "\n",
        "    Returns a list of target-vocab token indices (without <eos>).\n",
        "    Relies on the notebook globals `device`, `source_vocab`,\n",
        "    `target_vocab` and `NUM_STEPS`.\n",
        "    \"\"\"\n",
        "    # Set `net` to eval mode for inference\n",
        "    net.eval()\n",
        "    src_tokens = [source_vocab[c] for c in src_sentence.lower()] + [source_vocab['<eos>']]\n",
        "    src_tokens = truncate_pad(src_tokens, NUM_STEPS, source_vocab['<pad>'])\n",
        "    # Add the batch axis\n",
        "    enc_X = torch.unsqueeze(torch.tensor(src_tokens, dtype=torch.long, device=device), dim=0)\n",
        "    # BUG FIX: inference needs no autograd; no_grad avoids building a\n",
        "    # graph and tracking gradients for every decoding step.\n",
        "    with torch.no_grad():\n",
        "        enc_outputs = net.encoder(enc_X)\n",
        "        dec_state = net.decoder.init_state(enc_outputs)\n",
        "        # Add the batch axis\n",
        "        dec_X = torch.unsqueeze(torch.tensor([target_vocab['<bos>']], dtype=torch.long, device=device), dim=0)\n",
        "        output_seq = []\n",
        "        for _ in range(NUM_STEPS):\n",
        "            Y, dec_state = net.decoder(dec_X, dec_state)\n",
        "            # We use the token with the highest prediction likelihood as\n",
        "            # the input of the decoder at the next time step\n",
        "            dec_X = Y.argmax(dim=2)\n",
        "            pred = dec_X.squeeze(dim=0).type(torch.int32).item()\n",
        "            # Once the end-of-sequence token is predicted, the generation\n",
        "            # of the output sequence is complete\n",
        "            if pred == target_vocab['<eos>']:\n",
        "                break\n",
        "            output_seq.append(pred)\n",
        "    return output_seq"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "w0ekGmulmffM"
      },
      "source": [
        "# Sanity-check the trained model on a handful of held-out words and\n",
        "# report a character-level BLEU (k=2) for each translation.\n",
        "engs = ['middle', 'funny', 'hour', 'igloo', 'vendor', 'moody']\n",
        "pig_latin = ['iddlemay', 'unnyfay', 'ourhay', 'iglooway', 'endorvay', 'oodymay']\n",
        "\n",
        "column_width = 18\n",
        "    \n",
        "print('English'.ljust(column_width) + 'Pig Latin'.ljust(column_width) + 'Translation'.ljust(column_width) + 'BLEU\\n--------------------------------------------------------------')\n",
        "\n",
        "for eng, pig in zip(engs, pig_latin):\n",
        "    # Decode to token indices, then map back to characters for display.\n",
        "    translation = predict_seq2seq(net, eng)\n",
        "    translation = ''.join([target_idx2token[i] for i in translation])\n",
        "    print(eng.ljust(column_width) + pig.ljust(column_width) + translation.ljust(column_width) + '%.3f' % bleu(translation, pig, k=2))\n",
        "    # print('English: %s; Pig Latin: %s; Translation: %s; BLEU: %.3f' % (eng, pig, translation, bleu(translation, pig, k=2)))"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "7-liy3a_3Nz_"
      },
      "source": [
        "Besides machine translation, can you think of another application where the encoder-decoder architecture can be applied?"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "eBY02PMv43wY",
        "cellView": "form"
      },
      "source": [
        "# Free-text answer, submitted via the Airtable form at the bottom.\n",
        "seq2seq_application = '6' #@param {type: \"string\"}"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "L5LnHDFOENPX"
      },
      "source": [
        "*Estimated time: 120 minutes since start*"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "_hiELIhAoI_m"
      },
      "source": [
        "---\n",
        "# Section 3. Societal Issues: Hate Speech Classification Challenges\n"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "SjpsTqxB2enL",
        "cellView": "form"
      },
      "source": [
        "#@title Video: Hate Speech and Society\n",
        "# Record the time this section was reached (t5) once; re-running the\n",
        "# cell keeps the original timestamp.\n",
        "try: t5;\n",
        "except NameError: t5=time.time()\n",
        "\n",
        "video = YouTubeVideo(id=\"3LmK5jziW64\", width=854, height=480, fs=1)\n",
        "print(\"Video available at https://youtube.com/watch?v=\" + video.id)\n",
        "\n",
        "# Last expression: the cell's rich output is the embedded player.\n",
        "video"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "_T3ASFgnByTb"
      },
      "source": [
        "Social media platforms such as Facebook and Twitter have been considered \"de facto public squares.\"  There has been much public debate among legal scholars, tech executives, and governments on how best to address hate speech on these platforms.  Read the following article as a primer on hate speech and social media.\n",
        "\n",
        "\"Social Media and Online Speech: How Should Countries Regulate Tech Giants?\"  \n",
        "https://www.cfr.org/in-brief/social-media-and-online-speech-how-should-countries-regulate-tech-giants\n",
        "\n",
        "*Council on Foreign Relations, 2021* [4-minute read]\n",
        "\n",
        "Discuss with your pod and summarize the main points of the article in one paragraph (~5 sentences)."
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "cellView": "form",
        "id": "gb3KWSNOu9xy"
      },
      "source": [
        "# Free-text answer, submitted via the Airtable form at the bottom.\n",
        "societal_issues = '7' #@param {type: \"string\"}"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "T7NSFhI5Edni"
      },
      "source": [
        "*Estimated time: 135 minutes since start*"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "X-hYAtkAoMyo"
      },
      "source": [
        "---\n",
        "# Final Project \n",
        "\n",
        "The final project is a chance for you to use what you learned in this course.\n",
        "\n",
        "*   The key is to find an interesting project for which you can get data; See examples of AI for social good\n",
        "*   You should be getting feedback from your pod TA every week\n",
        "so there are no surprises when the project grade comes\n",
        "* See the [handout](https://docs.google.com/document/d/1DYFwyk53VMQu6zCPOtq9tzmqfQkdVhHpUatuU87Y3Tk) for details\n",
        "\n",
        "Now: meet with your pod to discuss ideas for the final project and divide yourselves into final project groups of 2 or 3 people."
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "gbgTqgB5rH_W"
      },
      "source": [
        "---\n",
        "# Wrap up\n"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "NJ61Rzja3QFR",
        "cellView": "form"
      },
      "source": [
        "#@title Video: Wrap Up\n",
        "# Record the time this section was reached (t6) once; re-running the\n",
        "# cell keeps the original timestamp.\n",
        "try: t6;\n",
        "except NameError: t6=time.time()\n",
        "\n",
        "video = YouTubeVideo(id=\"bnnxSA_mDNA\", width=854, height=480, fs=1)\n",
        "print(\"Video available at https://youtube.com/watch?v=\" + video.id)\n",
        "\n",
        "# Last expression: the cell's rich output is the embedded player.\n",
        "video\n"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "dIhJv7ycxxsW"
      },
      "source": [
        "On your own (for fun; you don't need to hand in anything): take a look at some of the many fun applications on the web. For each one, guess which architecture it is using.\n",
        "\n",
        "* Speech to text: [alien maths](https://www.alienmaths.com/)\n",
        "\n",
        "* Image captioning:  [google images](https://blog.google/products/search/get-more-useful-information-captions-google-images/)"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "98YXpSntrY6k",
        "cellView": "form"
      },
      "source": [
        "#@markdown #Run Cell to Show Airtable Form\n",
        "#@markdown ##**Confirm your answers and then click \"Submit\"**\n",
        "\n",
        "import time\n",
        "import numpy as np\n",
        "import urllib.parse\n",
        "from IPython.display import IFrame\n",
        "def prefill_form(src, fields: dict):\n",
        "  '''Append Airtable prefill query parameters to an embed URL.\n",
        "\n",
        "  src: the original src url to embed the form\n",
        "  fields: a dictionary of field:value pairs,\n",
        "  e.g. {\"pennkey\": my_pennkey, \"location\": my_location}\n",
        "  Returns the URL with each field URL-encoded as 'prefill_<field>'.\n",
        "  '''\n",
        "  prefill_fields = {}\n",
        "  for key in fields:\n",
        "      new_key = 'prefill_' + key\n",
        "      prefill_fields[new_key] = fields[key]\n",
        "  prefills = urllib.parse.urlencode(prefill_fields)\n",
        "  src = src + prefills\n",
        "  return src\n",
        "\n",
        "\n",
        "#autofill time if it is not present\n",
        "try: t0;\n",
        "except NameError: t0 = time.time()\n",
        "try: t1;\n",
        "except NameError: t1 = time.time()\n",
        "try: t2;\n",
        "except NameError: t2 = time.time()\n",
        "try: t3;\n",
        "except NameError: t3 = time.time()\n",
        "try: t4;\n",
        "except NameError: t4 = time.time()\n",
        "try: t5;\n",
        "except NameError: t5 = time.time()\n",
        "try: t6;\n",
        "except NameError: t6 = time.time()\n",
        "try: t7;\n",
        "except NameError: t7 = time.time()\n",
        "\n",
        "#autofill fields if they are not present\n",
        "#a missing pennkey and pod will result in an Airtable warning\n",
        "#which is easily fixed user-side.\n",
        "try: my_pennkey;\n",
        "except NameError: my_pennkey = \"\"\n",
        "try: my_pod;\n",
        "except NameError: my_pod = \"Select\"\n",
        "try: lstm_range;\n",
        "except NameError: lstm_range = \"\"\n",
        "try: generation_output;\n",
        "except NameError: generation_output = \"\"\n",
        "try: generation_quality;\n",
        "except NameError: generation_quality = \"\"\n",
        "try: seq2seq_application;\n",
        "except NameError: seq2seq_application = \"\"\n",
        "try: seq_label_evaluate;\n",
        "except NameError: seq_label_evaluate = \"\"\n",
        "try: seq_label_mistakes;\n",
        "except NameError: seq_label_mistakes = \"\"\n",
        "try: societal_issues;\n",
        "except NameError: societal_issues = \"\"\n",
        "\n",
        "# Elapsed seconds at each checkpoint, relative to the notebook start t0.\n",
        "times = np.array([t1,t2,t3,t4,t5,t6,t7])-t0\n",
        "\n",
        "# Field names must match the Airtable form's prefill parameter names.\n",
        "fields = {\n",
        "        \"my_pennkey\": my_pennkey,\n",
        "        \"my_pod\": my_pod, \n",
        "        \"lstm_range\": lstm_range,\n",
        "        \"generation_output\": generation_output,\n",
        "        \"generation_quality\": generation_quality,\n",
        "        \"seq2seq_application\": seq2seq_application,\n",
        "        \"seq_label_evaluate\": seq_label_evaluate,\n",
        "        \"seq_label_mistakes\": seq_label_mistakes,\n",
        "        \"societal_issues\": societal_issues,\n",
        "        \"cumulative_times\": times\n",
        "}\n",
        "\n",
        "src = \"https://airtable.com/embed/shr76QznLcC66uGol?\"\n",
        "\n",
        "display(IFrame(src = prefill_form(src, fields), width = 800, height = 400))\n"
      ],
      "execution_count": null,
      "outputs": []
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "QE01fxjz53zd"
      },
      "source": [
        "## Feedback\n",
        "How could this session have been better? How happy are you in your group? How do you feel right now?\n",
        "\n",
        "Feel free to use the embeded form below or use this link:\n",
        "<a target=\"_blank\" rel=\"noopener noreferrer\" href=\"https://airtable.com/shrNSJ5ECXhNhsYss\">https://airtable.com/shrNSJ5ECXhNhsYss</a>"
      ]
    },
    {
      "cell_type": "code",
      "metadata": {
        "id": "YJ_IvTLV5H-P"
      },
      "source": [
        "# Embed the course feedback form (same form as the link above).\n",
        "display(IFrame(src=\"https://airtable.com/embed/shrNSJ5ECXhNhsYss?backgroundColor=red\", width = 800, height = 400))"
      ],
      "execution_count": null,
      "outputs": []
    }
  ]
}