{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import argparse\n",
    "import os\n",
    "import random\n",
    "from collections import Counter\n",
    "from itertools import chain\n",
    "from random import shuffle\n",
    "from typing import Dict, List, Tuple\n",
    "\n",
    "import nltk\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import sacremoses\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from nltk.translate.bleu_score import corpus_bleu\n",
    "from nltk.translate.meteor_score import meteor_score\n",
    "from torch.utils.data import DataLoader, Dataset\n",
    "from torch.utils.tensorboard import SummaryWriter\n",
    "from tqdm import tqdm"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class EncoderLSTM(torch.nn.Module):\n",
    "    \"\"\"LSTM encoder: embeds source token ids and returns the final\n",
    "    (hidden, cell) state plus per-step outputs for attention.\"\"\"\n",
    "\n",
    "    def __init__(\n",
    "        self, embedding: torch.nn.Embedding, hid_dim: int, n_layers: int, dropout: float\n",
    "    ):\n",
    "        super().__init__()\n",
    "        self.hid_dim = hid_dim\n",
    "        self.n_layers = n_layers\n",
    "        self.embedding = embedding\n",
    "        # NOTE(review): LSTM inter-layer dropout is a no-op when n_layers == 1\n",
    "        # (PyTorch warns); the Dropout below still applies to the embeddings.\n",
    "        self.rnn = torch.nn.LSTM(\n",
    "            embedding.embedding_dim,\n",
    "            hid_dim,\n",
    "            n_layers,\n",
    "            dropout=dropout,\n",
    "            batch_first=True,\n",
    "        )\n",
    "        self.dropout = torch.nn.Dropout(dropout)\n",
    "\n",
    "    def forward(self, src):\n",
    "        # src: [batch size, src len] token ids\n",
    "        embedded = self.dropout(self.embedding(src))\n",
    "        outputs, (hidden, cell) = self.rnn(embedded)\n",
    "        # outputs: [batch, src len, hid dim]; hidden/cell: [n layers, batch, hid dim]\n",
    "        return hidden, cell, outputs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Attention(nn.Module):\n",
    "    \"\"\"Additive (Bahdanau-style) attention over encoder outputs.\"\"\"\n",
    "\n",
    "    def __init__(self, enc_hid_dim, dec_hid_dim):\n",
    "        super().__init__()\n",
    "        # Scores come from the concatenated decoder state and encoder output.\n",
    "        self.attn = nn.Linear(enc_hid_dim + dec_hid_dim, dec_hid_dim)\n",
    "        self.v = nn.Linear(dec_hid_dim, 1, bias=False)\n",
    "\n",
    "    def forward(self, hidden, encoder_outputs, mask):\n",
    "        # hidden: [batch size, dec hid dim], encoder_outputs: [batch size, src len, enc hid dim]\n",
    "        src_len = encoder_outputs.shape[1]\n",
    "\n",
    "        hidden = hidden.unsqueeze(1).repeat(\n",
    "            1, src_len, 1\n",
    "        )  # Repeat decoder hidden state src_len times\n",
    "        energy = torch.tanh(self.attn(torch.cat((hidden, encoder_outputs), dim=2)))\n",
    "        attention = self.v(energy).squeeze(2)  # [batch size, src len]\n",
    "\n",
    "        # Padding positions (mask == 0) get -1e10 so softmax gives them ~0 weight.\n",
    "        attention = attention.masked_fill(mask == 0, -1e10)  # Apply mask\n",
    "        return F.softmax(attention, dim=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class DecoderLSTM(torch.nn.Module):\n",
    "    \"\"\"Single-step LSTM decoder without attention.\n",
    "\n",
    "    forward() accepts (and ignores) extra positional/keyword arguments so it\n",
    "    is call-compatible with DecoderAttentionLSTM inside Seq2Seq.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, embedding, hid_dim, n_layers, dropout):\n",
    "        super().__init__()\n",
    "        self.hid_dim = hid_dim\n",
    "        self.n_layers = n_layers\n",
    "        self.embedding = embedding\n",
    "        self.rnn = torch.nn.LSTM(\n",
    "            embedding.embedding_dim,\n",
    "            hid_dim,\n",
    "            n_layers,\n",
    "            dropout=dropout,\n",
    "            batch_first=True,\n",
    "        )\n",
    "        # Project the last hidden state to vocabulary-size logits.\n",
    "        self.fc_out = torch.nn.Linear(hid_dim, embedding.num_embeddings)\n",
    "        self.dropout = torch.nn.Dropout(dropout)\n",
    "\n",
    "    def forward(self, input, hidden, cell, *args, **kwargs):\n",
    "        # input: [batch size] token ids for a single decoding step\n",
    "        input = input.unsqueeze(1)\n",
    "        embedded = self.dropout(self.embedding(input))\n",
    "        output, (hidden, cell) = self.rnn(embedded, (hidden, cell))\n",
    "        prediction = self.fc_out(output.squeeze(1))\n",
    "        return prediction, hidden, cell\n",
    "\n",
    "\n",
    "class DecoderAttentionLSTM(torch.nn.Module):\n",
    "    \"\"\"Single-step LSTM decoder with additive attention over encoder outputs.\"\"\"\n",
    "\n",
    "    def __init__(\n",
    "        self,\n",
    "        embedding,\n",
    "        output_dim,\n",
    "        enc_hid_dim,\n",
    "        dec_hid_dim,\n",
    "        n_layers,\n",
    "        dropout,\n",
    "        attention,\n",
    "    ):\n",
    "        super().__init__()\n",
    "        self.output_dim = output_dim\n",
    "        self.hid_dim = dec_hid_dim\n",
    "        self.n_layers = n_layers\n",
    "        self.embedding = embedding\n",
    "        self.rnn = torch.nn.LSTM(\n",
    "            embedding.embedding_dim,\n",
    "            dec_hid_dim,\n",
    "            n_layers,\n",
    "            dropout=dropout,\n",
    "            batch_first=True,\n",
    "        )\n",
    "        # Logits come from [last-layer hidden ; attention-weighted context].\n",
    "        self.fc_out = torch.nn.Linear(enc_hid_dim + dec_hid_dim, output_dim)\n",
    "        self.dropout = torch.nn.Dropout(dropout)\n",
    "        self.attention = attention\n",
    "\n",
    "    def forward(self, input, hidden, cell, encoder_outputs, encoder_mask):\n",
    "        # input: [batch size]; hidden/cell: [n layers, batch size, hid dim]\n",
    "        # (LSTM state is layers-first even with batch_first=True)\n",
    "        input = input.unsqueeze(1)  # [batch size, 1]\n",
    "        embedded = self.dropout(self.embedding(input))  # [batch size, 1, emb dim]\n",
    "\n",
    "        # Run through RNN\n",
    "        output, (hidden, cell) = self.rnn(embedded, (hidden, cell))\n",
    "\n",
    "        # Attention is computed from the *updated* last-layer hidden state.\n",
    "        attention_weights = self.attention(\n",
    "            hidden[-1],\n",
    "            encoder_outputs,\n",
    "            encoder_mask,\n",
    "        )\n",
    "\n",
    "        # Context vector: weighted sum of encoder outputs, [batch size, enc hid dim].\n",
    "        encoder_outputs = encoder_outputs.permute(0, 2, 1)\n",
    "        weighted = torch.bmm(encoder_outputs, attention_weights.unsqueeze(2)).squeeze(2)\n",
    "\n",
    "        # Prepare input for the fully connected layer\n",
    "        rnn_output = hidden[-1]\n",
    "        output = torch.cat((rnn_output, weighted), dim=1)\n",
    "        prediction = self.fc_out(output)\n",
    "\n",
    "        return prediction, hidden, cell"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Seq2Seq(torch.nn.Module):\n",
    "    \"\"\"Encoder-decoder wrapper handling teacher forcing and greedy generation.\"\"\"\n",
    "\n",
    "    def __init__(self, encoder, decoder):\n",
    "        super().__init__()\n",
    "        self.encoder = encoder\n",
    "        self.decoder = decoder\n",
    "\n",
    "    @staticmethod\n",
    "    def create_mask(src):\n",
    "        # Padding token index is assumed to be 0 (Vocabulary.PAD).\n",
    "        # (The original `.permute(0, 1)` was an identity op on a 2-D tensor.)\n",
    "        return src != 0  # [batch size, src len]\n",
    "\n",
    "    def forward(self, src, trg, teacher_forcing_ratio=1):\n",
    "        \"\"\"Run the decoder for trg_len - 1 steps with teacher forcing.\n",
    "\n",
    "        Returns [batch, trg len, vocab] logits; position 0 stays all-zero\n",
    "        because decoding starts from the <SOS> token trg[:, 0].\n",
    "        \"\"\"\n",
    "        batch_size = trg.shape[0]\n",
    "        trg_len = trg.shape[1]\n",
    "        trg_vocab_size = self.decoder.embedding.num_embeddings\n",
    "\n",
    "        outputs = torch.zeros(batch_size, trg_len, trg_vocab_size, device=src.device)\n",
    "\n",
    "        hidden, cell, encoder_outputs = self.encoder(src)\n",
    "        encoder_mask = self.create_mask(src)\n",
    "\n",
    "        input = trg[:, 0]  # <SOS>\n",
    "\n",
    "        for t in range(1, trg_len):\n",
    "            output, hidden, cell = self.decoder(\n",
    "                input, hidden, cell, encoder_outputs, encoder_mask\n",
    "            )\n",
    "            outputs[:, t] = output\n",
    "            teacher_force = random.random() < teacher_forcing_ratio\n",
    "            top1 = output.argmax(1)\n",
    "            input = trg[:, t] if teacher_force else top1\n",
    "\n",
    "        return outputs\n",
    "\n",
    "    def generate(self, src, max_len=150, eos=2):\n",
    "        \"\"\"Greedy decoding. Returns a [batch, max_len] LongTensor of token ids\n",
    "        with everything after the first `eos` zeroed out.\"\"\"\n",
    "        hidden, cell, encoder_outputs = self.encoder(src)\n",
    "        encoder_mask = self.create_mask(src)\n",
    "\n",
    "        batch_size = src.shape[0]\n",
    "        # Fix: use an integer tensor -- the original float tensor yielded\n",
    "        # float token ids that break Vocabulary.denumericalize (itos keys are ints).\n",
    "        trg = torch.zeros(batch_size, max_len, dtype=torch.long, device=src.device)\n",
    "\n",
    "        # Assumes source and target vocabularies share the <SOS> index (both 1).\n",
    "        input = src[:, 0]\n",
    "\n",
    "        for t in range(1, max_len):\n",
    "            output, hidden, cell = self.decoder(\n",
    "                input, hidden, cell, encoder_outputs, encoder_mask\n",
    "            )\n",
    "            top1 = output.argmax(1)\n",
    "            trg[:, t] = top1\n",
    "            input = top1\n",
    "\n",
    "        # Zero out every token after the first <EOS>.\n",
    "        for i in range(trg.shape[0]):\n",
    "            index_eos = torch.where(trg[i] == eos)[0]\n",
    "            if index_eos.shape[0] > 0:\n",
    "                trg[i, index_eos[0] + 1 :] = 0\n",
    "\n",
    "        return trg"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Moses tools are shared, module-level singletons.\n",
    "tokenizer = sacremoses.MosesTokenizer()\n",
    "detokenizer = sacremoses.MosesDetokenizer()\n",
    "normalizer = sacremoses.MosesPunctNormalizer(\n",
    "    pre_replace_unicode_punct=False,\n",
    "    post_remove_control_chars=False,\n",
    ")\n",
    "\n",
    "\n",
    "def preprocess(text: str, return_str=True) -> str:\n",
    "    \"\"\"Normalize punctuation, then tokenize.\n",
    "\n",
    "    :param text: raw input sentence\n",
    "    :param return_str: if True return a single string, else a token list\n",
    "    \"\"\"\n",
    "    text = normalizer.normalize(text)\n",
    "    # Tokenize the text\n",
    "    tokens = tokenizer.tokenize(text, return_str=return_str)\n",
    "\n",
    "    return tokens\n",
    "\n",
    "\n",
    "def postprocess(tokens: List[str]) -> str:\n",
    "    \"\"\"Join tokens back into a string (Moses detokenization intentionally disabled).\"\"\"\n",
    "    # Detokenize the tokens\n",
    "    # text = detokenizer.detokenize(tokens, return_str=True)\n",
    "\n",
    "    return \" \".join(tokens)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# nltk.download(\"wordnet\")\n",
    "\n",
    "def given_items_percentage(references: List[str], candidate: str) -> Tuple[int, int]:\n",
    "    \"\"\"Count candidate tokens that also appear in the references.\n",
    "\n",
    "    Returns (matching token count, candidate token count).\n",
    "    \"\"\"\n",
    "    candidate_tokens = candidate.split()\n",
    "    # A set makes the membership test O(1) per token.\n",
    "    reference_tokens = set(chain.from_iterable(ref.split() for ref in references))\n",
    "\n",
    "    if not candidate_tokens:\n",
    "        # Fix: callers unpack two values; the original bare `return 0`\n",
    "        # raised TypeError for empty candidates.\n",
    "        return 0, 0\n",
    "\n",
    "    count = sum(1 for token in candidate_tokens if token in reference_tokens)\n",
    "    return count, len(candidate_tokens)\n",
    "\n",
    "\n",
    "def corpus_given_items_percentage(\n",
    "    references: List[List[str]], candidates: List[str]\n",
    ") -> float:\n",
    "    \"\"\"Average fraction of candidate tokens that are present in the references.\"\"\"\n",
    "    total_count = 0\n",
    "    total_length = 0\n",
    "    for reference, candidate in zip(references, candidates):\n",
    "        count, length = given_items_percentage(reference, candidate)\n",
    "        total_count += count\n",
    "        total_length += length\n",
    "\n",
    "    # Guard against an empty corpus / all-empty candidates.\n",
    "    return total_count / total_length if total_length else 0.0\n",
    "\n",
    "\n",
    "def extra_items_percentage(references: List[str], candidate: str) -> Tuple[int, int]:\n",
    "    \"\"\"Count candidate tokens that do NOT appear in the references.\n",
    "\n",
    "    Returns (non-matching token count, candidate token count).\n",
    "    \"\"\"\n",
    "    candidate_tokens = candidate.split()\n",
    "    reference_tokens = set(chain.from_iterable(ref.split() for ref in references))\n",
    "\n",
    "    if not candidate_tokens:\n",
    "        # Fix: same tuple-shape bug as given_items_percentage.\n",
    "        return 0, 0\n",
    "\n",
    "    count = sum(1 for token in candidate_tokens if token not in reference_tokens)\n",
    "    return count, len(candidate_tokens)\n",
    "\n",
    "\n",
    "def corpus_extra_items_percentage(\n",
    "    references: List[List[str]], candidates: List[str]\n",
    ") -> float:\n",
    "    \"\"\"Average fraction of candidate tokens that are absent from the references.\"\"\"\n",
    "    total_count = 0\n",
    "    total_length = 0\n",
    "    for reference, candidate in zip(references, candidates):\n",
    "        count, length = extra_items_percentage(reference, candidate)\n",
    "        total_count += count\n",
    "        total_length += length\n",
    "\n",
    "    return total_count / total_length if total_length else 0.0\n",
    "\n",
    "\n",
    "def corpus_meteor(refs: List[List[str]], hyps: List[str]) -> float:\n",
    "    \"\"\"Average sentence-level METEOR score over a corpus.\n",
    "\n",
    "    Args:\n",
    "        refs: list of reference sets; each set is a list of reference strings.\n",
    "        hyps: list of hypothesis strings, one per reference set.\n",
    "\n",
    "    Returns:\n",
    "        float: mean METEOR score, or 0 for an empty corpus.\n",
    "    \"\"\"\n",
    "    scores = []\n",
    "    for ref, hyp in zip(refs, hyps):\n",
    "        # meteor_score expects pre-tokenized references and hypothesis.\n",
    "        score = meteor_score([r.split() for r in ref], hyp.split())\n",
    "        scores.append(score)\n",
    "    return sum(scores) / len(scores) if scores else 0\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_glove_embeddings(glove_file):\n",
    "    \"\"\"Read a GloVe text file into a {word: np.ndarray(float32)} dictionary.\"\"\"\n",
    "    embeddings_dict = {}\n",
    "    with open(glove_file, \"r\", encoding=\"utf-8\") as file:\n",
    "        for line in file:\n",
    "            # Each line is: word v1 v2 ... vN\n",
    "            word, *coeffs = line.split()\n",
    "            embeddings_dict[word] = np.asarray(coeffs, dtype=\"float32\")\n",
    "    return embeddings_dict\n",
    "\n",
    "\n",
    "class Vocabulary:\n",
    "    \"\"\"Token <-> index mapping with optional pretrained (GloVe) embeddings.\"\"\"\n",
    "\n",
    "    PAD = \"<PAD>\"\n",
    "    SOS = \"<SOS>\"\n",
    "    EOS = \"<EOS>\"\n",
    "    UNK = \"<UNK>\"\n",
    "\n",
    "    def __init__(\n",
    "        self,\n",
    "        itos: Dict[int, str],\n",
    "        stoi: Dict[str, int],\n",
    "        embedding_dim: int = 100,\n",
    "        pretrained_weight: Dict[str, List[str]] = None,\n",
    "    ):\n",
    "        \"\"\"\n",
    "        :param itos: index -> token mapping\n",
    "        :param stoi: token -> index mapping (inverse of itos)\n",
    "        :param embedding_dim: dimensionality of the embedding vectors\n",
    "        :param pretrained_weight: optional token -> vector mapping (e.g. GloVe)\n",
    "        \"\"\"\n",
    "        self.itos = itos\n",
    "        self.stoi = stoi\n",
    "        self.pretrained_weight = pretrained_weight\n",
    "        self.special_tokens = [self.PAD, self.SOS, self.EOS, self.UNK]\n",
    "        self.special_indices = [self.stoi[token] for token in self.special_tokens]\n",
    "\n",
    "        if pretrained_weight:\n",
    "            # Pretrained vectors must match the requested embedding size.\n",
    "            assert len(next(iter(self.pretrained_weight.values()))) == embedding_dim\n",
    "\n",
    "        self.embedding_dim = embedding_dim\n",
    "\n",
    "    @classmethod\n",
    "    def from_text(\n",
    "        cls, sentence_list: List[str], embedding_dim: int, freq_threshold: int = 5\n",
    "    ):\n",
    "        \"\"\"Build a vocabulary from raw sentences, keeping tokens whose frequency\n",
    "        is >= freq_threshold. Indices 0-3 are reserved for special tokens.\"\"\"\n",
    "        frequencies = Counter()\n",
    "        for sentence in sentence_list:\n",
    "            tokens = preprocess(sentence, return_str=False)\n",
    "            frequencies.update(tokens)\n",
    "        stoi = {cls.PAD: 0, cls.SOS: 1, cls.EOS: 2, cls.UNK: 3}\n",
    "        idx = 4\n",
    "        for word, freq in frequencies.items():\n",
    "            if freq >= freq_threshold:\n",
    "                stoi[word] = idx\n",
    "                idx += 1\n",
    "        itos = {v: k for k, v in stoi.items()}\n",
    "        return cls(itos, stoi, embedding_dim)\n",
    "\n",
    "    def create_embedding_layer(self) -> torch.nn.Embedding:\n",
    "        \"\"\"Create the embedding layer; when pretrained vectors exist, load them\n",
    "        into rows 4+ and freeze the whole table.\"\"\"\n",
    "        embedding_dim = self.embedding_dim\n",
    "        embedding_layer = torch.nn.Embedding(self.vocab_size, embedding_dim)\n",
    "\n",
    "        if self.pretrained_weight is None:\n",
    "            return embedding_layer\n",
    "\n",
    "        weights_matrix = np.zeros((self.vocab_size, embedding_dim))\n",
    "        # Rows 4+ follow the insertion order of pretrained_weight, the same\n",
    "        # order used to build itos in from_pretrained_glove.\n",
    "        for i, (_, vector) in enumerate(self.pretrained_weight.items(), 4):\n",
    "            weights_matrix[i] = vector\n",
    "\n",
    "        weights_matrix[0] = np.zeros(embedding_dim)  # <PAD> initialized to zero\n",
    "        weights_matrix[1:4] = np.random.normal(\n",
    "            scale=0.6, size=(3, embedding_dim)\n",
    "        )  # <SOS>, <EOS>, <UNK>\n",
    "        embedding_layer.weight = torch.nn.Parameter(\n",
    "            torch.tensor(weights_matrix, dtype=torch.float32), requires_grad=False\n",
    "        )\n",
    "        # FIXME: per-row fine-tuning of only the special tokens cannot be done\n",
    "        # by toggling requires_grad on a slice -- the original code called\n",
    "        # requires_grad_(True) on an indexed *copy*, a silent no-op. The whole\n",
    "        # table stays frozen; to actually train the special rows, register a\n",
    "        # gradient hook that zeroes gradients of non-special rows.\n",
    "        return embedding_layer\n",
    "\n",
    "    @classmethod\n",
    "    def from_pretrained_glove(cls, glove_file: str, embedding_dim: int):\n",
    "        \"\"\"Build a vocabulary whose entries (from index 4) come from a GloVe file.\"\"\"\n",
    "        embeddings = load_glove_embeddings(glove_file)\n",
    "        itos = {0: cls.PAD, 1: cls.SOS, 2: cls.EOS, 3: cls.UNK}\n",
    "        # Fix: map index -> word. The original comprehension was inverted and\n",
    "        # inserted word -> index pairs into itos.\n",
    "        itos.update({idx: word for idx, word in enumerate(embeddings.keys(), start=4)})\n",
    "        stoi = {v: k for k, v in itos.items()}\n",
    "        return cls(\n",
    "            itos, stoi, embedding_dim=embedding_dim, pretrained_weight=embeddings\n",
    "        )\n",
    "\n",
    "    def numericalize(self, text, max_len=-1):\n",
    "        \"\"\"\n",
    "        Convert a text sentence to a list of indices. Add <SOS> at the beginning and <EOS> at the end of the sequence.\n",
    "\n",
    "        :param text: input sentence as a string\n",
    "        :param max_len: max length of returned tokens\n",
    "        \"\"\"\n",
    "        indices = [self.stoi[self.SOS]]\n",
    "        # Convert each word to the corresponding index. If word is not in vocab, use <UNK>\n",
    "        for word in preprocess(text, return_str=False):\n",
    "            indices.append(self.stoi.get(word, self.stoi[self.UNK]))\n",
    "        if max_len > 0:\n",
    "            indices = indices[: max_len - 1]  # leave room for the <EOS> below\n",
    "        indices.append(self.stoi[self.EOS])\n",
    "\n",
    "        return indices\n",
    "\n",
    "    def denumericalize(self, indices, remove_special_tokens=True):\n",
    "        \"\"\"Map indices back to tokens and join; specials are dropped by default.\"\"\"\n",
    "        tokens = [\n",
    "            self.itos[i]\n",
    "            for i in indices\n",
    "            if i not in self.special_indices or not remove_special_tokens\n",
    "        ]\n",
    "        return postprocess(tokens)\n",
    "\n",
    "    @property\n",
    "    def vocab_size(self):\n",
    "        return len(self.stoi)\n",
    "\n",
    "\n",
    "class Seq2SeqDataset(Dataset):\n",
    "    \"\"\"Numericalized parallel corpus, sorted by target length for tighter padding.\"\"\"\n",
    "\n",
    "    def __init__(\n",
    "        self,\n",
    "        input_texts,\n",
    "        output_texts,\n",
    "        src_vocab: Vocabulary,\n",
    "        tgt_vocab: Vocabulary,\n",
    "        max_len=150,\n",
    "    ):\n",
    "        \"\"\"\n",
    "        Numericalize and store the parallel texts.\n",
    "\n",
    "        :param input_texts: list of source sentences\n",
    "        :param output_texts: list of target sentences\n",
    "        :param src_vocab: vocabulary for the source side\n",
    "        :param tgt_vocab: vocabulary for the target side\n",
    "        :param max_len: maximum sequence length (incl. <SOS>/<EOS>)\n",
    "        \"\"\"\n",
    "        inputs = [\n",
    "            np.array(src_vocab.numericalize(text, max_len)) for text in input_texts\n",
    "        ]\n",
    "        outputs = [\n",
    "            np.array(tgt_vocab.numericalize(text, max_len)) for text in output_texts\n",
    "        ]\n",
    "        # Sort pairs by target length so batches of similar length pad less.\n",
    "        batch = list(zip(inputs, outputs))\n",
    "        batch.sort(key=lambda x: x[1].shape[0])\n",
    "        inputs, outputs = zip(*batch)\n",
    "        self.inputs = list(inputs)\n",
    "        self.outputs = list(outputs)\n",
    "\n",
    "        self.src_vocab = src_vocab\n",
    "        self.tgt_vocab = tgt_vocab\n",
    "        self.max_len = max_len\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.inputs)\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        # LongTensors are required for embedding lookup.\n",
    "        return {\n",
    "            \"input\": torch.tensor(self.inputs[idx], dtype=torch.long),\n",
    "            \"output\": torch.tensor(self.outputs[idx], dtype=torch.long),\n",
    "        }\n",
    "\n",
    "\n",
    "def data_collate_fn(batch):\n",
    "    \"\"\"Pad variable-length sequences in a batch and record original lengths.\"\"\"\n",
    "    input_seqs = [sample[\"input\"] for sample in batch]\n",
    "    target_seqs = [sample[\"output\"] for sample in batch]\n",
    "\n",
    "    return {\n",
    "        \"input\": torch.nn.utils.rnn.pad_sequence(input_seqs, batch_first=True),\n",
    "        \"output\": torch.nn.utils.rnn.pad_sequence(target_seqs, batch_first=True),\n",
    "        \"input_len\": torch.tensor([seq.size(0) for seq in input_seqs]),\n",
    "        \"output_len\": torch.tensor([seq.size(0) for seq in target_seqs]),\n",
    "    }"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# eval loop\n",
    "def evaluation(model, iterator, criterion, device=\"cpu\"):\n",
    "    \"\"\"Teacher-forced evaluation.\n",
    "\n",
    "    Returns (mean batch loss, token accuracy over non-padding positions).\n",
    "    \"\"\"\n",
    "    model.eval()\n",
    "    epoch_loss = 0\n",
    "    model.to(device)\n",
    "\n",
    "    total = 0\n",
    "    right = 0\n",
    "\n",
    "    with torch.no_grad():\n",
    "        for i, batch in enumerate(iterator):\n",
    "            src = batch[\"input\"]\n",
    "            trg = batch[\"output\"].to(device)\n",
    "            # Full teacher forcing (ratio 1) for a deterministic evaluation.\n",
    "            output = model(src.to(device), trg, 1)\n",
    "            output_dim = output.shape[-1]\n",
    "            # Fix: tensors are batch-first, so skip the first *time step*\n",
    "            # (the all-zero <SOS> position); the original `output[1:]`\n",
    "            # dropped the first batch element instead.\n",
    "            output = output[:, 1:].reshape(-1, output_dim)\n",
    "            trg = trg[:, 1:].reshape(-1)\n",
    "            loss = criterion(output, trg)\n",
    "            epoch_loss += loss.item()\n",
    "            non_pad = trg != 0\n",
    "            total += non_pad.sum().item()\n",
    "            right += ((output.argmax(1) == trg) & non_pad).sum().item()\n",
    "\n",
    "    model.train()\n",
    "    # Guards against an empty iterator / all-padding targets.\n",
    "    return epoch_loss / max(len(iterator), 1), right / max(total, 1)\n",
    "\n",
    "# training loop\n",
    "def train(\n",
    "    model,\n",
    "    iterator,\n",
    "    eval_iterator,\n",
    "    test_iterator,\n",
    "    optimizer,\n",
    "    criterion,\n",
    "    clip,\n",
    "    vocab: Vocabulary,\n",
    "    saved_folder=\"checkpoint\",\n",
    "    device=\"cpu\",\n",
    "    max_epoch=20,\n",
    "    eval_every_n_batch=100,\n",
    "    early_stopping_patience=10,\n",
    "):\n",
    "    \"\"\"Train `model`, evaluating every `eval_every_n_batch` batches.\n",
    "\n",
    "    The checkpoint with the best dev accuracy is saved to saved_folder/best.pt;\n",
    "    training early-stops after `early_stopping_patience` evaluations without\n",
    "    improvement, then the best checkpoint is reloaded and tested.\n",
    "    \"\"\"\n",
    "    writer = SummaryWriter(log_dir=os.path.join(saved_folder, \"tensorboard\"))\n",
    "    os.makedirs(saved_folder, exist_ok=True)\n",
    "    model.train()\n",
    "    model.to(device)\n",
    "\n",
    "    max_acc = 0\n",
    "    patience = 0\n",
    "    stop_early = False\n",
    "\n",
    "    for epoch in range(max_epoch):\n",
    "        epoch_loss = 0  # fix: reset per epoch (previously accumulated forever)\n",
    "        for i, batch in tqdm(enumerate(iterator), total=len(iterator)):\n",
    "            optimizer.zero_grad()\n",
    "            src = batch[\"input\"]\n",
    "            trg = batch[\"output\"].to(device)\n",
    "            output = model(src.to(device), trg)\n",
    "            output_dim = output.shape[-1]\n",
    "            # Fix: batch-first tensors -- drop the first time step, not the\n",
    "            # first batch element as the original `output[1:]` did.\n",
    "            output = output[:, 1:].reshape(-1, output_dim)\n",
    "            trg_flat = trg[:, 1:].reshape(-1)\n",
    "            loss = criterion(output, trg_flat)\n",
    "            loss.backward()\n",
    "            torch.nn.utils.clip_grad_norm_(model.parameters(), clip)\n",
    "            optimizer.step()\n",
    "            epoch_loss += loss.item()\n",
    "            writer.add_scalar(\"Loss/train\", loss.item(), epoch * len(iterator) + i + 1)\n",
    "\n",
    "            if i % eval_every_n_batch == 0 and i > 0:\n",
    "                print(f\"Current training last 100 batch: {epoch_loss / i}\")\n",
    "                eval_loss, acc = evaluation(model, eval_iterator, criterion, device)\n",
    "                print(f\"Current eval loss: {eval_loss}. acc {acc}.\")\n",
    "                writer.add_scalar(\"Loss/dev\", eval_loss, epoch * len(iterator) + i + 1)\n",
    "                writer.add_scalar(\"Acc/dev\", acc, epoch * len(iterator) + i + 1)\n",
    "                if acc > max_acc:\n",
    "                    max_acc = acc\n",
    "                    patience = 0\n",
    "                    torch.save(model, os.path.join(saved_folder, \"best.pt\"))\n",
    "                else:\n",
    "                    patience += 1\n",
    "                    if patience == early_stopping_patience:\n",
    "                        print(\"Early stopping\")\n",
    "                        stop_early = True  # fix: also leave the epoch loop\n",
    "                        break\n",
    "\n",
    "        if stop_early:\n",
    "            break\n",
    "\n",
    "        print(f\"Training epoch {epoch} done.\")\n",
    "        eval_loss, acc = evaluation(model, eval_iterator, criterion, device)\n",
    "        bleu, meteor, gip, eip = test(model, eval_iterator, vocab, device=device)\n",
    "        writer.add_scalar(\"bleu\", bleu, epoch)\n",
    "        writer.add_scalar(\"meteor\", meteor, epoch)\n",
    "        writer.add_scalar(\"Given Items Percentage\", gip, epoch)\n",
    "        writer.add_scalar(\"Extra Items Percentage\", eip, epoch)\n",
    "\n",
    "    # Fix: reload the best checkpoint from `saved_folder` instead of the\n",
    "    # hardcoded \"checkpoints/seq2seq/best.pt\" path.\n",
    "    best_path = os.path.join(saved_folder, \"best.pt\")\n",
    "    if os.path.exists(best_path):\n",
    "        model = torch.load(best_path)\n",
    "    test(model, test_iterator, vocab, device=device)\n",
    "\n",
    "\n",
    "def test(model, iterator, vocab: Vocabulary, device=\"cpu\"):\n",
    "    \"\"\"Greedy-decode the iterator and report BLEU, METEOR, GIP and EIP.\"\"\"\n",
    "    model.eval()\n",
    "    model.to(device)\n",
    "    references = []\n",
    "    predictions = []\n",
    "\n",
    "    with torch.no_grad():\n",
    "        for i, batch in enumerate(iterator):\n",
    "            src = batch[\"input\"]\n",
    "            prediction = model.generate(src.to(device))\n",
    "            output = batch[\"output\"]\n",
    "\n",
    "            for p, o in zip(prediction, output):\n",
    "                references.append([vocab.denumericalize(o.tolist())])\n",
    "                predictions.append(vocab.denumericalize(p.tolist()))\n",
    "\n",
    "    # Fix: corpus_bleu expects token lists, not raw strings -- passing the\n",
    "    # strings directly made NLTK iterate characters (character-level BLEU).\n",
    "    bleu = corpus_bleu(\n",
    "        [[r.split() for r in ref] for ref in references],\n",
    "        [p.split() for p in predictions],\n",
    "    )\n",
    "    meteor = corpus_meteor(references, predictions)\n",
    "    gip = corpus_given_items_percentage(references, predictions)\n",
    "    eip = corpus_extra_items_percentage(references, predictions)\n",
    "\n",
    "    print(\n",
    "        f\"BLEU: {bleu}, METEOR: {meteor}, Given Items Percentage: {gip}, Extra Items Percentage: {eip}\"\n",
    "    )\n",
    "    model.train()\n",
    "    return bleu, meteor, gip, eip"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# --- Global configuration ---\n",
    "MAX_LEN = 150  # max sequence length in tokens (incl. <SOS>/<EOS>)\n",
    "EPOCHS = 10\n",
    "BATCH_SIZE = 64\n",
    "\n",
    "EMBEDDING_DIM = 100\n",
    "ENCODER_HID_DIM = 256\n",
    "DECODER_HID_DIM = 256\n",
    "DROPOUT = 0.1\n",
    "DEVICE = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n",
    "\n",
    "\n",
    "def load_texts_from_csv(file_path: str) -> Tuple[List[str], List[str]]:\n",
    "    \"\"\"Load (Ingredients, Recipe) columns from a CSV, dropping rows with missing values.\"\"\"\n",
    "    dataframe = pd.read_csv(file_path)\n",
    "    dataframe = dataframe.dropna(subset=[\"Ingredients\", \"Recipe\"])\n",
    "    ingredients = dataframe[\"Ingredients\"].tolist()\n",
    "    recipes = dataframe[\"Recipe\"].tolist()\n",
    "    return ingredients, recipes\n",
    "\n",
    "\n",
    "# NOTE(review): paths are relative to the notebook's working directory.\n",
    "train_src, train_tgt = load_texts_from_csv(\"./data/train.csv\")\n",
    "dev_src, dev_tgt = load_texts_from_csv(\"./data/dev.csv\")\n",
    "test_src, test_tgt = load_texts_from_csv(\"./data/test.csv\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def run_seq2seq():\n",
    "    \"\"\"Train and evaluate the plain (no-attention) seq2seq baseline.\"\"\"\n",
    "    src_vocab = Vocabulary.from_text(train_src, embedding_dim=EMBEDDING_DIM)\n",
    "    tgt_vocab = Vocabulary.from_text(train_tgt, embedding_dim=EMBEDDING_DIM)\n",
    "\n",
    "    encoder = EncoderLSTM(\n",
    "        embedding=src_vocab.create_embedding_layer(),\n",
    "        hid_dim=ENCODER_HID_DIM,\n",
    "        n_layers=1,\n",
    "        dropout=DROPOUT,\n",
    "    )\n",
    "    decoder = DecoderLSTM(\n",
    "        embedding=tgt_vocab.create_embedding_layer(),\n",
    "        hid_dim=DECODER_HID_DIM,\n",
    "        n_layers=1,\n",
    "        dropout=DROPOUT,\n",
    "    )\n",
    "    model = Seq2Seq(encoder, decoder)\n",
    "    # NOTE(review): padding (index 0) is not excluded from the loss here;\n",
    "    # consider CrossEntropyLoss(ignore_index=0) -- confirm intended behavior.\n",
    "    criteria = torch.nn.CrossEntropyLoss()\n",
    "    optimizer = torch.optim.Adam(model.parameters())\n",
    "\n",
    "    # shuffle=False: Seq2SeqDataset pre-sorts by target length for tight padding.\n",
    "    dataset = Seq2SeqDataset(\n",
    "        train_src, train_tgt, src_vocab=src_vocab, tgt_vocab=tgt_vocab\n",
    "    )\n",
    "    dataloader = DataLoader(\n",
    "        dataset, batch_size=BATCH_SIZE, shuffle=False, collate_fn=data_collate_fn\n",
    "    )\n",
    "    dev_dataset = Seq2SeqDataset(\n",
    "        dev_src,\n",
    "        dev_tgt,\n",
    "        src_vocab=src_vocab,\n",
    "        tgt_vocab=tgt_vocab,\n",
    "    )\n",
    "    dev_dataloader = DataLoader(\n",
    "        dev_dataset, batch_size=BATCH_SIZE, shuffle=False, collate_fn=data_collate_fn\n",
    "    )\n",
    "    test_dataset = Seq2SeqDataset(\n",
    "        test_src,\n",
    "        test_tgt,\n",
    "        src_vocab=src_vocab,\n",
    "        tgt_vocab=tgt_vocab,\n",
    "    )\n",
    "    test_dataloader = DataLoader(\n",
    "        test_dataset, batch_size=BATCH_SIZE, shuffle=False, collate_fn=data_collate_fn\n",
    "    )\n",
    "    train(\n",
    "        model,\n",
    "        dataloader,\n",
    "        dev_dataloader,\n",
    "        test_dataloader,\n",
    "        optimizer,\n",
    "        criteria,\n",
    "        1,  # gradient clipping norm\n",
    "        tgt_vocab,\n",
    "        saved_folder=\"checkpoints/seq2seq\",\n",
    "        device=DEVICE,\n",
    "        max_epoch=EPOCHS,\n",
    "    )\n",
    "\n",
    "run_seq2seq()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def run_seq2seq_attention():\n",
    "    \"\"\"Train and evaluate the attention-based seq2seq model.\"\"\"\n",
    "    src_vocab = Vocabulary.from_text(train_src, embedding_dim=EMBEDDING_DIM)\n",
    "    tgt_vocab = Vocabulary.from_text(train_tgt, embedding_dim=EMBEDDING_DIM)\n",
    "\n",
    "    encoder = EncoderLSTM(\n",
    "        embedding=src_vocab.create_embedding_layer(),\n",
    "        hid_dim=ENCODER_HID_DIM,\n",
    "        n_layers=1,\n",
    "        dropout=DROPOUT,\n",
    "    )\n",
    "    attention = Attention(\n",
    "        enc_hid_dim=ENCODER_HID_DIM,\n",
    "        dec_hid_dim=DECODER_HID_DIM,\n",
    "    )\n",
    "    decoder = DecoderAttentionLSTM(\n",
    "        embedding=tgt_vocab.create_embedding_layer(),\n",
    "        output_dim=tgt_vocab.vocab_size,\n",
    "        enc_hid_dim=ENCODER_HID_DIM,\n",
    "        dec_hid_dim=DECODER_HID_DIM,\n",
    "        n_layers=1,\n",
    "        dropout=DROPOUT,\n",
    "        attention=attention,\n",
    "    )\n",
    "    model = Seq2Seq(encoder, decoder)\n",
    "    # NOTE(review): padding (index 0) is not excluded from the loss here;\n",
    "    # consider CrossEntropyLoss(ignore_index=0) -- confirm intended behavior.\n",
    "    criteria = torch.nn.CrossEntropyLoss()\n",
    "    optimizer = torch.optim.Adam(model.parameters())\n",
    "\n",
    "    # shuffle=False: Seq2SeqDataset pre-sorts by target length for tight padding.\n",
    "    dataset = Seq2SeqDataset(\n",
    "        train_src, train_tgt, src_vocab=src_vocab, tgt_vocab=tgt_vocab\n",
    "    )\n",
    "    dataloader = DataLoader(\n",
    "        dataset, batch_size=BATCH_SIZE, shuffle=False, collate_fn=data_collate_fn\n",
    "    )\n",
    "    dev_dataset = Seq2SeqDataset(\n",
    "        dev_src,\n",
    "        dev_tgt,\n",
    "        src_vocab=src_vocab,\n",
    "        tgt_vocab=tgt_vocab,\n",
    "    )\n",
    "    dev_dataloader = DataLoader(\n",
    "        dev_dataset, batch_size=BATCH_SIZE, shuffle=False, collate_fn=data_collate_fn\n",
    "    )\n",
    "    test_dataset = Seq2SeqDataset(\n",
    "        test_src,\n",
    "        test_tgt,\n",
    "        src_vocab=src_vocab,\n",
    "        tgt_vocab=tgt_vocab,\n",
    "    )\n",
    "    test_dataloader = DataLoader(\n",
    "        test_dataset, batch_size=BATCH_SIZE, shuffle=False, collate_fn=data_collate_fn\n",
    "    )\n",
    "    train(\n",
    "        model,\n",
    "        dataloader,\n",
    "        dev_dataloader,\n",
    "        test_dataloader,\n",
    "        optimizer,\n",
    "        criteria,\n",
    "        1,  # gradient clipping norm\n",
    "        tgt_vocab,\n",
    "        saved_folder=\"checkpoints/seq2seq_attention\",\n",
    "        device=DEVICE,\n",
    "        max_epoch=EPOCHS,\n",
    "    )\n",
    "    \n",
    "run_seq2seq_attention()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "def run_seq2seq_attention_with_shared_embedding():\n",
    "    \"\"\"Attention seq2seq in which the encoder and decoder share a single\n",
    "    embedding layer built over a joint source+target vocabulary.\n",
    "    \"\"\"\n",
    "    vocab = Vocabulary.from_text(train_src + train_tgt, embedding_dim=EMBEDDING_DIM)\n",
    "    shared_embedding = vocab.create_embedding_layer()\n",
    "\n",
    "    # Modules are constructed in the original order so weight init matches.\n",
    "    encoder = EncoderLSTM(\n",
    "        embedding=shared_embedding,\n",
    "        hid_dim=ENCODER_HID_DIM,\n",
    "        n_layers=1,\n",
    "        dropout=DROPOUT,\n",
    "    )\n",
    "    attn = Attention(enc_hid_dim=ENCODER_HID_DIM, dec_hid_dim=DECODER_HID_DIM)\n",
    "    decoder = DecoderAttentionLSTM(\n",
    "        embedding=shared_embedding,  # same module object as the encoder's\n",
    "        output_dim=vocab.vocab_size,\n",
    "        enc_hid_dim=ENCODER_HID_DIM,\n",
    "        dec_hid_dim=DECODER_HID_DIM,\n",
    "        n_layers=1,\n",
    "        dropout=DROPOUT,\n",
    "        attention=attn,\n",
    "    )\n",
    "    model = Seq2Seq(encoder, decoder)\n",
    "    loss_fn = torch.nn.CrossEntropyLoss()\n",
    "    optimizer = torch.optim.Adam(model.parameters())\n",
    "\n",
    "    def _loader(split_src, split_tgt):\n",
    "        # One vocabulary serves both sides; loaders stay unshuffled so runs\n",
    "        # are directly comparable across experiments.\n",
    "        ds = Seq2SeqDataset(split_src, split_tgt, src_vocab=vocab, tgt_vocab=vocab)\n",
    "        return DataLoader(\n",
    "            ds, batch_size=BATCH_SIZE, shuffle=False, collate_fn=data_collate_fn\n",
    "        )\n",
    "\n",
    "    train(\n",
    "        model,\n",
    "        _loader(train_src, train_tgt),\n",
    "        _loader(dev_src, dev_tgt),\n",
    "        _loader(test_src, test_tgt),\n",
    "        optimizer,\n",
    "        loss_fn,\n",
    "        1,\n",
    "        vocab,\n",
    "        saved_folder=\"checkpoints/seq2seq_attention_with_shared_embedding\",\n",
    "        device=DEVICE,\n",
    "        max_epoch=EPOCHS,\n",
    "    )\n",
    "\n",
    "run_seq2seq_attention_with_shared_embedding()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def run_seq2seq_attention_with_stack_layers():\n",
    "    \"\"\"Same attention seq2seq experiment, but with three stacked LSTM\n",
    "    layers in both the encoder and the decoder.\n",
    "    \"\"\"\n",
    "    src_vocab = Vocabulary.from_text(train_src, embedding_dim=EMBEDDING_DIM)\n",
    "    tgt_vocab = Vocabulary.from_text(train_tgt, embedding_dim=EMBEDDING_DIM)\n",
    "\n",
    "    # Three stacked layers on each side; everything else mirrors the\n",
    "    # single-layer attention run. Construction order matches the original\n",
    "    # so weight initialization is unchanged.\n",
    "    n_stacked = 3\n",
    "    encoder = EncoderLSTM(\n",
    "        embedding=src_vocab.create_embedding_layer(),\n",
    "        hid_dim=ENCODER_HID_DIM,\n",
    "        n_layers=n_stacked,\n",
    "        dropout=DROPOUT,\n",
    "    )\n",
    "    attn = Attention(enc_hid_dim=ENCODER_HID_DIM, dec_hid_dim=DECODER_HID_DIM)\n",
    "    decoder = DecoderAttentionLSTM(\n",
    "        embedding=tgt_vocab.create_embedding_layer(),\n",
    "        output_dim=tgt_vocab.vocab_size,\n",
    "        enc_hid_dim=ENCODER_HID_DIM,\n",
    "        dec_hid_dim=DECODER_HID_DIM,\n",
    "        n_layers=n_stacked,\n",
    "        dropout=DROPOUT,\n",
    "        attention=attn,\n",
    "    )\n",
    "    model = Seq2Seq(encoder, decoder)\n",
    "    loss_fn = torch.nn.CrossEntropyLoss()\n",
    "    optimizer = torch.optim.Adam(model.parameters())\n",
    "\n",
    "    def _split_loader(src_text, tgt_text):\n",
    "        # Splits share the training vocabularies; unshuffled for comparability.\n",
    "        ds = Seq2SeqDataset(src_text, tgt_text, src_vocab=src_vocab, tgt_vocab=tgt_vocab)\n",
    "        return DataLoader(\n",
    "            ds, batch_size=BATCH_SIZE, shuffle=False, collate_fn=data_collate_fn\n",
    "        )\n",
    "\n",
    "    loaders = [\n",
    "        _split_loader(s, t)\n",
    "        for s, t in ((train_src, train_tgt), (dev_src, dev_tgt), (test_src, test_tgt))\n",
    "    ]\n",
    "    train(\n",
    "        model,\n",
    "        *loaders,\n",
    "        optimizer,\n",
    "        loss_fn,\n",
    "        1,\n",
    "        tgt_vocab,\n",
    "        saved_folder=\"checkpoints/seq2seq_attention_with_stack_layers\",\n",
    "        device=DEVICE,\n",
    "        max_epoch=EPOCHS,\n",
    "    )\n",
    "\n",
    "run_seq2seq_attention_with_stack_layers()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class PointerGeneratorDecoder(torch.nn.Module):\n",
    "    \"\"\"One-step LSTM decoder with attention and a pointer-generator gate.\n",
    "\n",
    "    Each call decodes a single time step: it embeds the previous token,\n",
    "    advances the LSTM state, attends over the encoder outputs, and returns\n",
    "    vocabulary logits plus a scalar gate ``pointer`` in (0, 1) that the\n",
    "    surrounding model uses to mix the generated distribution with the copy\n",
    "    (attention) distribution over source positions.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(\n",
    "        self,\n",
    "        embedding,  # torch.nn.Embedding over the (shared src+tgt) vocabulary\n",
    "        output_dim,  # output vocabulary size\n",
    "        enc_hid_dim,  # width of each encoder output vector\n",
    "        dec_hid_dim,  # decoder LSTM hidden size\n",
    "        n_layers,  # number of stacked LSTM layers\n",
    "        dropout,  # dropout on embeddings and between stacked LSTM layers\n",
    "        attention,  # module: attention(dec_hidden, enc_outputs, mask) -> [batch, src len]\n",
    "    ):\n",
    "        super().__init__()\n",
    "        emb_dim = embedding.embedding_dim\n",
    "        self.output_dim = output_dim\n",
    "        self.hid_dim = dec_hid_dim\n",
    "        self.n_layers = n_layers\n",
    "        self.embedding = embedding\n",
    "        self.rnn = torch.nn.LSTM(\n",
    "            emb_dim,\n",
    "            dec_hid_dim,\n",
    "            n_layers,\n",
    "            dropout=dropout,\n",
    "            batch_first=True,\n",
    "        )\n",
    "        # Both output heads see [decoder state ; attention context] concatenated.\n",
    "        self.fc_out = torch.nn.Linear(dec_hid_dim + enc_hid_dim, output_dim)\n",
    "        self.dropout = torch.nn.Dropout(dropout)\n",
    "        self.attention = attention\n",
    "        # Scalar generation-probability head (squashed by sigmoid in forward).\n",
    "        self.pointer = torch.nn.Linear(dec_hid_dim + enc_hid_dim, 1)\n",
    "        self.sigmoid = torch.nn.Sigmoid()\n",
    "\n",
    "    def forward(self, input, hidden, cell, encoder_outputs, encoder_mask):\n",
    "        # input: [batch size] previous-token ids; hidden/cell: LSTM state.\n",
    "        # NOTE(review): hidden[-1] below selects the last *layer*, so the state\n",
    "        # is layers-first ([n layers, batch size, hid dim]) even though the\n",
    "        # LSTM is batch_first; an earlier comment here claimed batch-first state.\n",
    "        input = input.unsqueeze(1)\n",
    "        embedded = self.dropout(self.embedding(input))\n",
    "\n",
    "        # Run through rnn (one step: sequence length 1)\n",
    "        output, (hidden, cell) = self.rnn(embedded, (hidden, cell))\n",
    "\n",
    "        # Calculate attention weights over source positions from the top-layer\n",
    "        # decoder state; presumably normalized over src len -- depends on the\n",
    "        # Attention module (defined elsewhere in this notebook).\n",
    "        attention_weights = self.attention(\n",
    "            hidden[-1],\n",
    "            encoder_outputs,\n",
    "            encoder_mask,\n",
    "        )\n",
    "\n",
    "        # Apply attention weights to encoder outputs -> context vector\n",
    "        # weighted: [batch size, enc hid dim]\n",
    "        encoder_outputs = encoder_outputs.permute(0, 2, 1)\n",
    "        weighted = torch.bmm(encoder_outputs, attention_weights.unsqueeze(2)).squeeze(2)\n",
    "\n",
    "        # Prepare input for the fully connected layer\n",
    "        rnn_output = hidden[-1]\n",
    "        output = torch.cat((rnn_output, weighted), dim=1)\n",
    "        prediction = self.fc_out(output)  # raw logits: [batch size, output_dim]\n",
    "\n",
    "        # Calculate pointer-generator gate p_gen in (0, 1): [batch size, 1]\n",
    "        pointer = self.sigmoid(self.pointer(output))\n",
    "        return prediction, hidden, cell, attention_weights, pointer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class PointerGeneratorSeq2seq(Seq2Seq):\n",
    "    \"\"\"Seq2Seq variant that mixes generation with copying (pointer-generator).\n",
    "\n",
    "    Per-step distribution = pointer * softmax(logits) + (1 - pointer) *\n",
    "    attention, with the attention mass scattered onto the source token ids.\n",
    "    ``forward`` therefore returns *log-probabilities* and must be trained\n",
    "    with NLLLoss rather than CrossEntropyLoss. The scatter is only\n",
    "    meaningful if source and target share one vocabulary.\n",
    "    \"\"\"\n",
    "\n",
    "    # Floor added before log() so zero-probability entries (including the\n",
    "    # never-written step t=0) do not become -inf.\n",
    "    eps = 1e-31\n",
    "\n",
    "    def forward(self, src, trg, teacher_forcing_ratio=1):\n",
    "        batch_size = trg.shape[0]\n",
    "        trg_len = trg.shape[1]\n",
    "        # Distribution width comes from the embedding table; assumes it equals\n",
    "        # the decoder's output_dim and covers every id in src -- TODO confirm.\n",
    "        trg_vocab_size = self.decoder.embedding.num_embeddings\n",
    "\n",
    "        # outputs[:, 0] is never written and stays all-zero; assumes the loss\n",
    "        # skips position 0 -- TODO confirm against the training loop.\n",
    "        outputs = torch.zeros(batch_size, trg_len, trg_vocab_size).to(src.device)\n",
    "\n",
    "        hidden, cell, encoder_outputs = self.encoder(src)\n",
    "        encoder_mask = self.create_mask(src)  # inherited from Seq2Seq (not visible here)\n",
    "\n",
    "        # First decoder input: presumably the BOS token at trg[:, 0].\n",
    "        input = trg[:, 0]\n",
    "\n",
    "        for t in range(1, trg_len):\n",
    "            output, hidden, cell, attention_weight, pointer = self.decoder(\n",
    "                input, hidden, cell, encoder_outputs, encoder_mask\n",
    "            )\n",
    "            # Blend: pointer weights the generator, (1 - pointer) the copier.\n",
    "            output_distribution = F.softmax(output, dim=1) * pointer\n",
    "            attention_distribution = attention_weight * (1 - pointer)\n",
    "            # Add each source position's copy probability onto its token id.\n",
    "            output_distribution.scatter_add_(1, src, attention_distribution)\n",
    "\n",
    "            outputs[:, t] = output_distribution\n",
    "            teacher_force = random.random() < teacher_forcing_ratio\n",
    "            # NOTE(review): the non-teacher-forced input is the argmax of the\n",
    "            # raw generator logits, not of the mixed distribution used by\n",
    "            # generate(); with the default ratio of 1 this branch never fires.\n",
    "            top1 = output.argmax(1)\n",
    "            input = trg[:, t] if teacher_force else top1\n",
    "        return torch.log(outputs + self.eps)\n",
    "\n",
    "    def generate(self, src, max_len=150, eos=2):\n",
    "        \"\"\"Greedy-decode up to ``max_len`` steps; returns a [batch, max_len]\n",
    "        float tensor of token ids with everything after ``eos`` zeroed.\"\"\"\n",
    "        hidden, cell, encoder_outputs = self.encoder(src)\n",
    "        encoder_mask = self.create_mask(src)\n",
    "\n",
    "        batch_size = src.shape[0]\n",
    "        trg = torch.zeros(batch_size, max_len).to(src.device)\n",
    "\n",
    "        # Seeds decoding with the first *source* token -- presumably a BOS\n",
    "        # marker shared by source and target sequences; TODO confirm.\n",
    "        input = src[:, 0]\n",
    "\n",
    "        for t in range(1, max_len):\n",
    "            output, hidden, cell, attention_weight, pointer = self.decoder(\n",
    "                input, hidden, cell, encoder_outputs, encoder_mask\n",
    "            )\n",
    "            output_distribution = F.softmax(output, dim=1) * pointer\n",
    "            attention_distribution = attention_weight * (1 - pointer)\n",
    "\n",
    "            output_distribution.scatter_add_(1, src, attention_distribution)\n",
    "            # Greedy pick from the *mixed* distribution (unlike forward()).\n",
    "            top1 = output_distribution.argmax(1)\n",
    "            trg[:, t] = top1\n",
    "            input = top1\n",
    "\n",
    "        # set tokens behind eos as 0 (0 is assumed to be the pad id)\n",
    "        for i in range(trg.shape[0]):\n",
    "            index_eos = torch.where(trg[i] == eos)[0]\n",
    "            if index_eos.shape[0] > 0:\n",
    "                trg[i, index_eos[0] + 1 :] = 0\n",
    "\n",
    "        return trg"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def run_seq2seq_attention_with_pointer_generator():\n",
    "    \"\"\"Train and evaluate the pointer-generator seq2seq model.\n",
    "\n",
    "    Builds one joint source+target vocabulary so the pointer mechanism can\n",
    "    scatter attention mass directly onto source token ids, and shares a\n",
    "    single embedding layer between encoder and decoder.\n",
    "    \"\"\"\n",
    "    # The pointer-generator must share the vocabulary (and embedding layer)\n",
    "    # between encoder and decoder.\n",
    "    vocab = Vocabulary.from_text(train_src + train_tgt, embedding_dim=EMBEDDING_DIM)\n",
    "    # BUG FIX: create_embedding_layer() was previously called once for the\n",
    "    # encoder and once for the decoder, giving each side its own independent\n",
    "    # embedding despite the stated intent to share it. Create it once and\n",
    "    # pass the same module to both (as the shared-embedding run does).\n",
    "    embedding_layer = vocab.create_embedding_layer()\n",
    "\n",
    "    encoder = EncoderLSTM(\n",
    "        embedding=embedding_layer,\n",
    "        hid_dim=ENCODER_HID_DIM,\n",
    "        n_layers=1,\n",
    "        dropout=DROPOUT,\n",
    "    )\n",
    "    attention = Attention(\n",
    "        enc_hid_dim=ENCODER_HID_DIM,\n",
    "        dec_hid_dim=DECODER_HID_DIM,\n",
    "    )\n",
    "\n",
    "    decoder = PointerGeneratorDecoder(\n",
    "        embedding=embedding_layer,  # same module object as the encoder's\n",
    "        output_dim=vocab.vocab_size,\n",
    "        enc_hid_dim=ENCODER_HID_DIM,\n",
    "        dec_hid_dim=DECODER_HID_DIM,\n",
    "        n_layers=1,\n",
    "        dropout=DROPOUT,\n",
    "        attention=attention,\n",
    "    )\n",
    "    model = PointerGeneratorSeq2seq(encoder, decoder)\n",
    "    # The model returns log-probabilities, hence NLLLoss (not CrossEntropy).\n",
    "    criteria = torch.nn.NLLLoss(ignore_index=0)  # ignore padding\n",
    "    optimizer = torch.optim.Adam(model.parameters())\n",
    "\n",
    "    # All splits are indexed with the joint vocabulary; loaders unshuffled\n",
    "    # so runs are directly comparable across experiments.\n",
    "    dataset = Seq2SeqDataset(train_src, train_tgt, src_vocab=vocab, tgt_vocab=vocab)\n",
    "    dataloader = DataLoader(\n",
    "        dataset, batch_size=BATCH_SIZE, shuffle=False, collate_fn=data_collate_fn\n",
    "    )\n",
    "    dev_dataset = Seq2SeqDataset(\n",
    "        dev_src,\n",
    "        dev_tgt,\n",
    "        src_vocab=vocab,\n",
    "        tgt_vocab=vocab,\n",
    "    )\n",
    "    dev_dataloader = DataLoader(\n",
    "        dev_dataset, batch_size=BATCH_SIZE, shuffle=False, collate_fn=data_collate_fn\n",
    "    )\n",
    "    test_dataset = Seq2SeqDataset(\n",
    "        test_src,\n",
    "        test_tgt,\n",
    "        src_vocab=vocab,\n",
    "        tgt_vocab=vocab,\n",
    "    )\n",
    "    test_dataloader = DataLoader(\n",
    "        test_dataset, batch_size=BATCH_SIZE, shuffle=False, collate_fn=data_collate_fn\n",
    "    )\n",
    "    train(\n",
    "        model,\n",
    "        dataloader,\n",
    "        dev_dataloader,\n",
    "        test_dataloader,\n",
    "        optimizer,\n",
    "        criteria,\n",
    "        1,\n",
    "        vocab,\n",
    "        saved_folder=\"checkpoints/seq2seq_attention_with_pointer_generator\",\n",
    "        device=DEVICE,\n",
    "        max_epoch=EPOCHS,\n",
    "    )\n",
    "\n",
    "run_seq2seq_attention_with_pointer_generator()"
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
