{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Requirement already satisfied: torch==2.2 in /Users/Ashish.Jha/anaconda3/envs/python39/lib/python3.9/site-packages (2.2.0)\n",
      "Requirement already satisfied: filelock in /Users/Ashish.Jha/anaconda3/envs/python39/lib/python3.9/site-packages (from torch==2.2) (3.13.1)\n",
      "Requirement already satisfied: typing-extensions>=4.8.0 in /Users/Ashish.Jha/anaconda3/envs/python39/lib/python3.9/site-packages (from torch==2.2) (4.9.0)\n",
      "Requirement already satisfied: sympy in /Users/Ashish.Jha/anaconda3/envs/python39/lib/python3.9/site-packages (from torch==2.2) (1.12)\n",
      "Requirement already satisfied: networkx in /Users/Ashish.Jha/anaconda3/envs/python39/lib/python3.9/site-packages (from torch==2.2) (2.8.5)\n",
      "Requirement already satisfied: jinja2 in /Users/Ashish.Jha/anaconda3/envs/python39/lib/python3.9/site-packages (from torch==2.2) (3.1.3)\n",
      "Requirement already satisfied: fsspec in /Users/Ashish.Jha/anaconda3/envs/python39/lib/python3.9/site-packages (from torch==2.2) (2024.2.0)\n",
      "Requirement already satisfied: MarkupSafe>=2.0 in /Users/Ashish.Jha/anaconda3/envs/python39/lib/python3.9/site-packages (from jinja2->torch==2.2) (2.1.5)\n",
      "Requirement already satisfied: mpmath>=0.19 in /Users/Ashish.Jha/anaconda3/envs/python39/lib/python3.9/site-packages (from sympy->torch==2.2) (1.3.0)\n",
      "\u001b[33mDEPRECATION: pytorch-lightning 1.7.0 has a non-standard dependency specifier torch>=1.9.*. pip 24.0 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of pytorch-lightning or contact the author to suggest that they release a version with a conforming dependency specifiers. Discussion can be found at https://github.com/pypa/pip/issues/12063\u001b[0m\u001b[33m\n",
      "\u001b[0mRequirement already satisfied: torchtext==0.17 in /Users/Ashish.Jha/anaconda3/envs/python39/lib/python3.9/site-packages (0.17.0)\n",
      "Requirement already satisfied: tqdm in /Users/Ashish.Jha/anaconda3/envs/python39/lib/python3.9/site-packages (from torchtext==0.17) (4.66.2)\n",
      "Requirement already satisfied: requests in /Users/Ashish.Jha/anaconda3/envs/python39/lib/python3.9/site-packages (from torchtext==0.17) (2.31.0)\n",
      "Requirement already satisfied: torch==2.2.0 in /Users/Ashish.Jha/anaconda3/envs/python39/lib/python3.9/site-packages (from torchtext==0.17) (2.2.0)\n",
      "Requirement already satisfied: numpy in /Users/Ashish.Jha/anaconda3/envs/python39/lib/python3.9/site-packages (from torchtext==0.17) (1.26.4)\n",
      "Requirement already satisfied: torchdata==0.7.1 in /Users/Ashish.Jha/anaconda3/envs/python39/lib/python3.9/site-packages (from torchtext==0.17) (0.7.1)\n",
      "Requirement already satisfied: filelock in /Users/Ashish.Jha/anaconda3/envs/python39/lib/python3.9/site-packages (from torch==2.2.0->torchtext==0.17) (3.13.1)\n",
      "Requirement already satisfied: typing-extensions>=4.8.0 in /Users/Ashish.Jha/anaconda3/envs/python39/lib/python3.9/site-packages (from torch==2.2.0->torchtext==0.17) (4.9.0)\n",
      "Requirement already satisfied: sympy in /Users/Ashish.Jha/anaconda3/envs/python39/lib/python3.9/site-packages (from torch==2.2.0->torchtext==0.17) (1.12)\n",
      "Requirement already satisfied: networkx in /Users/Ashish.Jha/anaconda3/envs/python39/lib/python3.9/site-packages (from torch==2.2.0->torchtext==0.17) (2.8.5)\n",
      "Requirement already satisfied: jinja2 in /Users/Ashish.Jha/anaconda3/envs/python39/lib/python3.9/site-packages (from torch==2.2.0->torchtext==0.17) (3.1.3)\n",
      "Requirement already satisfied: fsspec in /Users/Ashish.Jha/anaconda3/envs/python39/lib/python3.9/site-packages (from torch==2.2.0->torchtext==0.17) (2024.2.0)\n",
      "Requirement already satisfied: urllib3>=1.25 in /Users/Ashish.Jha/anaconda3/envs/python39/lib/python3.9/site-packages (from torchdata==0.7.1->torchtext==0.17) (2.2.1)\n",
      "Requirement already satisfied: charset-normalizer<4,>=2 in /Users/Ashish.Jha/anaconda3/envs/python39/lib/python3.9/site-packages (from requests->torchtext==0.17) (3.3.2)\n",
      "Requirement already satisfied: idna<4,>=2.5 in /Users/Ashish.Jha/anaconda3/envs/python39/lib/python3.9/site-packages (from requests->torchtext==0.17) (3.6)\n",
      "Requirement already satisfied: certifi>=2017.4.17 in /Users/Ashish.Jha/anaconda3/envs/python39/lib/python3.9/site-packages (from requests->torchtext==0.17) (2024.2.2)\n",
      "Requirement already satisfied: MarkupSafe>=2.0 in /Users/Ashish.Jha/anaconda3/envs/python39/lib/python3.9/site-packages (from jinja2->torch==2.2.0->torchtext==0.17) (2.1.5)\n",
      "Requirement already satisfied: mpmath>=0.19 in /Users/Ashish.Jha/anaconda3/envs/python39/lib/python3.9/site-packages (from sympy->torch==2.2.0->torchtext==0.17) (1.3.0)\n",
      "\u001b[33mDEPRECATION: pytorch-lightning 1.7.0 has a non-standard dependency specifier torch>=1.9.*. pip 24.0 will enforce this behaviour change. A possible replacement is to upgrade to a newer version of pytorch-lightning or contact the author to suggest that they release a version with a conforming dependency specifiers. Discussion can be found at https://github.com/pypa/pip/issues/12063\u001b[0m\u001b[33m\n",
      "\u001b[0m"
     ]
    }
   ],
   "source": [
    "# Pinned installs. %pip (rather than !pip) guarantees the packages are\n",
    "# installed into this kernel's environment, not some other interpreter.\n",
    "%pip install torch==2.2\n",
    "%pip install torchtext==0.17"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Standard library\n",
    "import math\n",
    "import time\n",
    "\n",
    "# PyTorch core (Tensor, F and dataset are imported for use in later cells)\n",
    "import torch\n",
    "from torch import nn, Tensor\n",
    "import torch.nn.functional as F\n",
    "from torch.nn import TransformerEncoder, TransformerEncoderLayer\n",
    "from torch.utils.data import dataset\n",
    "\n",
    "# torchtext: Penn Treebank dataset, tokenizer and vocabulary utilities\n",
    "from torchtext.datasets import PennTreebank\n",
    "from torchtext.data.utils import get_tokenizer\n",
    "from torchtext.vocab import build_vocab_from_iterator\n",
    "\n",
    "# Force deterministic kernels so repeated runs are reproducible\n",
    "torch.use_deterministic_algorithms(True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Transformer(nn.Module):\n",
    "    # Encoder-only transformer language model: token embedding + positional\n",
    "    # encoding -> stacked TransformerEncoder layers -> linear decoder that\n",
    "    # projects each position back to vocabulary logits.\n",
    "    def __init__(self, num_token, num_inputs, num_heads, num_hidden, num_layers, dropout=0.3):\n",
    "        # num_token: vocabulary size; num_inputs: embedding (model) dimension;\n",
    "        # num_heads: attention heads per layer; num_hidden: feed-forward dim;\n",
    "        # num_layers: number of stacked encoder layers.\n",
    "        super(Transformer, self).__init__()\n",
    "        self.model_name = 'transformer'\n",
    "        self.position_enc = PosEnc(num_inputs, dropout)  # PosEnc is defined in a later cell\n",
    "        layers_enc = TransformerEncoderLayer(num_inputs, num_heads, num_hidden, dropout)\n",
    "        self.enc_transformer = TransformerEncoder(layers_enc, num_layers)\n",
    "        self.enc = nn.Embedding(num_token, num_inputs)\n",
    "        self.num_inputs = num_inputs\n",
    "        self.dec = nn.Linear(num_inputs, num_token)\n",
    "        self.init_params()\n",
    "\n",
    "    def init_params(self):\n",
    "        # Uniform init in [-0.12, 0.12] for embedding and decoder weights;\n",
    "        # decoder bias starts at zero.\n",
    "        initial_rng = 0.12\n",
    "        self.enc.weight.data.uniform_(-initial_rng, initial_rng)\n",
    "        self.dec.bias.data.zero_()\n",
    "        self.dec.weight.data.uniform_(-initial_rng, initial_rng)\n",
    "\n",
    "    def forward(self, source, mask_source):\n",
    "        # Scale embeddings by sqrt(d_model) before adding positional encoding.\n",
    "        source = self.enc(source) * math.sqrt(self.num_inputs)\n",
    "        source = self.position_enc(source)\n",
    "        op = self.enc_transformer(source, mask_source)\n",
    "        # Raw logits: no softmax here, CrossEntropyLoss (used later) applies it.\n",
    "        op = self.dec(op)\n",
    "        return op\n",
    "\n",
    "def gen_sqr_nxt_mask(size):\n",
    "    # Causal (next-token) mask: upper triangle above the diagonal is -inf,\n",
    "    # so position i can only attend to positions <= i.\n",
    "    msk = torch.triu(torch.ones(size, size) * float('-inf'), diagonal=1)\n",
    "    return msk"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "class PosEnc(nn.Module):\n",
    "    # Sinusoidal positional encoding followed by dropout. The buffer is built\n",
    "    # as (size_limit, 1, d_m) so it broadcasts over a (seq_len, batch, d_m)\n",
    "    # input -- assumes the non-batch_first layout used by the encoder above.\n",
    "    def __init__(self, d_m, dropout=0.2, size_limit=5000):\n",
    "        # d_m: model/embedding dimension (assumed even -- the 1::2 cos slice\n",
    "        # would mismatch for odd d_m; d_m=256 here). size_limit: max seq length.\n",
    "        super(PosEnc, self).__init__()\n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "        p_enc = torch.zeros(size_limit, 1, d_m)\n",
    "        pos = torch.arange(size_limit, dtype=torch.float).unsqueeze(1)\n",
    "        # Geometric frequency schedule: divider[i] = 10000^(-2i/d_m)\n",
    "        divider = torch.exp(torch.arange(0, d_m, 2).float() * (-math.log(10000.0) / d_m))\n",
    "        p_enc[:, 0, 0::2] = torch.sin(pos * divider)  # even channels: sine\n",
    "        p_enc[:, 0, 1::2] = torch.cos(pos * divider)  # odd channels: cosine\n",
    "        # Registered as a buffer (not a Parameter): moves with .to(device),\n",
    "        # saved in state_dict, excluded from the optimizer.\n",
    "        self.register_buffer('p_enc', p_enc)\n",
    "        \n",
    "    def forward(self, x):\n",
    "        # Add the first x.size(0) positional vectors, then apply dropout.\n",
    "        return self.dropout(x + self.p_enc[:x.size(0)])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the vocabulary from the PTB training split with a basic English tokenizer;\n",
    "# unknown tokens map to <unk>.\n",
    "tr_iter = PennTreebank(split='train')\n",
    "tkzer = get_tokenizer('basic_english')\n",
    "vocabulary = build_vocab_from_iterator(map(tkzer, tr_iter), specials=['<unk>'])\n",
    "vocabulary.set_default_index(vocabulary['<unk>'])\n",
    "\n",
    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "\n",
    "def process_data(raw_text):\n",
    "    # Numericalise each line, then concatenate all non-empty lines into one\n",
    "    # flat 1-D LongTensor of token ids.\n",
    "    numericalised_text = [torch.tensor(vocabulary(tkzer(text)), dtype=torch.long) for text in raw_text]\n",
    "    return torch.cat(tuple(filter(lambda t: t.numel() > 0, numericalised_text)))\n",
    "\n",
    "# Re-instantiate the splits: the earlier tr_iter was consumed building the vocab.\n",
    "tr_iter, val_iter, te_iter = PennTreebank()\n",
    "training_text = process_data(tr_iter)\n",
    "validation_text = process_data(val_iter)\n",
    "testing_text = process_data(te_iter)\n",
    "\n",
    "def gen_batches(text_dataset, batch_size):\n",
    "    # Reshape the flat token stream to (num_batches, batch_size): trim the\n",
    "    # tail that does not fill a full column, then transpose so dim 0 is the\n",
    "    # sequence dimension expected by the (non batch_first) transformer.\n",
    "    num_batches = text_dataset.size(0) // batch_size\n",
    "    text_dataset = text_dataset[:num_batches * batch_size]\n",
    "    text_dataset = text_dataset.view(batch_size, num_batches).t().contiguous()\n",
    "    return text_dataset.to(device)\n",
    "\n",
    "training_batch_size = 32\n",
    "evaluation_batch_size = 16\n",
    "\n",
    "training_data = gen_batches(training_text, training_batch_size)\n",
    "validation_data = gen_batches(validation_text, evaluation_batch_size)\n",
    "testing_data = gen_batches(testing_text, evaluation_batch_size)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "max_seq_len = 64\n",
    "def return_batch(src, k):\n",
    "    # Slice a (data, target) pair starting at row k: targets are the same\n",
    "    # tokens shifted one position ahead (next-token prediction). min() keeps\n",
    "    # the final, shorter batch in range; labels are flattened to the 1-D\n",
    "    # target shape CrossEntropyLoss expects.\n",
    "    sequence_length = min(max_seq_len, len(src) - 1 - k)\n",
    "    sequence_data = src[k:k+sequence_length]\n",
    "    sequence_label = src[k+1:k+1+sequence_length].reshape(-1)\n",
    "    return sequence_data, sequence_label"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Users/Ashish.Jha/anaconda3/envs/python39/lib/python3.9/site-packages/torch/nn/modules/transformer.py:286: UserWarning: enable_nested_tensor is True, but self.use_nested_tensor is False because encoder_layer.self_attn.batch_first was not True(use batch_first for better inference performance)\n",
      "  warnings.warn(f\"enable_nested_tensor is True, but self.use_nested_tensor is False because {why_not_sparsity_fast_path}\")\n"
     ]
    }
   ],
   "source": [
    "num_tokens = len(vocabulary) # vocabulary size\n",
    "embedding_size = 256 # dimension of embedding layer\n",
    "num_hidden_params = 256 # transformer encoder's hidden (feed forward) layer dimension\n",
    "num_layers = 2 # num of transformer encoder layers within transformer encoder\n",
    "num_heads = 2 # num of heads in (multi head) attention models\n",
    "dropout = 0.25 # value (fraction) of dropout\n",
    "loss_func = nn.CrossEntropyLoss()\n",
    "lrate = 10.0 # learning rate\n",
    "# NOTE(review): lr=10.0 is very aggressive for plain SGD -- the saved training\n",
    "# logs below show loss flat at ~6.55 for 45 epochs (no convergence); consider\n",
    "# a much smaller rate, or verify gradient clipping compensates. TODO confirm.\n",
    "transformer_model = Transformer(num_tokens, embedding_size, num_heads, num_hidden_params, num_layers, \n",
    "                                     dropout).to(device)\n",
    "optim_module = torch.optim.SGD(transformer_model.parameters(), lr=lrate)\n",
    "# StepLR: multiply the learning rate by 0.88 after every epoch (step_size=1.0)\n",
    "sched_module = torch.optim.lr_scheduler.StepLR(optim_module, 1.0, gamma=0.88)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train_model():\n",
    "    # One epoch of training over training_data. Relies on notebook globals:\n",
    "    # transformer_model, training_data, max_seq_len, loss_func, optim_module,\n",
    "    # num_tokens, device -- and `ep` (current epoch number), which must be set\n",
    "    # by the caller's epoch loop before calling this function.\n",
    "    transformer_model.train()\n",
    "    loss_total = 0.\n",
    "    mask_source = gen_sqr_nxt_mask(max_seq_len).to(device)\n",
    "    num_batches = len(training_data) // max_seq_len\n",
    "    for b, i in enumerate(range(0, training_data.size(0) - 1, max_seq_len)):\n",
    "        train_data_batch, train_label_batch = return_batch(training_data, i)\n",
    "        sequence_length = train_data_batch.size(0)\n",
    "        # Slice into a local for the short final batch so the full-size mask\n",
    "        # is never clobbered (the original reassigned mask_source itself).\n",
    "        msk = mask_source[:sequence_length, :sequence_length] if sequence_length != max_seq_len else mask_source\n",
    "        op = transformer_model(train_data_batch, msk)\n",
    "        loss_curr = loss_func(op.view(-1, num_tokens), train_label_batch)\n",
    "        optim_module.zero_grad()\n",
    "        loss_curr.backward()\n",
    "        # Clip gradients to stabilise training at the large learning rate.\n",
    "        torch.nn.utils.clip_grad_norm_(transformer_model.parameters(), 0.6)\n",
    "        optim_module.step()\n",
    "\n",
    "        loss_total += loss_curr.item()\n",
    "        interval = 100\n",
    "        if b % interval == 0 and b > 0:\n",
    "            # Report loss/perplexity averaged over the last `interval` batches.\n",
    "            loss_interval = loss_total / interval\n",
    "            print(f\"epoch {ep}, {b}/{num_batches} batches, training loss {loss_interval:.2f}, training perplexity {math.exp(loss_interval):.2f}\")\n",
    "            loss_total = 0\n",
    "\n",
    "def eval_model(eval_model_obj, eval_data_source):\n",
    "    # Average per-token loss over eval_data_source: each batch's loss is\n",
    "    # weighted by its sequence length, then normalised by total token count.\n",
    "    eval_model_obj.eval() \n",
    "    loss_total = 0.\n",
    "    mask_source = gen_sqr_nxt_mask(max_seq_len).to(device)\n",
    "    with torch.no_grad():\n",
    "        for j in range(0, eval_data_source.size(0) - 1, max_seq_len):\n",
    "            eval_data, eval_label = return_batch(eval_data_source, j)\n",
    "            sequence_length = eval_data.size(0)\n",
    "            # Local slice for the short final batch; full mask stays intact.\n",
    "            msk = mask_source[:sequence_length, :sequence_length] if sequence_length != max_seq_len else mask_source\n",
    "            op = eval_model_obj(eval_data, msk)\n",
    "            op_flat = op.view(-1, num_tokens)\n",
    "            loss_total += sequence_length * loss_func(op_flat, eval_label).item()\n",
    "    return loss_total / (len(eval_data_source) - 1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch 1, 100/451 batches, training loss 6.64, training perplexity 761.69\n",
      "epoch 1, 200/451 batches, training loss 6.59, training perplexity 724.72\n",
      "epoch 1, 300/451 batches, training loss 6.55, training perplexity 700.21\n",
      "epoch 1, 400/451 batches, training loss 6.55, training perplexity 701.72\n",
      "\n",
      "epoch 1, validation loss 6.56, validation perplexity 706.02\n",
      "\n",
      "epoch 2, 100/451 batches, training loss 6.64, training perplexity 761.70\n",
      "epoch 2, 200/451 batches, training loss 6.59, training perplexity 724.71\n",
      "epoch 2, 300/451 batches, training loss 6.55, training perplexity 700.20\n",
      "epoch 2, 400/451 batches, training loss 6.55, training perplexity 701.70\n",
      "\n",
      "epoch 2, validation loss 6.56, validation perplexity 706.02\n",
      "\n",
      "epoch 3, 100/451 batches, training loss 6.64, training perplexity 761.70\n",
      "epoch 3, 200/451 batches, training loss 6.59, training perplexity 724.71\n",
      "epoch 3, 300/451 batches, training loss 6.55, training perplexity 700.21\n",
      "epoch 3, 400/451 batches, training loss 6.55, training perplexity 701.71\n",
      "\n",
      "epoch 3, validation loss 6.56, validation perplexity 706.02\n",
      "\n",
      "epoch 4, 100/451 batches, training loss 6.64, training perplexity 761.69\n",
      "epoch 4, 200/451 batches, training loss 6.59, training perplexity 724.69\n",
      "epoch 4, 300/451 batches, training loss 6.55, training perplexity 700.19\n",
      "epoch 4, 400/451 batches, training loss 6.55, training perplexity 701.72\n",
      "\n",
      "epoch 4, validation loss 6.56, validation perplexity 706.02\n",
      "\n",
      "epoch 5, 100/451 batches, training loss 6.64, training perplexity 761.69\n",
      "epoch 5, 200/451 batches, training loss 6.59, training perplexity 724.70\n",
      "epoch 5, 300/451 batches, training loss 6.55, training perplexity 700.19\n",
      "epoch 5, 400/451 batches, training loss 6.55, training perplexity 701.70\n",
      "\n",
      "epoch 5, validation loss 6.56, validation perplexity 706.02\n",
      "\n",
      "epoch 6, 100/451 batches, training loss 6.64, training perplexity 761.70\n",
      "epoch 6, 200/451 batches, training loss 6.59, training perplexity 724.70\n",
      "epoch 6, 300/451 batches, training loss 6.55, training perplexity 700.18\n",
      "epoch 6, 400/451 batches, training loss 6.55, training perplexity 701.72\n",
      "\n",
      "epoch 6, validation loss 6.56, validation perplexity 706.03\n",
      "\n",
      "epoch 7, 100/451 batches, training loss 6.64, training perplexity 761.67\n",
      "epoch 7, 200/451 batches, training loss 6.59, training perplexity 724.70\n",
      "epoch 7, 300/451 batches, training loss 6.55, training perplexity 700.18\n",
      "epoch 7, 400/451 batches, training loss 6.55, training perplexity 701.71\n",
      "\n",
      "epoch 7, validation loss 6.56, validation perplexity 706.03\n",
      "\n",
      "epoch 8, 100/451 batches, training loss 6.64, training perplexity 761.68\n",
      "epoch 8, 200/451 batches, training loss 6.59, training perplexity 724.68\n",
      "epoch 8, 300/451 batches, training loss 6.55, training perplexity 700.17\n",
      "epoch 8, 400/451 batches, training loss 6.55, training perplexity 701.70\n",
      "\n",
      "epoch 8, validation loss 6.56, validation perplexity 706.03\n",
      "\n",
      "epoch 9, 100/451 batches, training loss 6.64, training perplexity 761.69\n",
      "epoch 9, 200/451 batches, training loss 6.59, training perplexity 724.68\n",
      "epoch 9, 300/451 batches, training loss 6.55, training perplexity 700.16\n",
      "epoch 9, 400/451 batches, training loss 6.55, training perplexity 701.70\n",
      "\n",
      "epoch 9, validation loss 6.56, validation perplexity 706.03\n",
      "\n",
      "epoch 10, 100/451 batches, training loss 6.64, training perplexity 761.68\n",
      "epoch 10, 200/451 batches, training loss 6.59, training perplexity 724.68\n",
      "epoch 10, 300/451 batches, training loss 6.55, training perplexity 700.17\n",
      "epoch 10, 400/451 batches, training loss 6.55, training perplexity 701.69\n",
      "\n",
      "epoch 10, validation loss 6.56, validation perplexity 706.03\n",
      "\n",
      "epoch 11, 100/451 batches, training loss 6.64, training perplexity 761.68\n",
      "epoch 11, 200/451 batches, training loss 6.59, training perplexity 724.68\n",
      "epoch 11, 300/451 batches, training loss 6.55, training perplexity 700.16\n",
      "epoch 11, 400/451 batches, training loss 6.55, training perplexity 701.70\n",
      "\n",
      "epoch 11, validation loss 6.56, validation perplexity 706.03\n",
      "\n",
      "epoch 12, 100/451 batches, training loss 6.64, training perplexity 761.68\n",
      "epoch 12, 200/451 batches, training loss 6.59, training perplexity 724.68\n",
      "epoch 12, 300/451 batches, training loss 6.55, training perplexity 700.15\n",
      "epoch 12, 400/451 batches, training loss 6.55, training perplexity 701.69\n",
      "\n",
      "epoch 12, validation loss 6.56, validation perplexity 706.03\n",
      "\n",
      "epoch 13, 100/451 batches, training loss 6.64, training perplexity 761.67\n",
      "epoch 13, 200/451 batches, training loss 6.59, training perplexity 724.67\n",
      "epoch 13, 300/451 batches, training loss 6.55, training perplexity 700.15\n",
      "epoch 13, 400/451 batches, training loss 6.55, training perplexity 701.70\n",
      "\n",
      "epoch 13, validation loss 6.56, validation perplexity 706.03\n",
      "\n",
      "epoch 14, 100/451 batches, training loss 6.64, training perplexity 761.70\n",
      "epoch 14, 200/451 batches, training loss 6.59, training perplexity 724.68\n",
      "epoch 14, 300/451 batches, training loss 6.55, training perplexity 700.16\n",
      "epoch 14, 400/451 batches, training loss 6.55, training perplexity 701.69\n",
      "\n",
      "epoch 14, validation loss 6.56, validation perplexity 706.04\n",
      "\n",
      "epoch 15, 100/451 batches, training loss 6.64, training perplexity 761.69\n",
      "epoch 15, 200/451 batches, training loss 6.59, training perplexity 724.67\n",
      "epoch 15, 300/451 batches, training loss 6.55, training perplexity 700.16\n",
      "epoch 15, 400/451 batches, training loss 6.55, training perplexity 701.70\n",
      "\n",
      "epoch 15, validation loss 6.56, validation perplexity 706.04\n",
      "\n",
      "epoch 16, 100/451 batches, training loss 6.64, training perplexity 761.69\n",
      "epoch 16, 200/451 batches, training loss 6.59, training perplexity 724.67\n",
      "epoch 16, 300/451 batches, training loss 6.55, training perplexity 700.16\n",
      "epoch 16, 400/451 batches, training loss 6.55, training perplexity 701.70\n",
      "\n",
      "epoch 16, validation loss 6.56, validation perplexity 706.04\n",
      "\n",
      "epoch 17, 100/451 batches, training loss 6.64, training perplexity 761.69\n",
      "epoch 17, 200/451 batches, training loss 6.59, training perplexity 724.67\n",
      "epoch 17, 300/451 batches, training loss 6.55, training perplexity 700.15\n",
      "epoch 17, 400/451 batches, training loss 6.55, training perplexity 701.71\n",
      "\n",
      "epoch 17, validation loss 6.56, validation perplexity 706.04\n",
      "\n",
      "epoch 18, 100/451 batches, training loss 6.64, training perplexity 761.68\n",
      "epoch 18, 200/451 batches, training loss 6.59, training perplexity 724.66\n",
      "epoch 18, 300/451 batches, training loss 6.55, training perplexity 700.16\n",
      "epoch 18, 400/451 batches, training loss 6.55, training perplexity 701.69\n",
      "\n",
      "epoch 18, validation loss 6.56, validation perplexity 706.04\n",
      "\n",
      "epoch 19, 100/451 batches, training loss 6.64, training perplexity 761.69\n",
      "epoch 19, 200/451 batches, training loss 6.59, training perplexity 724.66\n",
      "epoch 19, 300/451 batches, training loss 6.55, training perplexity 700.14\n",
      "epoch 19, 400/451 batches, training loss 6.55, training perplexity 701.68\n",
      "\n",
      "epoch 19, validation loss 6.56, validation perplexity 706.04\n",
      "\n",
      "epoch 20, 100/451 batches, training loss 6.64, training perplexity 761.68\n",
      "epoch 20, 200/451 batches, training loss 6.59, training perplexity 724.67\n",
      "epoch 20, 300/451 batches, training loss 6.55, training perplexity 700.16\n",
      "epoch 20, 400/451 batches, training loss 6.55, training perplexity 701.70\n",
      "\n",
      "epoch 20, validation loss 6.56, validation perplexity 706.04\n",
      "\n",
      "epoch 21, 100/451 batches, training loss 6.64, training perplexity 761.68\n",
      "epoch 21, 200/451 batches, training loss 6.59, training perplexity 724.67\n",
      "epoch 21, 300/451 batches, training loss 6.55, training perplexity 700.14\n",
      "epoch 21, 400/451 batches, training loss 6.55, training perplexity 701.70\n",
      "\n",
      "epoch 21, validation loss 6.56, validation perplexity 706.04\n",
      "\n",
      "epoch 22, 100/451 batches, training loss 6.64, training perplexity 761.68\n",
      "epoch 22, 200/451 batches, training loss 6.59, training perplexity 724.66\n",
      "epoch 22, 300/451 batches, training loss 6.55, training perplexity 700.15\n",
      "epoch 22, 400/451 batches, training loss 6.55, training perplexity 701.69\n",
      "\n",
      "epoch 22, validation loss 6.56, validation perplexity 706.04\n",
      "\n",
      "epoch 23, 100/451 batches, training loss 6.64, training perplexity 761.70\n",
      "epoch 23, 200/451 batches, training loss 6.59, training perplexity 724.66\n",
      "epoch 23, 300/451 batches, training loss 6.55, training perplexity 700.18\n",
      "epoch 23, 400/451 batches, training loss 6.55, training perplexity 701.69\n",
      "\n",
      "epoch 23, validation loss 6.56, validation perplexity 706.04\n",
      "\n",
      "epoch 24, 100/451 batches, training loss 6.64, training perplexity 761.68\n",
      "epoch 24, 200/451 batches, training loss 6.59, training perplexity 724.68\n",
      "epoch 24, 300/451 batches, training loss 6.55, training perplexity 700.15\n",
      "epoch 24, 400/451 batches, training loss 6.55, training perplexity 701.70\n",
      "\n",
      "epoch 24, validation loss 6.56, validation perplexity 706.04\n",
      "\n",
      "epoch 25, 100/451 batches, training loss 6.64, training perplexity 761.69\n",
      "epoch 25, 200/451 batches, training loss 6.59, training perplexity 724.68\n",
      "epoch 25, 300/451 batches, training loss 6.55, training perplexity 700.16\n",
      "epoch 25, 400/451 batches, training loss 6.55, training perplexity 701.71\n",
      "\n",
      "epoch 25, validation loss 6.56, validation perplexity 706.04\n",
      "\n",
      "epoch 26, 100/451 batches, training loss 6.64, training perplexity 761.69\n",
      "epoch 26, 200/451 batches, training loss 6.59, training perplexity 724.67\n",
      "epoch 26, 300/451 batches, training loss 6.55, training perplexity 700.15\n",
      "epoch 26, 400/451 batches, training loss 6.55, training perplexity 701.69\n",
      "\n",
      "epoch 26, validation loss 6.56, validation perplexity 706.04\n",
      "\n",
      "epoch 27, 100/451 batches, training loss 6.64, training perplexity 761.69\n",
      "epoch 27, 200/451 batches, training loss 6.59, training perplexity 724.67\n",
      "epoch 27, 300/451 batches, training loss 6.55, training perplexity 700.15\n",
      "epoch 27, 400/451 batches, training loss 6.55, training perplexity 701.69\n",
      "\n",
      "epoch 27, validation loss 6.56, validation perplexity 706.04\n",
      "\n",
      "epoch 28, 100/451 batches, training loss 6.64, training perplexity 761.67\n",
      "epoch 28, 200/451 batches, training loss 6.59, training perplexity 724.68\n",
      "epoch 28, 300/451 batches, training loss 6.55, training perplexity 700.15\n",
      "epoch 28, 400/451 batches, training loss 6.55, training perplexity 701.71\n",
      "\n",
      "epoch 28, validation loss 6.56, validation perplexity 706.04\n",
      "\n",
      "epoch 29, 100/451 batches, training loss 6.64, training perplexity 761.67\n",
      "epoch 29, 200/451 batches, training loss 6.59, training perplexity 724.66\n",
      "epoch 29, 300/451 batches, training loss 6.55, training perplexity 700.15\n",
      "epoch 29, 400/451 batches, training loss 6.55, training perplexity 701.70\n",
      "\n",
      "epoch 29, validation loss 6.56, validation perplexity 706.04\n",
      "\n",
      "epoch 30, 100/451 batches, training loss 6.64, training perplexity 761.68\n",
      "epoch 30, 200/451 batches, training loss 6.59, training perplexity 724.67\n",
      "epoch 30, 300/451 batches, training loss 6.55, training perplexity 700.15\n",
      "epoch 30, 400/451 batches, training loss 6.55, training perplexity 701.70\n",
      "\n",
      "epoch 30, validation loss 6.56, validation perplexity 706.04\n",
      "\n",
      "epoch 31, 100/451 batches, training loss 6.64, training perplexity 761.67\n",
      "epoch 31, 200/451 batches, training loss 6.59, training perplexity 724.67\n",
      "epoch 31, 300/451 batches, training loss 6.55, training perplexity 700.15\n",
      "epoch 31, 400/451 batches, training loss 6.55, training perplexity 701.69\n",
      "\n",
      "epoch 31, validation loss 6.56, validation perplexity 706.04\n",
      "\n",
      "epoch 32, 100/451 batches, training loss 6.64, training perplexity 761.68\n",
      "epoch 32, 200/451 batches, training loss 6.59, training perplexity 724.68\n",
      "epoch 32, 300/451 batches, training loss 6.55, training perplexity 700.16\n",
      "epoch 32, 400/451 batches, training loss 6.55, training perplexity 701.70\n",
      "\n",
      "epoch 32, validation loss 6.56, validation perplexity 706.04\n",
      "\n",
      "epoch 33, 100/451 batches, training loss 6.64, training perplexity 761.68\n",
      "epoch 33, 200/451 batches, training loss 6.59, training perplexity 724.67\n",
      "epoch 33, 300/451 batches, training loss 6.55, training perplexity 700.14\n",
      "epoch 33, 400/451 batches, training loss 6.55, training perplexity 701.70\n",
      "\n",
      "epoch 33, validation loss 6.56, validation perplexity 706.04\n",
      "\n",
      "epoch 34, 100/451 batches, training loss 6.64, training perplexity 761.68\n",
      "epoch 34, 200/451 batches, training loss 6.59, training perplexity 724.67\n",
      "epoch 34, 300/451 batches, training loss 6.55, training perplexity 700.15\n",
      "epoch 34, 400/451 batches, training loss 6.55, training perplexity 701.69\n",
      "\n",
      "epoch 34, validation loss 6.56, validation perplexity 706.04\n",
      "\n",
      "epoch 35, 100/451 batches, training loss 6.64, training perplexity 761.67\n",
      "epoch 35, 200/451 batches, training loss 6.59, training perplexity 724.67\n",
      "epoch 35, 300/451 batches, training loss 6.55, training perplexity 700.15\n",
      "epoch 35, 400/451 batches, training loss 6.55, training perplexity 701.68\n",
      "\n",
      "epoch 35, validation loss 6.56, validation perplexity 706.04\n",
      "\n",
      "epoch 36, 100/451 batches, training loss 6.64, training perplexity 761.69\n",
      "epoch 36, 200/451 batches, training loss 6.59, training perplexity 724.68\n",
      "epoch 36, 300/451 batches, training loss 6.55, training perplexity 700.15\n",
      "epoch 36, 400/451 batches, training loss 6.55, training perplexity 701.71\n",
      "\n",
      "epoch 36, validation loss 6.56, validation perplexity 706.04\n",
      "\n",
      "epoch 37, 100/451 batches, training loss 6.64, training perplexity 761.67\n",
      "epoch 37, 200/451 batches, training loss 6.59, training perplexity 724.66\n",
      "epoch 37, 300/451 batches, training loss 6.55, training perplexity 700.15\n",
      "epoch 37, 400/451 batches, training loss 6.55, training perplexity 701.69\n",
      "\n",
      "epoch 37, validation loss 6.56, validation perplexity 706.04\n",
      "\n",
      "epoch 38, 100/451 batches, training loss 6.64, training perplexity 761.67\n",
      "epoch 38, 200/451 batches, training loss 6.59, training perplexity 724.68\n",
      "epoch 38, 300/451 batches, training loss 6.55, training perplexity 700.15\n",
      "epoch 38, 400/451 batches, training loss 6.55, training perplexity 701.69\n",
      "\n",
      "epoch 38, validation loss 6.56, validation perplexity 706.04\n",
      "\n",
      "epoch 39, 100/451 batches, training loss 6.64, training perplexity 761.68\n",
      "epoch 39, 200/451 batches, training loss 6.59, training perplexity 724.68\n",
      "epoch 39, 300/451 batches, training loss 6.55, training perplexity 700.15\n",
      "epoch 39, 400/451 batches, training loss 6.55, training perplexity 701.70\n",
      "\n",
      "epoch 39, validation loss 6.56, validation perplexity 706.04\n",
      "\n",
      "epoch 40, 100/451 batches, training loss 6.64, training perplexity 761.68\n",
      "epoch 40, 200/451 batches, training loss 6.59, training perplexity 724.68\n",
      "epoch 40, 300/451 batches, training loss 6.55, training perplexity 700.16\n",
      "epoch 40, 400/451 batches, training loss 6.55, training perplexity 701.69\n",
      "\n",
      "epoch 40, validation loss 6.56, validation perplexity 706.04\n",
      "\n",
      "epoch 41, 100/451 batches, training loss 6.64, training perplexity 761.68\n",
      "epoch 41, 200/451 batches, training loss 6.59, training perplexity 724.66\n",
      "epoch 41, 300/451 batches, training loss 6.55, training perplexity 700.15\n",
      "epoch 41, 400/451 batches, training loss 6.55, training perplexity 701.71\n",
      "\n",
      "epoch 41, validation loss 6.56, validation perplexity 706.04\n",
      "\n",
      "epoch 42, 100/451 batches, training loss 6.64, training perplexity 761.69\n",
      "epoch 42, 200/451 batches, training loss 6.59, training perplexity 724.67\n",
      "epoch 42, 300/451 batches, training loss 6.55, training perplexity 700.15\n",
      "epoch 42, 400/451 batches, training loss 6.55, training perplexity 701.69\n",
      "\n",
      "epoch 42, validation loss 6.56, validation perplexity 706.04\n",
      "\n",
      "epoch 43, 100/451 batches, training loss 6.64, training perplexity 761.68\n",
      "epoch 43, 200/451 batches, training loss 6.59, training perplexity 724.66\n",
      "epoch 43, 300/451 batches, training loss 6.55, training perplexity 700.16\n",
      "epoch 43, 400/451 batches, training loss 6.55, training perplexity 701.68\n",
      "\n",
      "epoch 43, validation loss 6.56, validation perplexity 706.04\n",
      "\n",
      "epoch 44, 100/451 batches, training loss 6.64, training perplexity 761.67\n",
      "epoch 44, 200/451 batches, training loss 6.59, training perplexity 724.67\n",
      "epoch 44, 300/451 batches, training loss 6.55, training perplexity 700.14\n",
      "epoch 44, 400/451 batches, training loss 6.55, training perplexity 701.69\n",
      "\n",
      "epoch 44, validation loss 6.56, validation perplexity 706.04\n",
      "\n",
      "epoch 45, 100/451 batches, training loss 6.64, training perplexity 761.68\n",
      "epoch 45, 200/451 batches, training loss 6.59, training perplexity 724.67\n",
      "epoch 45, 300/451 batches, training loss 6.55, training perplexity 700.15\n",
      "epoch 45, 400/451 batches, training loss 6.55, training perplexity 701.70\n",
      "\n",
      "epoch 45, validation loss 6.56, validation perplexity 706.04\n",
      "\n",
      "epoch 46, 100/451 batches, training loss 6.64, training perplexity 761.68\n",
      "epoch 46, 200/451 batches, training loss 6.59, training perplexity 724.67\n",
      "epoch 46, 300/451 batches, training loss 6.55, training perplexity 700.16\n",
      "epoch 46, 400/451 batches, training loss 6.55, training perplexity 701.69\n",
      "\n",
      "epoch 46, validation loss 6.56, validation perplexity 706.04\n",
      "\n",
      "epoch 47, 100/451 batches, training loss 6.64, training perplexity 761.69\n",
      "epoch 47, 200/451 batches, training loss 6.59, training perplexity 724.67\n",
      "epoch 47, 300/451 batches, training loss 6.55, training perplexity 700.15\n",
      "epoch 47, 400/451 batches, training loss 6.55, training perplexity 701.69\n",
      "\n",
      "epoch 47, validation loss 6.56, validation perplexity 706.04\n",
      "\n",
      "epoch 48, 100/451 batches, training loss 6.64, training perplexity 761.69\n",
      "epoch 48, 200/451 batches, training loss 6.59, training perplexity 724.67\n",
      "epoch 48, 300/451 batches, training loss 6.55, training perplexity 700.15\n",
      "epoch 48, 400/451 batches, training loss 6.55, training perplexity 701.69\n",
      "\n",
      "epoch 48, validation loss 6.56, validation perplexity 706.04\n",
      "\n",
      "epoch 49, 100/451 batches, training loss 6.64, training perplexity 761.69\n",
      "epoch 49, 200/451 batches, training loss 6.59, training perplexity 724.67\n",
      "epoch 49, 300/451 batches, training loss 6.55, training perplexity 700.17\n",
      "epoch 49, 400/451 batches, training loss 6.55, training perplexity 701.69\n",
      "\n",
      "epoch 49, validation loss 6.56, validation perplexity 706.04\n",
      "\n",
      "epoch 50, 100/451 batches, training loss 6.64, training perplexity 761.68\n",
      "epoch 50, 200/451 batches, training loss 6.59, training perplexity 724.68\n",
      "epoch 50, 300/451 batches, training loss 6.55, training perplexity 700.14\n",
      "epoch 50, 400/451 batches, training loss 6.55, training perplexity 701.70\n",
      "\n",
      "epoch 50, validation loss 6.56, validation perplexity 706.04\n",
      "\n"
     ]
    }
   ],
   "source": [
    "import copy  # local import: needed to snapshot the best model\n",
    "\n",
    "min_validation_loss = float(\"inf\")\n",
    "eps = 50  # total number of training epochs\n",
    "best_model_so_far = None\n",
    "\n",
    "for ep in range(1, eps + 1):\n",
    "    train_model()\n",
    "    validation_loss = eval_model(transformer_model, validation_data)\n",
    "    print()\n",
    "    print(f\"epoch {ep:}, validation loss {validation_loss:.2f}, validation perplexity {math.exp(validation_loss):.2f}\")\n",
    "    print()\n",
    "\n",
    "    if validation_loss < min_validation_loss:\n",
    "        min_validation_loss = validation_loss\n",
    "        # BUG FIX: plain assignment only aliased the live model object, so\n",
    "        # `best_model_so_far` kept changing as later epochs trained on;\n",
    "        # deep-copy to freeze the checkpoint at this (best) epoch.\n",
    "        best_model_so_far = copy.deepcopy(transformer_model)\n",
    "\n",
    "    sched_module.step()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "testing loss 6.49, testing perplexity 659.74\n"
     ]
    }
   ],
   "source": [
    "# evaluate the best checkpoint on the held-out test split;\n",
    "# perplexity is exp(mean cross-entropy loss)\n",
    "testing_loss = eval_model(best_model_so_far, testing_data)\n",
    "print(f\"testing loss {testing_loss:.2f}, testing perplexity {math.exp(testing_loss):.2f}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "# persist only the weights (state_dict), not the full pickled module,\n",
    "# so the checkpoint can be reloaded into a freshly constructed model\n",
    "mdl_pth = './transformer.pth'\n",
    "torch.save(best_model_so_far.state_dict(), mdl_pth)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<All keys matched successfully>"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# load the best trained model into a freshly constructed Transformer\n",
    "transformer_cached = Transformer(num_tokens, embedding_size, num_heads, num_hidden_params, num_layers, \n",
    "                                     dropout).to(device)\n",
    "# map_location makes the checkpoint loadable even when the saving and\n",
    "# loading machines use different devices (e.g. saved on GPU, loaded on CPU)\n",
    "transformer_cached.load_state_dict(torch.load(mdl_pth, map_location=device))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "They are the the the the\n"
     ]
    }
   ],
   "source": [
    "# greedy next-word generation with the cached transformer\n",
    "ln = 5  # number of words to generate\n",
    "sntc = 'They are _'  # '_' is a placeholder kept at the end of the prompt\n",
    "sntc_split = sntc.split()\n",
    "torch.manual_seed(34)\n",
    "mask_source = gen_sqr_nxt_mask(max_seq_len).to(device)\n",
    "with torch.no_grad():\n",
    "    for i in range(ln):\n",
    "        sntc = ' '.join(sntc_split)\n",
    "        txt_ds = Tensor(vocabulary(sntc_split)).unsqueeze(0).to(torch.long)\n",
    "        num_b = txt_ds.size(0)\n",
    "        txt_ds = txt_ds.narrow(0, 0, num_b)\n",
    "        txt_ds = txt_ds.view(1, -1).t().contiguous().to(device)\n",
    "        ev_X, _ = return_batch(txt_ds, i+1)\n",
    "        sequence_length = ev_X.size(0)\n",
    "        # BUG FIX: slice into a local variable; reassigning mask_source\n",
    "        # shrank the cached mask permanently, so later (longer) steps\n",
    "        # attended with a mask smaller than their sequence\n",
    "        msk = mask_source\n",
    "        if sequence_length != max_seq_len:\n",
    "            msk = mask_source[:sequence_length, :sequence_length]\n",
    "        op = transformer_cached(ev_X, msk)\n",
    "        op_flat = op.view(-1, num_tokens)\n",
    "        res = vocabulary.get_itos()[op_flat.argmax(1)[0]]\n",
    "        sntc_split.insert(-1, res)\n",
    "# BUG FIX: rebuild the sentence after the loop so the word generated in the\n",
    "# final iteration is included; drop the trailing '_' placeholder token\n",
    "print(' '.join(sntc_split[:-1]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
