{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "b734701c",
   "metadata": {},
   "source": [
    "# Homework 5\n",
    "- English to Chinese (Traditional) Translation\n",
    "  - Input: an English sentence         (e.g.\t\ttom is a student .)\n",
    "  - Output: the Chinese translation  (e.g. \t\t湯姆 是 個 學生 。)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3223aa48",
   "metadata": {},
   "outputs": [],
   "source": [
    "!nvidia-smi"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "067ce918",
   "metadata": {},
   "source": [
    "## Attention Block"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "b9049159",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[[ 1.5690,  0.6026,  0.3232,  ..., -0.7386,  0.1963,  0.4862],\n",
      "         [ 0.5263, -0.4970,  0.1938,  ...,  0.5022,  0.1380,  0.3416],\n",
      "         [ 0.6133, -2.0804,  0.5021,  ...,  0.4373,  0.1556,  0.3065],\n",
      "         ...,\n",
      "         [ 0.3448,  0.3402, -0.8650,  ...,  0.7958,  0.1335,  0.5928],\n",
      "         [ 0.9469, -0.9476,  0.3529,  ...,  0.7802,  0.1253,  0.5251],\n",
      "         [-0.0849, -2.3952,  0.3419,  ...,  0.7336,  0.1213,  0.4604]],\n",
      "\n",
      "        [[ 1.5690,  0.6026,  0.3232,  ..., -0.7386,  0.1963,  0.4862],\n",
      "         [ 0.5263, -0.4970,  0.1938,  ...,  0.5022,  0.1380,  0.3416],\n",
      "         [ 0.6133, -2.0804,  0.5021,  ...,  0.4373,  0.1556,  0.3065],\n",
      "         ...,\n",
      "         [ 0.3448,  0.3402, -0.8650,  ...,  0.7958,  0.1335,  0.5928],\n",
      "         [ 0.9469, -0.9476,  0.3529,  ...,  0.7802,  0.1253,  0.5251],\n",
      "         [-0.0849, -2.3952,  0.3419,  ...,  0.7336,  0.1213,  0.4604]],\n",
      "\n",
      "        [[ 1.5690,  0.6026,  0.3232,  ..., -0.7386,  0.1963,  0.4862],\n",
      "         [ 0.5263, -0.4970,  0.1938,  ...,  0.5022,  0.1380,  0.3416],\n",
      "         [ 0.6133, -2.0804,  0.5021,  ...,  0.4373,  0.1556,  0.3065],\n",
      "         ...,\n",
      "         [ 0.3448,  0.3402, -0.8650,  ...,  0.7958,  0.1335,  0.5928],\n",
      "         [ 0.9469, -0.9476,  0.3529,  ...,  0.7802,  0.1253,  0.5251],\n",
      "         [-0.0849, -2.3952,  0.3419,  ...,  0.7336,  0.1213,  0.4604]],\n",
      "\n",
      "        ...,\n",
      "\n",
      "        [[ 1.5690,  0.6026,  0.3232,  ..., -0.7386,  0.1963,  0.4862],\n",
      "         [ 0.5263, -0.4970,  0.1938,  ...,  0.5022,  0.1380,  0.3416],\n",
      "         [ 0.6133, -2.0804,  0.5021,  ...,  0.4373,  0.1556,  0.3065],\n",
      "         ...,\n",
      "         [ 0.3448,  0.3402, -0.8650,  ...,  0.7958,  0.1335,  0.5928],\n",
      "         [ 0.9469, -0.9476,  0.3529,  ...,  0.7802,  0.1253,  0.5251],\n",
      "         [-0.0849, -2.3952,  0.3419,  ...,  0.7336,  0.1213,  0.4604]],\n",
      "\n",
      "        [[ 1.5690,  0.6026,  0.3232,  ..., -0.7386,  0.1963,  0.4862],\n",
      "         [ 0.5263, -0.4970,  0.1938,  ...,  0.5022,  0.1380,  0.3416],\n",
      "         [ 0.6133, -2.0804,  0.5021,  ...,  0.4373,  0.1556,  0.3065],\n",
      "         ...,\n",
      "         [ 0.3448,  0.3402, -0.8650,  ...,  0.7958,  0.1335,  0.5928],\n",
      "         [ 0.9469, -0.9476,  0.3529,  ...,  0.7802,  0.1253,  0.5251],\n",
      "         [-0.0849, -2.3952,  0.3419,  ...,  0.7336,  0.1213,  0.4604]],\n",
      "\n",
      "        [[ 1.5690,  0.6026,  0.3232,  ..., -0.7386,  0.1963,  0.4862],\n",
      "         [ 0.5263, -0.4970,  0.1938,  ...,  0.5022,  0.1380,  0.3416],\n",
      "         [ 0.6133, -2.0804,  0.5021,  ...,  0.4373,  0.1556,  0.3065],\n",
      "         ...,\n",
      "         [ 0.3448,  0.3402, -0.8650,  ...,  0.7958,  0.1335,  0.5928],\n",
      "         [ 0.9469, -0.9476,  0.3529,  ...,  0.7802,  0.1253,  0.5251],\n",
      "         [-0.0849, -2.3952,  0.3419,  ...,  0.7336,  0.1213,  0.4604]]],\n",
      "       grad_fn=<AddBackward0>)\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import numpy as np\n",
    "from torch.nn.parameter import Parameter\n",
    "\n",
    "# Single global device shared by every module defined in this cell.\n",
    "device = torch.device(\"cpu\")\n",
    "\n",
    "\n",
    "class embedding(nn.Module):\n",
    "    def __init__(self, vocab_size, num_units, zeros_pad=True, scale=True):\n",
    "        \"\"\"Learned token-embedding lookup table.\n",
    "        Args:\n",
    "          vocab_size: An int. Vocabulary size.\n",
    "          num_units: An int. Number of embedding hidden units.\n",
    "          zeros_pad: A boolean. If True, all the values of the first row (id 0)\n",
    "            are kept as constant zeros (row 0 is the padding token).\n",
    "          scale: A boolean. If True, the output is multiplied by sqrt(num_units).\n",
    "        \"\"\"\n",
    "        super(embedding, self).__init__()\n",
    "        self.vocab_size = vocab_size\n",
    "        self.num_units = num_units\n",
    "        self.zeros_pad = zeros_pad\n",
    "        self.scale = scale\n",
    "        # initialize weight with xavier's method\n",
    "        self.lookup_table = Parameter(torch.Tensor(vocab_size, num_units))\n",
    "        nn.init.xavier_normal_(self.lookup_table.data)\n",
    "        if self.zeros_pad:\n",
    "            # Zero out the padding row after Xavier init.\n",
    "            self.lookup_table.data[0, :].fill_(0)\n",
    "\n",
    "    def forward(self, inputs):\n",
    "        # inputs: an integer id tensor; the output gains one trailing\n",
    "        # dimension of size num_units.\n",
    "        # NOTE(review): padding_idx depends only on zeros_pad and could be\n",
    "        # computed once in __init__ instead of on every forward call.\n",
    "        if self.zeros_pad:\n",
    "            self.padding_idx = 0\n",
    "        else:\n",
    "            self.padding_idx = -1\n",
    "        # Positional args are (input, weight, padding_idx, max_norm,\n",
    "        # norm_type, scale_grad_by_freq, sparse).\n",
    "        outputs = F.embedding(\n",
    "            inputs, self.lookup_table, self.padding_idx, None, 2, False, False\n",
    "        )  # copied from torch.nn.modules.sparse.py\n",
    "\n",
    "        if self.scale:\n",
    "            # Scale embeddings by sqrt(d_model), as in the Transformer paper.\n",
    "            outputs = outputs * (self.num_units ** 0.5)\n",
    "\n",
    "        return outputs\n",
    "\n",
    "\n",
    "class layer_normalization(nn.Module):\n",
    "    def __init__(self, features, epsilon=1e-8):\n",
    "        \"\"\"Applies layer normalization over the last (feature) dimension.\n",
    "\n",
    "        Args:\n",
    "          features: Size of the last dimension; determines the shape of the\n",
    "            learned gain (gamma) and bias (beta) parameters.\n",
    "          epsilon: A floating number. A very small number for preventing ZeroDivision Error.\n",
    "        \"\"\"\n",
    "        super(layer_normalization, self).__init__()\n",
    "        self.epsilon = epsilon\n",
    "        self.gamma = nn.Parameter(torch.ones(features))\n",
    "        self.beta = nn.Parameter(torch.zeros(features))\n",
    "\n",
    "    def forward(self, x):\n",
    "        # Normalize each position independently across the feature axis.\n",
    "        mean = x.mean(-1, keepdim=True)\n",
    "        # NOTE(review): torch.std defaults to the unbiased (n-1) estimator,\n",
    "        # and epsilon is added to std rather than to the variance, so this\n",
    "        # differs slightly from nn.LayerNorm.\n",
    "        std = x.std(-1, keepdim=True)\n",
    "        return self.gamma * (x - mean) / (std + self.epsilon) + self.beta\n",
    "\n",
    "\n",
    "class positional_encoding(nn.Module):\n",
    "    def __init__(self, num_units, zeros_pad=True, scale=True):\n",
    "        \"\"\"Sinusoidal Positional_Encoding.\n",
    "\n",
    "        Args:\n",
    "          num_units: Output dimensionality\n",
    "          zeros_pad: Boolean. If True, all the values of the first row (id = 0) should be constant zero\n",
    "          scale: Boolean. If True, the output will be multiplied by sqrt num_units(check details from paper)\n",
    "        \"\"\"\n",
    "        super(positional_encoding, self).__init__()\n",
    "        self.num_units = num_units\n",
    "        self.zeros_pad = zeros_pad\n",
    "        self.scale = scale\n",
    "\n",
    "    def forward(self, inputs):\n",
    "        # inputs: A 2d Tensor with shape of (N, T). Only the shape is used;\n",
    "        # the values themselves are ignored.\n",
    "        N, T = inputs.size()[0:2]\n",
    "\n",
    "        # First part of the PE function: sin and cos argument.\n",
    "        # position_ind[n, t] = t for every sequence in the batch.\n",
    "        # NOTE(review): the encoding table below is rebuilt on every forward\n",
    "        # call and always on CPU; fine while device is \"cpu\", but it would\n",
    "        # need .to(device) (or precomputation in __init__) for GPU use.\n",
    "        position_ind = torch.unsqueeze(torch.arange(0, T), 0).repeat(N, 1).long()\n",
    "        position_enc = torch.Tensor(\n",
    "            [[pos / np.power(10000, 2.0 * i / self.num_units) for i in range(self.num_units)]\n",
    "                for pos in range(T)\n",
    "            ])\n",
    "\n",
    "        # Second part, apply the cosine to even columns and sin to odds.\n",
    "        position_enc[:, 0::2] = torch.sin(position_enc[:, 0::2])  # dim 2i\n",
    "        position_enc[:, 1::2] = torch.cos(position_enc[:, 1::2])  # dim 2i+1\n",
    "\n",
    "        lookup_table = position_enc\n",
    "\n",
    "        if self.zeros_pad:\n",
    "            # Replace row 0 with zeros so position id 0 acts as padding.\n",
    "            lookup_table = torch.cat(\n",
    "                (torch.zeros(1, self.num_units), lookup_table[1:, :]), 0\n",
    "            )\n",
    "            padding_idx = 0\n",
    "        else:\n",
    "            padding_idx = -1\n",
    "\n",
    "        # Positional args are (input, weight, padding_idx, max_norm,\n",
    "        # norm_type, scale_grad_by_freq, sparse).\n",
    "        outputs = F.embedding(\n",
    "            position_ind, lookup_table, padding_idx, None, 2, False, False\n",
    "        )  # copied from torch.nn.modules.sparse.py\n",
    "\n",
    "        if self.scale:\n",
    "            outputs = outputs * self.num_units ** 0.5\n",
    "\n",
    "        return outputs\n",
    "\n",
    "\n",
    "class multihead_attention(nn.Module):\n",
    "    def __init__(self, num_units, num_heads=8, dropout_rate=0, causality=False):\n",
    "        \"\"\"Applies multihead attention, followed by a residual connection\n",
    "        and layer normalization.\n",
    "\n",
    "        Args:\n",
    "            num_units: A scalar. Attention size.\n",
    "            dropout_rate: A floating point number.\n",
    "            causality: Boolean. If true, units that reference the future are masked.\n",
    "            num_heads: An int. Number of heads.\n",
    "        \"\"\"\n",
    "        super(multihead_attention, self).__init__()\n",
    "        self.num_units = num_units\n",
    "        self.num_heads = num_heads\n",
    "        self.dropout_rate = dropout_rate\n",
    "        self.causality = causality\n",
    "        # NOTE(review): the ReLU after each projection is nonstandard; the\n",
    "        # Transformer paper uses plain linear projections for Q, K and V.\n",
    "        self.Q_proj = nn.Sequential(\n",
    "            nn.Linear(self.num_units, self.num_units), nn.ReLU()\n",
    "        )\n",
    "        self.K_proj = nn.Sequential(\n",
    "            nn.Linear(self.num_units, self.num_units), nn.ReLU()\n",
    "        )\n",
    "        self.V_proj = nn.Sequential(\n",
    "            nn.Linear(self.num_units, self.num_units), nn.ReLU()\n",
    "        )\n",
    "\n",
    "        self.output_dropout = nn.Dropout(p=self.dropout_rate)\n",
    "\n",
    "        self.normalization = layer_normalization(self.num_units)\n",
    "\n",
    "    def forward(self, queries, keys, values):\n",
    "        # keys, values: same shape of [N, T_k, C_k]\n",
    "        # queries: A 3d Variable with shape of [N, T_q, C_q]\n",
    "        # Returns a tensor of shape (N, T_q, num_units).\n",
    "\n",
    "        # Linear projections\n",
    "        Q = self.Q_proj(queries)  # (N, T_q, C)\n",
    "        K = self.K_proj(keys)  # (N, T_q, C)\n",
    "        V = self.V_proj(values)  # (N, T_q, C)\n",
    "\n",
    "        # Split and concat: fold the heads into the batch dimension so a\n",
    "        # single bmm computes attention for all heads at once.\n",
    "        Q_ = torch.cat(torch.chunk(Q, self.num_heads, dim=2), dim=0)  # (h*N, T_q, C/h)\n",
    "        K_ = torch.cat(torch.chunk(K, self.num_heads, dim=2), dim=0)  # (h*N, T_q, C/h)\n",
    "        V_ = torch.cat(torch.chunk(V, self.num_heads, dim=2), dim=0)  # (h*N, T_q, C/h)\n",
    "\n",
    "        # Multiplication - batch matrix multiply\n",
    "        outputs = torch.bmm(Q_, K_.permute(0, 2, 1))  # (h*N, T_q, T_k)\n",
    "\n",
    "        # Scale by sqrt(d_k) to keep the softmax logits well-conditioned\n",
    "        outputs = outputs / (K_.size()[-1] ** 0.5)\n",
    "\n",
    "        # Key Masking: positions whose key vector sums to zero are treated\n",
    "        # as padding (assumes padding positions have all-zero embeddings).\n",
    "        key_masks = torch.sign(torch.abs(torch.sum(keys, dim=-1)))  # (N, T_k)\n",
    "        key_masks = key_masks.repeat(self.num_heads, 1)  # (h*N, T_k)\n",
    "        key_masks = torch.unsqueeze(key_masks, 1).repeat(\n",
    "            1, queries.size()[1], 1\n",
    "        )  # (h*N, T_q, T_k)\n",
    "\n",
    "        # -(2**32) + 1 stands in for -inf so softmax gives ~0 weight there.\n",
    "        padding = torch.ones(*outputs.size()).to(device) * (-(2 ** 32) + 1)\n",
    "        condition = key_masks.eq(0.0).float()\n",
    "        outputs = padding * condition + outputs * (1.0 - condition)\n",
    "\n",
    "        # Causality = Future blinding: a lower-triangular mask keeps each\n",
    "        # query position from attending to later key positions.\n",
    "        if self.causality:\n",
    "            diag_vals = torch.ones(*outputs[0, :, :].size()).to(device)  # (T_q, T_k)\n",
    "            tril = torch.tril(diag_vals, diagonal=0)  # (T_q, T_k)\n",
    "            masks = torch.unsqueeze(tril, 0).repeat(\n",
    "                outputs.size()[0], 1, 1\n",
    "            )  # (h*N, T_q, T_k)\n",
    "\n",
    "            padding = torch.ones(*masks.size()).to(device) * (-(2 ** 32) + 1)\n",
    "            condition = masks.eq(0.0).float()\n",
    "            outputs = padding * condition + outputs * (1.0 - condition)\n",
    "\n",
    "        # Activation\n",
    "        outputs = F.softmax(outputs, dim=-1)  # (h*N, T_q, T_k)\n",
    "\n",
    "        # Query Masking: zero out attention rows of padding queries.\n",
    "        query_masks = torch.sign(torch.abs(torch.sum(queries, dim=-1)))  # (N, T_q)\n",
    "        query_masks = query_masks.repeat(self.num_heads, 1)  # (h*N, T_q)\n",
    "        query_masks = torch.unsqueeze(query_masks, 2).repeat(\n",
    "            1, 1, keys.size()[1]\n",
    "        )  # (h*N, T_q, T_k)\n",
    "        outputs = outputs * query_masks\n",
    "\n",
    "        # Dropouts\n",
    "        outputs = self.output_dropout(outputs)  # (h*N, T_q, T_k)\n",
    "\n",
    "        # Weighted sum\n",
    "        outputs = torch.bmm(outputs, V_)  # (h*N, T_q, C/h)\n",
    "\n",
    "        # Restore shape: split the heads back out of the batch dimension.\n",
    "        outputs = torch.cat(\n",
    "            torch.chunk(outputs, self.num_heads, dim=0), dim=2\n",
    "        )  # (N, T_q, C)\n",
    "\n",
    "        # Residual connection\n",
    "        outputs += queries\n",
    "\n",
    "        # Normalize\n",
    "        outputs = self.normalization(outputs)  # (N, T_q, C)\n",
    "\n",
    "        return outputs\n",
    "\n",
    "\n",
    "class feedforward(nn.Module):\n",
    "    def __init__(self, in_channels, num_units=(2048, 512)):\n",
    "        \"\"\"Point-wise feed forward net with residual connection and layer norm.\n",
    "\n",
    "        Args:\n",
    "          in_channels: a number of channels of inputs\n",
    "          num_units: A sequence of two integers: [hidden size, output size].\n",
    "            The output size must equal in_channels for the residual add.\n",
    "        \"\"\"\n",
    "        super(feedforward, self).__init__()\n",
    "        self.in_channels = in_channels\n",
    "        # Tuple default avoids the shared-mutable-default pitfall; callers\n",
    "        # may still pass a list, and indexing works the same for both.\n",
    "        self.num_units = num_units\n",
    "\n",
    "        # nn.Linear is faster than nn.Conv1d; the conv path is kept for\n",
    "        # reference but disabled, and the conv1/conv2 names are reused.\n",
    "        self.conv = False\n",
    "        if self.conv:\n",
    "            params = {\n",
    "                \"in_channels\": self.in_channels,\n",
    "                \"out_channels\": self.num_units[0],\n",
    "                \"kernel_size\": 1,\n",
    "                \"stride\": 1,\n",
    "                \"bias\": True,\n",
    "            }\n",
    "            self.conv1 = nn.Sequential(nn.Conv1d(**params), nn.ReLU())\n",
    "            params = {\n",
    "                \"in_channels\": self.num_units[0],\n",
    "                \"out_channels\": self.num_units[1],\n",
    "                \"kernel_size\": 1,\n",
    "                \"stride\": 1,\n",
    "                \"bias\": True,\n",
    "            }\n",
    "            self.conv2 = nn.Conv1d(**params)\n",
    "        else:\n",
    "            self.conv1 = nn.Sequential(\n",
    "                nn.Linear(self.in_channels, self.num_units[0]), nn.ReLU()\n",
    "            )\n",
    "            self.conv2 = nn.Linear(self.num_units[0], self.num_units[1])\n",
    "        self.normalization = layer_normalization(self.in_channels)\n",
    "\n",
    "    def forward(self, inputs):\n",
    "        # inputs: (N, T, in_channels); returns the same shape.\n",
    "        if self.conv:\n",
    "            # Conv1d expects channels-first (N, C, T).\n",
    "            inputs = inputs.permute(0, 2, 1)\n",
    "        outputs = self.conv1(inputs)\n",
    "        outputs = self.conv2(outputs)\n",
    "\n",
    "        # Residual connection\n",
    "        outputs += inputs\n",
    "\n",
    "        # Layer normalization\n",
    "        if self.conv:\n",
    "            outputs = self.normalization(outputs.permute(0, 2, 1))\n",
    "        else:\n",
    "            outputs = self.normalization(outputs)\n",
    "\n",
    "        return outputs\n",
    "\n",
    "\n",
    "class label_smoothing(nn.Module):\n",
    "    def __init__(self, epsilon=0.1):\n",
    "        \"\"\"Applies label smoothing. See https://arxiv.org/abs/1512.00567.\n",
    "\n",
    "        Args:\n",
    "            epsilon: Smoothing rate.\n",
    "        \"\"\"\n",
    "        super(label_smoothing, self).__init__()\n",
    "        self.epsilon = epsilon\n",
    "\n",
    "    def forward(self, inputs):\n",
    "        K = inputs.size()[-1]\n",
    "        return ((1 - self.epsilon) * inputs) + (self.epsilon / K)\n",
    "\n",
    "\n",
    "# Smoke test: push one random batch through each block and print it.\n",
    "num_units = 512\n",
    "# Only the shape (batch=100, seq_len=10) of `inputs` is used by\n",
    "# positional_encoding; the values themselves are ignored.\n",
    "inputs = torch.randn((100, 10))\n",
    "outputs = positional_encoding(num_units)(inputs)\n",
    "outputs = multihead_attention(num_units)(outputs, outputs, outputs)\n",
    "outputs = feedforward(num_units)(outputs)\n",
    "\n",
    "print(outputs)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f6a5e013",
   "metadata": {},
   "outputs": [],
   "source": [
    "class AttModel(nn.Module):\n",
    "    def __init__(self, hp_, enc_voc, dec_voc):\n",
    "        \"\"\"Attention is all you need. https://arxiv.org/abs/1706.03762\n",
    "        Args:\n",
    "            hp_: Hyper Parameters (uses hidden_units, num_blocks, num_heads,\n",
    "                dropout_rate, sinusoid and maxlen attributes)\n",
    "            enc_voc: vocabulary size of encoder language\n",
    "            dec_voc: vocabulary size of decoder language\n",
    "        \"\"\"\n",
    "        super(AttModel, self).__init__()\n",
    "        self.hp = hp_\n",
    "        self.enc_voc = enc_voc\n",
    "        self.dec_voc = dec_voc\n",
    "\n",
    "        # encoder\n",
    "        self.enc_emb = embedding(self.enc_voc, self.hp.hidden_units, scale=True)\n",
    "\n",
    "        # Fixed sinusoidal positions vs. a learned positional-embedding table.\n",
    "        if self.hp.sinusoid:\n",
    "            self.enc_positional_encoding = positional_encoding(\n",
    "                num_units=self.hp.hidden_units, zeros_pad=False, scale=False\n",
    "            )\n",
    "        else:\n",
    "            self.enc_positional_encoding = embedding(\n",
    "                self.hp.maxlen, self.hp.hidden_units, zeros_pad=False, scale=False\n",
    "            )\n",
    "        self.enc_dropout = nn.Dropout(self.hp.dropout_rate)\n",
    "        # One self-attention + feed-forward pair per encoder block, registered\n",
    "        # via __setattr__ so nn.Module tracks their parameters.\n",
    "        for i in range(self.hp.num_blocks):\n",
    "            self.__setattr__(\n",
    "                \"enc_self_attention_%d\" % i,\n",
    "                multihead_attention(\n",
    "                    num_units=self.hp.hidden_units,\n",
    "                    num_heads=self.hp.num_heads,\n",
    "                    dropout_rate=self.hp.dropout_rate,\n",
    "                    causality=False,\n",
    "                ),\n",
    "            )\n",
    "            self.__setattr__(\n",
    "                \"enc_feed_forward_%d\" % i,\n",
    "                feedforward(\n",
    "                    self.hp.hidden_units,\n",
    "                    [4 * self.hp.hidden_units, self.hp.hidden_units],\n",
    "                ),\n",
    "            )\n",
    "\n",
    "        # decoder\n",
    "        self.dec_emb = embedding(self.dec_voc, self.hp.hidden_units, scale=True)\n",
    "        if self.hp.sinusoid:\n",
    "            self.dec_positional_encoding = positional_encoding(\n",
    "                num_units=self.hp.hidden_units, zeros_pad=False, scale=False\n",
    "            )\n",
    "        else:\n",
    "            self.dec_positional_encoding = embedding(\n",
    "                self.hp.maxlen, self.hp.hidden_units, zeros_pad=False, scale=False\n",
    "            )\n",
    "\n",
    "        self.dec_dropout = nn.Dropout(self.hp.dropout_rate)\n",
    "        # Each decoder block: causal self-attention, encoder-decoder\n",
    "        # (\"vanilla\") attention, then feed-forward.\n",
    "        for i in range(self.hp.num_blocks):\n",
    "            self.__setattr__(\n",
    "                \"dec_self_attention_%d\" % i,\n",
    "                multihead_attention(\n",
    "                    num_units=self.hp.hidden_units,\n",
    "                    num_heads=self.hp.num_heads,\n",
    "                    dropout_rate=self.hp.dropout_rate,\n",
    "                    causality=True,\n",
    "                ),\n",
    "            )\n",
    "            self.__setattr__(\n",
    "                \"dec_vanilla_attention_%d\" % i,\n",
    "                multihead_attention(\n",
    "                    num_units=self.hp.hidden_units,\n",
    "                    num_heads=self.hp.num_heads,\n",
    "                    dropout_rate=self.hp.dropout_rate,\n",
    "                    causality=False,\n",
    "                ),\n",
    "            )\n",
    "            self.__setattr__(\n",
    "                \"dec_feed_forward_%d\" % i,\n",
    "                feedforward(\n",
    "                    self.hp.hidden_units,\n",
    "                    [4 * self.hp.hidden_units, self.hp.hidden_units],\n",
    "                ),\n",
    "            )\n",
    "        self.logits_layer = nn.Linear(self.hp.hidden_units, self.dec_voc)\n",
    "        self.label_smoothing = label_smoothing()\n",
    "        # self.losslayer = nn.CrossEntropyLoss(reduce=False)\n",
    "\n",
    "    def forward(self, x, y):\n",
    "        # x: (N, T) source token ids; y: (N, T) target token ids, 0 = padding.\n",
    "        # Returns (mean_loss, preds, acc).\n",
    "        # define decoder inputs: targets shifted right, prefixed with <S>\n",
    "        self.decoder_inputs = torch.cat(\n",
    "            [torch.ones(y[:, :1].size()).to(device).long() * 2, y[:, :-1]], dim=-1,\n",
    "        )  # 2:<S>\n",
    "\n",
    "        # Encoder\n",
    "        self.enc = self.enc_emb(x)\n",
    "        # Positional Encoding (the learned table is indexed by 0..T-1)\n",
    "        if self.hp.sinusoid:\n",
    "            self.enc += self.enc_positional_encoding(x)\n",
    "        else:\n",
    "            self.enc += self.enc_positional_encoding(\n",
    "                torch.unsqueeze(torch.arange(0, x.size()[1]), 0)\n",
    "                .repeat(x.size(0), 1)\n",
    "                .long()\n",
    "                .to(device)\n",
    "            )\n",
    "        self.enc = self.enc_dropout(self.enc)\n",
    "        # Blocks\n",
    "        for i in range(self.hp.num_blocks):\n",
    "            self.enc = self.__getattr__(\"enc_self_attention_%d\" % i)(\n",
    "                self.enc, self.enc, self.enc\n",
    "            )\n",
    "            # Feed Forward\n",
    "            self.enc = self.__getattr__(\"enc_feed_forward_%d\" % i)(self.enc)\n",
    "        # Decoder\n",
    "        self.dec = self.dec_emb(self.decoder_inputs)\n",
    "        # Positional Encoding\n",
    "        if self.hp.sinusoid:\n",
    "            self.dec += self.dec_positional_encoding(self.decoder_inputs)\n",
    "        else:\n",
    "            self.dec += self.dec_positional_encoding(\n",
    "                torch.unsqueeze(torch.arange(0, self.decoder_inputs.size()[1]), 0)\n",
    "                .repeat(self.decoder_inputs.size(0), 1)\n",
    "                .long()\n",
    "                .to(device)\n",
    "            )\n",
    "\n",
    "        # Dropout\n",
    "        self.dec = self.dec_dropout(self.dec)\n",
    "        # Blocks\n",
    "        for i in range(self.hp.num_blocks):\n",
    "            # self-attention\n",
    "            self.dec = self.__getattr__(\"dec_self_attention_%d\" % i)(\n",
    "                self.dec, self.dec, self.dec\n",
    "            )\n",
    "            # vanilla attention\n",
    "            self.dec = self.__getattr__(\"dec_vanilla_attention_%d\" % i)(\n",
    "                self.dec, self.enc, self.enc\n",
    "            )\n",
    "            # feed forward\n",
    "            self.dec = self.__getattr__(\"dec_feed_forward_%d\" % i)(self.dec)\n",
    "\n",
    "        # Final linear projection\n",
    "        self.logits = self.logits_layer(self.dec)\n",
    "        self.probs = F.softmax(self.logits, dim=-1).view(-1, self.dec_voc)\n",
    "        _, self.preds = torch.max(self.logits, -1)\n",
    "        # istarget is 1.0 for real target tokens and 0.0 for padding (id 0);\n",
    "        # it excludes padding positions from both accuracy and loss.\n",
    "        self.istarget = (1.0 - y.eq(0.0).float()).view(-1)\n",
    "        self.acc = torch.sum(\n",
    "            self.preds.eq(y).float().view(-1) * self.istarget\n",
    "        ) / torch.sum(self.istarget)\n",
    "\n",
    "        # Loss: label-smoothed cross entropy over the flattened positions.\n",
    "        self.y_onehot = torch.zeros(\n",
    "            self.logits.size()[0] * self.logits.size()[1], self.dec_voc\n",
    "        ).to(device)\n",
    "        self.y_onehot = self.y_onehot.scatter_(1, y.view(-1, 1).data, 1)\n",
    "\n",
    "        self.y_smoothed = self.label_smoothing(self.y_onehot)\n",
    "\n",
    "        # NOTE(review): log(softmax(...)) can underflow to -inf; computing\n",
    "        # F.log_softmax(self.logits) directly would be more numerically stable.\n",
    "        self.loss = -torch.sum(self.y_smoothed * torch.log(self.probs), dim=-1)\n",
    "        self.mean_loss = torch.sum(self.loss * self.istarget) / torch.sum(self.istarget)\n",
    "\n",
    "        return self.mean_loss, self.preds, self.acc\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "base",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
