{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "import csv\n",
    "import pandas as pd\n",
    "import re\n",
    "\n",
    "# Load the Chinese-English parallel corpus.\n",
    "data = pd.read_csv('lang.csv')\n",
    "\n",
    "# Collapse runs of whitespace so tokens are separated by single spaces.\n",
    "data['Chinese'] = data['Chinese'].apply(lambda x: ' '.join(re.sub(r'\\s+', ' ', x).strip().split()))\n",
    "data['English'] = data['English'].apply(lambda x: ' '.join(re.sub(r'\\s+', ' ', x).strip().split()))\n",
    "\n",
    "# Dump each side to a plain-text file (one sentence per line) for tokenizer training.\n",
    "with open(\"chinese.txt\", 'w', encoding='utf-8') as f:\n",
    "    f.write(\"\\n\".join(data['Chinese']))\n",
    "\n",
    "with open(\"english.txt\", 'w', encoding='utf-8') as f:\n",
    "    f.write(\"\\n\".join(data['English']))\n",
    "\n",
    "from tokenizers import ByteLevelBPETokenizer\n",
    "from tqdm.notebook import tqdm\n",
    "from transformers import RobertaTokenizer\n",
    "from sklearn.model_selection import train_test_split\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "from torch.optim import AdamW\n",
    "from torchtext.data.metrics import bleu_score\n",
    "from transformers.optimization import get_linear_schedule_with_warmup\n",
    "import torch.nn.functional as F\n",
    "import numpy as np\n",
    "import math\n",
    "\n",
    "# Train a byte-level BPE tokenizer for Chinese.\n",
    "# NOTE(review): the code below hard-codes ids 0/1/2 for <s>/<pad>/</s>;\n",
    "# presumably the special_tokens order here fixes those ids -- verify.\n",
    "tokenizer_chinese = ByteLevelBPETokenizer()\n",
    "tokenizer_chinese.train(['chinese.txt'], vocab_size=50000, min_frequency=2, special_tokens=['<s>', '<pad>', '</s>', '<unk>', '<mask>'])\n",
    "\n",
    "# Train a byte-level BPE tokenizer for English (note the higher min_frequency).\n",
    "tokenizer_english = ByteLevelBPETokenizer()\n",
    "tokenizer_english.train(['english.txt'], vocab_size=50000, min_frequency=3, special_tokens=['<s>', '<pad>', '</s>', '<unk>', '<mask>'])\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\Software\\Python\\lib\\site-packages\\torchtext\\data\\__init__.py:4: UserWarning: \n",
      "/!\\ IMPORTANT WARNING ABOUT TORCHTEXT STATUS /!\\ \n",
      "Torchtext is deprecated and the last released version will be 0.18 (this one). You can silence this warning by calling the following at the beginnign of your scripts: `import torchtext; torchtext.disable_torchtext_deprecation_warning()`\n",
      "  warnings.warn(torchtext._TORCHTEXT_DEPRECATION_MSG)\n"
     ]
    }
   ],
   "source": [
    "# 定义嵌入层\n",
    "class Embedding(nn.Module):\n",
    "    def __init__(self, vocab_size, embed_dim):\n",
    "        super(Embedding, self).__init__()\n",
    "        self.embedd = nn.Embedding(vocab_size, embed_dim)\n",
    "\n",
    "    def forward(self, x):\n",
    "        out = self.embedd(x)\n",
    "        return out\n",
    "\n",
    "# 定义位置嵌入层\n",
    "class PositionEmbedding(nn.Module):\n",
    "    def __init__(self, max_sen=5000, dim_data=512):\n",
    "        super(PositionEmbedding, self).__init__()\n",
    "        self.dim = dim_data\n",
    "\n",
    "        pos = torch.zeros(max_sen, dim_data)\n",
    "\n",
    "        for x in range(max_sen):\n",
    "            for y in range(0, dim_data, 2):\n",
    "                pos[x, y] = math.sin(x / (1000 ** (2 * y / self.dim)))\n",
    "                pos[x, y + 1] = math.cos(x / (1000 ** ((2 * (y + 1)) / self.dim)))\n",
    "\n",
    "        pos = torch.unsqueeze(pos, 0)\n",
    "        self.register_buffer('pe', pos)\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = x * math.sqrt(self.dim)\n",
    "        sequence_length = x.size(1)\n",
    "        output = x + torch.autograd.Variable(self.pe[:, :sequence_length], requires_grad=False)\n",
    "        return output\n",
    "\n",
    "# 定义多头注意力机制\n",
    "class MultiHeadAttention(nn.Module):\n",
    "    def __init__(self, n_heads=8, dim=512):\n",
    "        super(MultiHeadAttention, self).__init__()\n",
    "        self.size_of_matrix = int(dim / n_heads)\n",
    "        self.n_heads = n_heads\n",
    "        self.key = nn.Linear(dim, dim, bias=False)\n",
    "        self.query = nn.Linear(dim, dim, bias=False)\n",
    "        self.value = nn.Linear(dim, dim, bias=False)\n",
    "        self.out = nn.Linear(n_heads * self.size_of_matrix, dim)\n",
    "        self.dropout = nn.Dropout(0.3)\n",
    "\n",
    "    def forward(self, key, value, query, mask=None):\n",
    "        batch_size = key.size(0)\n",
    "        seq_length = key.size(1)\n",
    "        key = self.key(key)\n",
    "        value = self.value(value)\n",
    "        query = self.query(query)\n",
    "        key = key.view(batch_size, seq_length, self.n_heads, self.size_of_matrix)\n",
    "        value = value.view(batch_size, seq_length, self.n_heads, self.size_of_matrix)\n",
    "        query = query.view(batch_size, seq_length, self.n_heads, self.size_of_matrix)\n",
    "        key = key.transpose(1, 2)\n",
    "        query = query.transpose(1, 2)\n",
    "        value = value.transpose(1, 2)\n",
    "        scalar_output = torch.matmul(query, key.transpose(-2, -1))\n",
    "        scalar_output_square_root = scalar_output / math.sqrt(self.size_of_matrix)\n",
    "        if mask is not None:\n",
    "            mask = mask.unsqueeze(1)\n",
    "            scalar_output_square_root = scalar_output_square_root.masked_fill(mask == 0, float('-inf'))\n",
    "        output_from_softmax = F.softmax(scalar_output_square_root, dim=-1)\n",
    "        multiplication_with_value = torch.matmul(self.dropout(output_from_softmax), value)\n",
    "        concat = multiplication_with_value.transpose(1, 2).contiguous().view(batch_size, -1, self.n_heads * self.size_of_matrix)\n",
    "        output_from_multihead = self.out(concat)\n",
    "        return output_from_multihead, output_from_softmax\n",
    "\n",
    "# Encoder layer: self-attention + feed-forward, each wrapped in\n",
    "# dropout -> residual add -> LayerNorm (post-norm arrangement).\n",
    "class EncoderLayer(nn.Module):\n",
    "    def __init__(self, dim, expansion_factor, num_heads=8):\n",
    "        super(EncoderLayer, self).__init__()\n",
    "        self.attn = MultiHeadAttention(num_heads, dim)\n",
    "        # One Dropout module is reused for both sub-layers; harmless\n",
    "        # because Dropout holds no learnable state.\n",
    "        self.dropout = nn.Dropout(0.2)\n",
    "        self.LayerNorm1 = nn.LayerNorm(dim)\n",
    "        self.LayerNorm2 = nn.LayerNorm(dim)\n",
    "        # Position-wise feed-forward: dim -> expansion_factor*dim -> dim.\n",
    "        self.feedforward_network = nn.Sequential(\n",
    "            nn.Linear(dim, expansion_factor * dim),\n",
    "            nn.ReLU(),\n",
    "            nn.Linear(expansion_factor * dim, dim)\n",
    "        )\n",
    "\n",
    "    def forward(self, x, mask=None):\n",
    "        # Self-attention: key = value = query = x.\n",
    "        x1, att_prob = self.attn(x, x, x, mask)\n",
    "        x = self.LayerNorm1(self.dropout(x1) + x)\n",
    "        x1 = self.feedforward_network(x)\n",
    "        x = self.LayerNorm2(self.dropout(x1) + x)\n",
    "        return x, att_prob\n",
    "\n",
    "# Encoder stack: token embedding + positional encoding + 6 encoder layers.\n",
    "class TransformerEncoderBlock(nn.Module):\n",
    "    def __init__(self, src_length, expansion_factor, dim=512, num_heads=8, max_sequence_length=5000):\n",
    "        # src_length is the source vocabulary size (it feeds nn.Embedding).\n",
    "        super(TransformerEncoderBlock, self).__init__()\n",
    "        self.Embedding = Embedding(src_length, dim)\n",
    "        self.num_layers = 6\n",
    "        self.positionEmbedding = PositionEmbedding(max_sen=max_sequence_length, dim_data=dim)\n",
    "        self.layers = nn.ModuleList([\n",
    "            EncoderLayer(dim, expansion_factor, num_heads)\n",
    "            for _ in range(self.num_layers)\n",
    "        ])\n",
    "\n",
    "    def forward(self, x, mask):\n",
    "        embedded_out = self.Embedding(x)\n",
    "        x = self.positionEmbedding(embedded_out)\n",
    "        for layer in self.layers:\n",
    "            x, attn_prob = layer(x, mask)\n",
    "        # Only the attention probabilities of the *last* layer are returned.\n",
    "        return x, attn_prob\n",
    "\n",
    "# Decoder layer: masked self-attention, cross-attention, feed-forward,\n",
    "# each followed by dropout -> residual add -> LayerNorm.\n",
    "class DecoderLayer(nn.Module):\n",
    "    def __init__(self, expansion_factor, dim=512, num_heads=8):\n",
    "        super(DecoderLayer, self).__init__()\n",
    "        self.attn = MultiHeadAttention(num_heads, dim)   # masked self-attention\n",
    "        self.attn2 = MultiHeadAttention(num_heads, dim)  # cross-attention\n",
    "        self.dropout = nn.Dropout(0.2)\n",
    "        # BUG FIX: the original reused a single LayerNorm for all three\n",
    "        # sub-layers, tying their learnable scale/shift parameters.\n",
    "        # NOTE(review): this adds parameters, so checkpoints saved by the\n",
    "        # old version will not load with strict=True.\n",
    "        self.LayerNorm1 = nn.LayerNorm(dim)\n",
    "        self.LayerNorm2 = nn.LayerNorm(dim)\n",
    "        self.LayerNorm3 = nn.LayerNorm(dim)\n",
    "        self.feedforward_network = nn.Sequential(\n",
    "            nn.Linear(dim, expansion_factor * dim),\n",
    "            nn.ReLU(),\n",
    "            nn.Linear(expansion_factor * dim, dim)\n",
    "        )\n",
    "\n",
    "    def forward(self, x, encoder_out, target_mask, src_mask):\n",
    "        # Masked self-attention over the target prefix.\n",
    "        x1, _ = self.attn(x, x, x, target_mask)\n",
    "        x2 = self.LayerNorm1(self.dropout(x1) + x)\n",
    "        # Cross-attention: queries from the decoder, keys/values from the encoder.\n",
    "        x3, attn_prob = self.attn2(encoder_out, encoder_out, x2, src_mask)\n",
    "        # BUG FIX: dropout is applied to the sub-layer output (x3); the\n",
    "        # original dropped out the residual input x2 instead.\n",
    "        x = self.LayerNorm2(self.dropout(x3) + x2)\n",
    "        x1 = self.feedforward_network(x)\n",
    "        x = self.LayerNorm3(self.dropout(x1) + x)\n",
    "        return x, attn_prob\n",
    "\n",
    "# Decoder stack: embedding + positional encoding + dropout + 6 decoder layers.\n",
    "class TransformerDecoderBlock(nn.Module):\n",
    "    def __init__(self, target_vocab_size, expansion_factor, dim=512, max_sequence_length=5000, num_heads=8):\n",
    "        super(TransformerDecoderBlock, self).__init__()\n",
    "        self.embedding = Embedding(target_vocab_size, dim)\n",
    "        self.position_embedding = PositionEmbedding(max_sen=max_sequence_length, dim_data=dim)\n",
    "        self.dropout = nn.Dropout(0.2)\n",
    "        self.layers = nn.ModuleList([\n",
    "            DecoderLayer(expansion_factor, dim, num_heads)\n",
    "            for _ in range(6)\n",
    "        ])\n",
    "\n",
    "    def forward(self, x, encoder_output, masked_target, src_mask):\n",
    "        # masked_target masks the decoder's self-attention (causal + padding);\n",
    "        # src_mask masks encoder padding in cross-attention.\n",
    "        embedded_vector = self.embedding(x)\n",
    "        position = self.position_embedding(embedded_vector)\n",
    "        x = self.dropout(position)\n",
    "        for layer in self.layers:\n",
    "            x, attn_prob = layer(x, encoder_output, masked_target, src_mask)\n",
    "        # attn_prob is the cross-attention of the last layer only.\n",
    "        return x, attn_prob\n",
    "\n",
    "# 定义Transformer模型\n",
    "class TransformerModel(nn.Module):\n",
    "    def __init__(self, vocab_size_source, vocab_size_target, dim, expansion_factor, DEVICE):\n",
    "        super(TransformerModel, self).__init__()\n",
    "        self.encoder = TransformerEncoderBlock(vocab_size_source, expansion_factor, dim)\n",
    "        self.decoder = TransformerDecoderBlock(vocab_size_target, expansion_factor, dim)\n",
    "        self.logits = nn.Linear(dim, vocab_size_target)\n",
    "\n",
    "    def mask_target(self, target):\n",
    "        batch_size, seq_length = target.size(0), target.size(1)\n",
    "        masked_target = torch.tril(torch.ones(seq_length, seq_length)).expand(1, seq_length, seq_length)\n",
    "        return masked_target.to(DEVICE, dtype=torch.int64)\n",
    "\n",
    "    def pad_mask(self, x, idx=1):\n",
    "        mask = (x != 1).unsqueeze(-2)\n",
    "        return mask.to(DEVICE)\n",
    "\n",
    "    def forward(self, x, y):\n",
    "        src_mask = self.pad_mask(x).to(DEVICE)\n",
    "        encoder_output, _ = self.encoder(x, src_mask)\n",
    "        masked_target = torch.bitwise_and(self.pad_mask(y), self.mask_target(y))\n",
    "        decoder_output, attn_prob = self.decoder(y, encoder_output, masked_target, src_mask)\n",
    "        output = self.logits(decoder_output)\n",
    "        probs_output = F.log_softmax(output, dim=-1)\n",
    "        return probs_output\n",
    "\n",
    "# 创建中文到英文数据集\n",
    "class ChineseToEnglishDataset:\n",
    "    def __init__(self, chinese, english, tokenizer_en, tokenizer_zh, max_length=60):\n",
    "        self.chinese = chinese\n",
    "        self.english = english\n",
    "        self.max_len_en = max_length - 1\n",
    "        self.max_len_zh = max_length\n",
    "        self.tokenizer_en = tokenizer_en\n",
    "        self.tokenizer_zh = tokenizer_zh\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.chinese)\n",
    "\n",
    "    def __getitem__(self, item):\n",
    "        chinese = self.tokenizer_zh.encode(self.chinese[item]).ids\n",
    "        english = self.tokenizer_en.encode(self.english[item]).ids\n",
    "\n",
    "        if len(chinese) > self.max_len_zh - 2:\n",
    "            chinese = [0] + chinese[:self.max_len_zh - 2] + [2]\n",
    "        else:\n",
    "            chinese = [0] + chinese + [2]\n",
    "        if len(chinese) < self.max_len_zh:\n",
    "            chinese = chinese + [1] * (self.max_len_zh - len(chinese))\n",
    "\n",
    "        if len(english) > self.max_len_en - 2:\n",
    "            english = [0] + english[:self.max_len_en - 2] + [2]\n",
    "        else:\n",
    "            english = [0] + english + [2]\n",
    "        if len(english) < self.max_len_en:\n",
    "            english = english + [1] * (self.max_len_en - len(english))\n",
    "\n",
    "        return {\n",
    "            \"src\": torch.tensor(chinese, dtype=torch.long),\n",
    "            \"target\": torch.tensor(english, dtype=torch.long)\n",
    "        }\n",
    "\n",
    "# 定义损失函数\n",
    "def loss_fn(output, target):\n",
    "    loss_cal = nn.CrossEntropyLoss(ignore_index=1)\n",
    "    loss = loss_cal(output, target)\n",
    "    return loss\n",
    "\n",
    "# 将id转换为文本\n",
    "def convert_id_to_text(ids, tokenizer, eos_idx):\n",
    "    if ids.dim() == 1:\n",
    "        output_tokens = []\n",
    "        for token_id in ids:\n",
    "            data_list = [token_id.item()]\n",
    "            if token_id == eos_idx:\n",
    "                break\n",
    "            else:\n",
    "                output_tokens.append(tokenizer.decode(data_list).strip())\n",
    "        return output_tokens\n",
    "    elif ids.dim() == 2:\n",
    "        return [convert_id_to_text(ids[i, :], tokenizer, eos_idx) for i in range(ids.size(0))]\n",
    "    raise RuntimeError(f'Expected 1 or 2 dimensions, but got {ids.dim()}')\n",
    "\n",
    "# Evaluation: teacher-forced loss plus corpus BLEU on greedy tokens.\n",
    "def eval_fn(val_data, model):\n",
    "    \"\"\"Returns (BLEU score, perplexity) over the validation loader.\"\"\"\n",
    "    model.eval()\n",
    "    total_loss = 0\n",
    "    steps = 0\n",
    "    hypotheses = []\n",
    "    references = []\n",
    "    with torch.no_grad():\n",
    "        tk = tqdm(val_data, total=len(val_data))\n",
    "        for batch_idx, batch in enumerate(tk):\n",
    "            src = batch['src'].to(DEVICE, dtype=torch.long)\n",
    "            target = batch['target'].to(DEVICE, dtype=torch.long)\n",
    "            # Shifted teacher forcing: predict target[1:] from target[:-1].\n",
    "            output = model(src, target[:, :-1])\n",
    "            loss = loss_fn(output.view(-1, output.size(-1)), target[:, 1:].contiguous().view(-1))\n",
    "            total_loss += loss.item()\n",
    "            steps += 1\n",
    "            output = output.argmax(dim=-1)\n",
    "            target = target[:, 1:]\n",
    "            pred_tokens = convert_id_to_text(output, tokenizer_english, 2)\n",
    "            actual_tokens = convert_id_to_text(target, tokenizer_english, 2)\n",
    "            hypotheses += pred_tokens\n",
    "            references += [[token] for token in actual_tokens]\n",
    "    # BUG FIX: perplexity is exp(mean cross-entropy); the original took\n",
    "    # np.log of the mean loss instead.\n",
    "    perplexity = np.exp(total_loss / steps)\n",
    "    bleu_data = bleu_score(hypotheses, references)\n",
    "    return bleu_data, perplexity\n",
    "\n",
    "# One training epoch: teacher forcing, gradient clipping, optional LR schedule.\n",
    "def train_fn(train_data, model, optimizer, clip=1.0, scheduler=None):\n",
    "    \"\"\"Returns (argmax predictions of the last batch, train perplexity).\"\"\"\n",
    "    model.train()\n",
    "    total_loss = 0\n",
    "    steps = 0\n",
    "    tk = tqdm(train_data, total=len(train_data))\n",
    "    # BUG FIX: the original loop variable shadowed `train_data`, so the\n",
    "    # perplexity denominator below was len(<last batch dict>) == 2.\n",
    "    for batch_idx, batch in enumerate(tk):\n",
    "        src = batch['src'].to(DEVICE, dtype=torch.long)\n",
    "        target = batch['target'].to(DEVICE, dtype=torch.long)\n",
    "        optimizer.zero_grad()\n",
    "        output = model(src, target[:, :-1])\n",
    "        loss = loss_fn(output.view(-1, output.size(-1)), target[:, 1:].contiguous().view(-1))\n",
    "        total_loss += loss.item()\n",
    "        steps += 1\n",
    "        output = output.argmax(dim=-1)\n",
    "        loss.backward()\n",
    "        nn.utils.clip_grad_norm_(model.parameters(), clip)\n",
    "        optimizer.step()\n",
    "        if scheduler is not None:\n",
    "            # BUG FIX: the scheduler was accepted but never stepped.\n",
    "            scheduler.step()\n",
    "    # BUG FIX: perplexity = exp(mean loss); the original used np.log.\n",
    "    perplexity = np.exp(total_loss / steps)\n",
    "    return output, perplexity\n",
    "\n",
    "# Train for up to 15 epochs with early stopping on validation BLEU.\n",
    "def run():\n",
    "    # NOTE(review): relies on the global `model` created in a later cell;\n",
    "    # on a fresh kernel that cell must run first.\n",
    "    EPOCHS = 15\n",
    "    BATCH_SIZE = 128  # was a magic number repeated three times\n",
    "    df_train, df_valid = train_test_split(data, test_size=0.3, random_state=42)\n",
    "    df_train = df_train.reset_index(drop=True)\n",
    "    df_valid = df_valid.reset_index(drop=True)\n",
    "    train_data = ChineseToEnglishDataset(\n",
    "        chinese=df_train.Chinese.values,\n",
    "        english=df_train.English.values,\n",
    "        tokenizer_zh=tokenizer_chinese,\n",
    "        tokenizer_en=tokenizer_english\n",
    "    )\n",
    "    train_data_loader = torch.utils.data.DataLoader(\n",
    "        train_data,\n",
    "        batch_size=BATCH_SIZE\n",
    "    )\n",
    "    val_data = ChineseToEnglishDataset(\n",
    "        chinese=df_valid.Chinese.values,\n",
    "        english=df_valid.English.values,\n",
    "        tokenizer_zh=tokenizer_chinese,\n",
    "        tokenizer_en=tokenizer_english\n",
    "    )\n",
    "    validation_data_loader = torch.utils.data.DataLoader(\n",
    "        val_data,\n",
    "        batch_size=BATCH_SIZE\n",
    "    )\n",
    "    num_train_steps = int(len(train_data) / BATCH_SIZE) * EPOCHS\n",
    "    optimizer = AdamW(model.parameters(), lr=1e-4)\n",
    "    scheduler = get_linear_schedule_with_warmup(\n",
    "        optimizer,\n",
    "        num_warmup_steps=0,\n",
    "        num_training_steps=num_train_steps\n",
    "    )\n",
    "    best_bleu4 = float('-inf')\n",
    "    es_patience = 3\n",
    "    patience = 0\n",
    "    # NOTE(review): hard-coded Colab/Drive path; fails elsewhere.\n",
    "    model_path = '/content/drive/MyDrive/Model/model.pth'\n",
    "    for i in range(EPOCHS):\n",
    "        print(\"Epoch {}/{}\".format(i + 1, EPOCHS))\n",
    "        _, train_perplexity = train_fn(train_data_loader, model, optimizer, clip=1.0, scheduler=scheduler)\n",
    "        # Renamed local (was `bleu_score`) to stop shadowing the imported metric.\n",
    "        val_bleu, test_perplexity = eval_fn(validation_data_loader, model)\n",
    "        print(f'Epoch :{i+1} and train perplexity {train_perplexity:.4f}')\n",
    "        print(f'Epoch :{i+1} and test perplexity {test_perplexity:.4f} and BLEU score on validation is {val_bleu}')\n",
    "        if val_bleu > best_bleu4:\n",
    "            print(f'BLEU score improved ({best_bleu4:.4f} -> {val_bleu:.4f}). Saving Model!')\n",
    "            best_bleu4 = val_bleu\n",
    "            patience = 0\n",
    "            torch.save(model.state_dict(), model_path)\n",
    "        else:\n",
    "            patience += 1\n",
    "            print(f'Early stopping counter: {patience} out of {es_patience}')\n",
    "            if patience == es_patience:\n",
    "                print(f'Early stopping! Best BLEU4: {best_bleu4:.4f}')\n",
    "                break"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Launch training and evaluation.\n",
    "# NOTE(review): run() reads the global `model`, which is created in the\n",
    "# cell below -- out-of-order execution is required as written.\n",
    "run()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Greedy autoregressive decoding for a single source sentence.\n",
    "def testing_function(model, src, DEVICE):\n",
    "    \"\"\"Translate one tokenized source sentence with greedy search.\n",
    "\n",
    "    src: 1-D LongTensor of source token ids.\n",
    "    Returns (generated ids including <s>/</s>, last-layer decoder\n",
    "    cross-attention, last-layer encoder attention) as numpy arrays.\n",
    "    \"\"\"\n",
    "    model.eval()\n",
    "    target_ids = []\n",
    "    with torch.no_grad():\n",
    "        # NOTE(review): the pad mask is built from the 1-D src *before*\n",
    "        # unsqueeze; it still broadcasts inside attention, but the ordering\n",
    "        # is fragile -- confirm if pad_mask semantics ever change.\n",
    "        src_mask = model.pad_mask(src).to(DEVICE)\n",
    "        src = src.unsqueeze(0).to(DEVICE)\n",
    "        encoder_output, encoder_attention = model.encoder(src, src_mask)\n",
    "        target_ids = [0]  # start with <s>\n",
    "        for x in range(60):\n",
    "            # Re-run the decoder on the whole generated prefix each step.\n",
    "            target = torch.tensor(target_ids, dtype=torch.long).unsqueeze(0).to(DEVICE)\n",
    "            masked_target = torch.bitwise_and(model.pad_mask(target), model.mask_target(target))\n",
    "            decoder_output, attn = model.decoder(target, encoder_output, masked_target, src_mask)\n",
    "            output_logits = model.logits(decoder_output)\n",
    "            output = F.log_softmax(output_logits, dim=-1)\n",
    "            # Greedy pick: most likely token at the last position.\n",
    "            target_id = output.argmax(dim=-1)[:, -1].item()\n",
    "            target_ids.append(target_id)\n",
    "            if target_id == 2:  # </s> ends generation\n",
    "                break\n",
    "        return target_ids, attn.squeeze(0).cpu().detach().numpy(), encoder_attention.squeeze(0).cpu().detach().numpy()\n",
    "\n",
    "# Load the trained model and run inference.\n",
    "INPUT_SIZE = tokenizer_chinese.get_vocab_size()\n",
    "OUTPUT_SIZE = tokenizer_english.get_vocab_size()\n",
    "DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "model = TransformerModel(INPUT_SIZE, OUTPUT_SIZE, 512, 3, DEVICE)\n",
    "# NOTE(review): hard-coded Colab/Drive checkpoint path; fails elsewhere.\n",
    "model.load_state_dict(torch.load('/content/drive/MyDrive/Model/model.pth', map_location=DEVICE))\n",
    "model.to(DEVICE)\n",
    "# Re-create the same split as run() (same test_size and random_state).\n",
    "df_tr, df_val = train_test_split(data, test_size=0.3, random_state=42)\n",
    "df_tr = df_tr.reset_index(drop=True)\n",
    "df_val = df_val.reset_index(drop=True)\n",
    "train_data = ChineseToEnglishDataset(\n",
    "    chinese=df_tr.Chinese.values,\n",
    "    english=df_tr.English.values,\n",
    "    tokenizer_zh=tokenizer_chinese,\n",
    "    tokenizer_en=tokenizer_english\n",
    ")\n",
    "train_data_loader = torch.utils.data.DataLoader(\n",
    "    train_data,\n",
    "    batch_size=256\n",
    ")\n",
    "val_data = ChineseToEnglishDataset(\n",
    "    chinese=df_val.Chinese.values,\n",
    "    english=df_val.English.values,\n",
    "    tokenizer_zh=tokenizer_chinese,\n",
    "    tokenizer_en=tokenizer_english\n",
    ")\n",
    "validation_data_loader = torch.utils.data.DataLoader(\n",
    "    val_data,\n",
    "    batch_size=256\n",
    ")\n",
    "\n",
    "# Translate one held-out example and compare with the reference.\n",
    "src = val_data.__getitem__(157)['src']\n",
    "target_data = val_data.__getitem__(157)['target']\n",
    "print(f'Input: {tokenizer_chinese.decode(src.cpu().detach().numpy())}')\n",
    "print(f'Actual Target: {tokenizer_english.decode(target_data.cpu().detach().numpy())}')\n",
    "output, decoder_attent, encoder_atten = testing_function(model, src, DEVICE)\n",
    "# Decode token-by-token; special tokens (<s>, </s>) remain in the list.\n",
    "target = [tokenizer_english.decode([x]) for x in output]\n",
    "print(f'Predicted Target: {\" \".join(target)}')"
   ]
  }
 ],
 "metadata": {
  "accelerator": "GPU",
  "colab": {
   "collapsed_sections": [],
   "name": "Transformer from scratch for Machine Translation - Chinese to English.ipynb",
   "provenance": [],
   "toc_visible": true
  },
  "gpuClass": "standard",
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
