{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "tnxXKDjq3jEL"
   },
   "outputs": [],
   "source": [
    "import tensorflow as tf\n",
    "import matplotlib.pyplot as plt\n",
    "import matplotlib.ticker as ticker\n",
    "from sklearn.model_selection import train_test_split\n",
    "import unicodedata\n",
    "import re\n",
    "import numpy as np\n",
    "import os\n",
    "import io\n",
    "import time\n",
    "from utils.dataset import NMTDataset\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 51
    },
    "colab_type": "code",
    "id": "sy9mcFmxBP4J",
    "outputId": "a5f3a663-71c5-4d97-acd2-cc9249042d77"
   },
   "outputs": [],
   "source": [
     "# Shuffle-buffer size, batch size, and number of sentence pairs to load\n",
     "BUFFER_SIZE = 32000\n",
     "BATCH_SIZE = 64\n",
     "num_examples = 30000\n",
     "# Build train/val datasets plus the input/target tokenizers via the project helper\n",
     "dataset_creator = NMTDataset('en-spa')\n",
     "train_dataset, val_dataset, inp_lang, targ_lang = dataset_creator.call(num_examples, BUFFER_SIZE, BATCH_SIZE)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 51
    },
    "colab_type": "code",
    "id": "wVtSz89FBk0D",
    "outputId": "48d1ab71-9d23-4369-f081-12c8927814c2"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
       "Input Vocabulary Size: 9414\n",
      "Target Vocabulary Size: 4935\n"
     ]
    }
   ],
   "source": [
    "\n",
    "print(\"Inpute Vocabulary Size: {}\".format(len(inp_lang.word_index)))\n",
    "print(\"Target Vocabulary Size: {}\".format(len(targ_lang.word_index)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 34
    },
    "colab_type": "code",
    "id": "qc6-NK1GtWQt",
    "outputId": "f35ff927-5a3c-480c-95ae-2b0d63844250",
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(TensorShape([64, 16]), TensorShape([64, 11]))"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Pull one batch to confirm the (batch, sequence_length) shapes\n",
     "example_input_batch, example_target_batch = next(iter(train_dataset))\n",
     "example_input_batch.shape, example_target_batch.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "dgVsBjbVDBwX"
   },
   "outputs": [],
   "source": [
     "# +1 accounts for index 0, which the Keras tokenizer reserves for padding\n",
     "vocab_inp_size = len(inp_lang.word_index)+1\n",
     "vocab_tar_size = len(targ_lang.word_index)+1\n",
     "# Sequence lengths taken from the padded sample-batch shapes above\n",
     "max_length_input = example_input_batch.shape[1]\n",
     "max_length_output = example_target_batch.shape[1]\n",
     "\n",
     "embedding_dim = 128\n",
     "units = 1024\n",
     "steps_per_epoch = num_examples//BATCH_SIZE"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "nZ2rI24i3jFg"
   },
   "outputs": [],
   "source": [
    "# 编码器由嵌入层和一个GRU层组成。它生成输出和最后的隐藏状态。\n",
    "# Encoder Outputs shape = (BATCH_SIZE, max_length_input, units)\n",
    "# Last Hidden State Shape = (BATCH_SIZE, units)\n",
    "\n",
    "class Encoder(tf.keras.Model):\n",
    "  def __init__(self, vocab_size, embedding_dim, enc_units, batch_sz):\n",
    "    super(Encoder, self).__init__()\n",
    "    self.batch_sz = batch_sz\n",
    "    self.enc_units = enc_units\n",
    "    self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)\n",
    "    self.gru = tf.keras.layers.GRU(self.enc_units,\n",
    "                                   return_sequences=True,\n",
    "                                   return_state=True,\n",
    "                                   recurrent_initializer='glorot_uniform')\n",
    "\n",
    "  def call(self, x, hidden):\n",
    "    x = self.embedding(x)\n",
    "    output, state = self.gru(x, initial_state = hidden)\n",
    "    return output, state\n",
    "\n",
    "  def initialize_hidden_state(self):\n",
    "    return tf.zeros((self.batch_sz, self.enc_units))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 51
    },
    "colab_type": "code",
    "id": "60gSVh05Jl6l",
    "outputId": "d5ad818c-8ef1-4f16-e9fb-b04503a5ca8a"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Encoder output shape: (batch size, sequence length, units) (64, 16, 1024)\n",
      "Encoder Hidden state shape: (batch size, units) (64, 1024)\n"
     ]
    }
   ],
   "source": [
     "encoder = Encoder(vocab_inp_size, embedding_dim, units, BATCH_SIZE)\n",
     "\n",
     "# Run one sample batch through the encoder to sanity-check the output shapes\n",
     "sample_hidden = encoder.initialize_hidden_state()\n",
     "sample_output, sample_hidden = encoder(example_input_batch, sample_hidden)\n",
     "print ('Encoder output shape: (batch size, sequence length, units) {}'.format(sample_output.shape))\n",
     "print ('Encoder Hidden state shape: (batch size, units) {}'.format(sample_hidden.shape))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "umohpBN2OM94"
   },
   "outputs": [],
   "source": [
    "# 定义Bahdanau注意力机制\n",
    "class BahdanauAttention(tf.keras.layers.Layer):\n",
    "  def __init__(self, units):\n",
    "    super(BahdanauAttention, self).__init__()\n",
    "    self.W1 = tf.keras.layers.Dense(units)\n",
    "    self.W2 = tf.keras.layers.Dense(units)\n",
    "    self.V = tf.keras.layers.Dense(1)\n",
    "\n",
    "  def call(self, query, values):\n",
    "    query_with_time_axis = tf.expand_dims(query, 1)\n",
    "    score = self.V(tf.nn.tanh(\n",
    "        self.W1(query_with_time_axis) + self.W2(values)))\n",
    "    attention_weights = tf.nn.softmax(score, axis=1)\n",
    "    context_vector = attention_weights * values\n",
    "    context_vector = tf.reduce_sum(context_vector, axis=1)\n",
    "\n",
    "    return context_vector, attention_weights"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 51
    },
    "colab_type": "code",
    "id": "k534zTHiDjQU",
    "outputId": "6b662027-8c36-4798-f1a0-0f186f8dbc85"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Attention result shape: (batch size, units) (64, 1024)\n",
      "Attention weights shape: (batch_size, sequence_length, 1) (64, 16, 1)\n"
     ]
    }
   ],
   "source": [
     "# Smoke-test the attention layer with the encoder's sample outputs\n",
     "attention_layer = BahdanauAttention(10)\n",
     "attention_result, attention_weights = attention_layer(sample_hidden, sample_output)\n",
     "\n",
     "print(\"Attention result shape: (batch size, units) {}\".format(attention_result.shape))\n",
     "print(\"Attention weights shape: (batch_size, sequence_length, 1) {}\".format(attention_weights.shape))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "yJ_B3mhW3jFk"
   },
   "outputs": [],
   "source": [
     "# Decoder: attention + embedding + GRU + linear projection to the target vocabulary.\n",
     "class Decoder(tf.keras.Model):\n",
     "  def __init__(self, vocab_size, embedding_dim, dec_units, batch_sz):\n",
     "    super(Decoder, self).__init__()\n",
     "    self.batch_sz = batch_sz\n",
     "    self.dec_units = dec_units\n",
     "    self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)\n",
     "    self.gru = tf.keras.layers.GRU(self.dec_units,\n",
     "                                   return_sequences=True,\n",
     "                                   return_state=True,\n",
     "                                   recurrent_initializer='glorot_uniform')\n",
     "    # Projects the GRU output to unnormalised logits over the vocabulary\n",
     "    self.fc = tf.keras.layers.Dense(vocab_size)\n",
     "\n",
     "    self.attention = BahdanauAttention(self.dec_units)\n",
     "\n",
     "  def call(self, x, hidden, enc_output):\n",
     "    # Attend over the encoder outputs using the previous decoder state\n",
     "    context_vector, attention_weights = self.attention(hidden, enc_output)\n",
     "\n",
     "    x = self.embedding(x)\n",
     "\n",
     "    # Prepend the context vector to the embedded input token\n",
     "    x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)\n",
     "\n",
     "    # NOTE(review): the GRU runs without initial_state=hidden; the previous state\n",
     "    # reaches this step only through the attention context. This matches the TF\n",
     "    # NMT-with-attention tutorial, but confirm it is intentional.\n",
     "    output, state = self.gru(x)\n",
     "\n",
     "    # (batch, 1, units) -> (batch, units)\n",
     "    output = tf.reshape(output, (-1, output.shape[2]))\n",
     "\n",
     "    x = self.fc(output)\n",
     "\n",
     "    return x, state, attention_weights"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 34
    },
    "colab_type": "code",
    "id": "P5UY8wko3jFp",
    "outputId": "fee4a9eb-3040-47f7-b990-d3808e940516"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Decoder output shape: (batch_size, vocab size) (64, 4936)\n"
     ]
    }
   ],
   "source": [
     "decoder = Decoder(vocab_tar_size, embedding_dim, units, BATCH_SIZE)\n",
     "\n",
     "# Feed a dummy single-token batch through the decoder to check the logit shape\n",
     "sample_decoder_output, _, _ = decoder(tf.random.uniform((BATCH_SIZE, 1)),\n",
     "                                      sample_hidden, sample_output)\n",
     "\n",
     "print ('Decoder output shape: (batch_size, vocab size) {}'.format(sample_decoder_output.shape))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "WmTHr5iV3jFr"
   },
   "outputs": [],
   "source": [
     "optimizer = tf.keras.optimizers.Adam()\n",
     "# reduction='none' keeps per-example losses so padding can be masked out below\n",
     "loss_object = tf.keras.losses.SparseCategoricalCrossentropy(\n",
     "    from_logits=True, reduction='none')\n",
     "\n",
     "def loss_function(real, pred):\n",
     "  \"\"\"Cross-entropy over one timestep, ignoring padded (id 0) positions.\"\"\"\n",
     "  # Mask out positions where the target is the padding id 0\n",
     "  mask = tf.math.logical_not(tf.math.equal(real, 0))\n",
     "  loss_ = loss_object(real, pred)\n",
     "\n",
     "  mask = tf.cast(mask, dtype=loss_.dtype)\n",
     "  loss_ *= mask\n",
     "\n",
     "  # NOTE(review): reduce_mean divides by the full batch size, including masked\n",
     "  # (padded) positions, so the per-token loss is slightly understated.\n",
     "  # reduce_sum(loss_) / reduce_sum(mask) would be the exact masked mean.\n",
     "  return tf.reduce_mean(loss_)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "Zj8bXQTgNwrF"
   },
   "outputs": [],
   "source": [
     "# Checkpoint the optimizer and both models (saved every 2 epochs by the training loop)\n",
     "checkpoint_dir = './training_checkpoints'\n",
     "checkpoint_prefix = os.path.join(checkpoint_dir, \"ckpt\")\n",
     "checkpoint = tf.train.Checkpoint(optimizer=optimizer,\n",
     "                                 encoder=encoder,\n",
     "                                 decoder=decoder)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "sC9ArXSsVfqn"
   },
   "outputs": [],
   "source": [
     "@tf.function\n",
     "def train_step(inp, targ, enc_hidden):\n",
     "  \"\"\"Run one teacher-forced training step; returns the mean per-timestep loss.\"\"\"\n",
     "  loss = 0\n",
     "\n",
     "  with tf.GradientTape() as tape:\n",
     "    enc_output, enc_hidden = encoder(inp, enc_hidden)\n",
     "\n",
     "    # Decoder starts from the encoder's final hidden state\n",
     "    dec_hidden = enc_hidden\n",
     "\n",
     "    # Every target sequence begins with the <start> token\n",
     "    dec_input = tf.expand_dims([targ_lang.word_index['<start>']] * BATCH_SIZE, 1)\n",
     "\n",
     "    for t in range(1, targ.shape[1]):\n",
     "      predictions, dec_hidden, _ = decoder(dec_input, dec_hidden, enc_output)\n",
     "\n",
     "      loss += loss_function(targ[:, t], predictions)\n",
     "\n",
     "      # Teacher forcing: feed the ground-truth token, not the prediction\n",
     "      dec_input = tf.expand_dims(targ[:, t], 1)\n",
     "\n",
     "  # Average the summed timestep losses over the sequence length\n",
     "  batch_loss = (loss / int(targ.shape[1]))\n",
     "\n",
     "  variables = encoder.trainable_variables + decoder.trainable_variables\n",
     "\n",
     "  gradients = tape.gradient(loss, variables)\n",
     "\n",
     "  optimizer.apply_gradients(zip(gradients, variables))\n",
     "\n",
     "  return batch_loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 1000
    },
    "colab_type": "code",
    "id": "ddefjBMa3jF0",
    "outputId": "b9588938-68d5-4ac1-a2cd-d6f07d3d2b57"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1 Batch 0 Loss 4.4696\n",
      "Epoch 1 Batch 100 Loss 2.2149\n",
      "Epoch 1 Batch 200 Loss 2.0552\n",
      "Epoch 1 Batch 300 Loss 1.7684\n",
      "Epoch 1 Loss 1.6738\n",
      "Time taken for 1 epoch 24.254799604415894 sec\n",
      "\n",
      "Epoch 2 Batch 0 Loss 1.6094\n",
      "Epoch 2 Batch 100 Loss 1.5178\n",
      "Epoch 2 Batch 200 Loss 1.5619\n",
      "Epoch 2 Batch 300 Loss 1.3594\n",
      "Epoch 2 Loss 1.1796\n",
      "Time taken for 1 epoch 16.71476650238037 sec\n",
      "\n",
      "Epoch 3 Batch 0 Loss 1.3065\n",
      "Epoch 3 Batch 100 Loss 1.1197\n",
      "Epoch 3 Batch 200 Loss 1.1242\n",
      "Epoch 3 Batch 300 Loss 1.0515\n",
      "Epoch 3 Loss 0.9171\n",
      "Time taken for 1 epoch 15.794095516204834 sec\n",
      "\n",
      "Epoch 4 Batch 0 Loss 0.9699\n",
      "Epoch 4 Batch 100 Loss 0.8591\n",
      "Epoch 4 Batch 200 Loss 0.8219\n",
      "Epoch 4 Batch 300 Loss 0.7628\n",
      "Epoch 4 Loss 0.6914\n",
      "Time taken for 1 epoch 16.67408585548401 sec\n",
      "\n",
      "Epoch 5 Batch 0 Loss 0.6373\n",
      "Epoch 5 Batch 100 Loss 0.6503\n",
      "Epoch 5 Batch 200 Loss 0.6269\n",
      "Epoch 5 Batch 300 Loss 0.5792\n",
      "Epoch 5 Loss 0.4973\n",
      "Time taken for 1 epoch 15.842775821685791 sec\n",
      "\n",
      "Epoch 6 Batch 0 Loss 0.4419\n",
      "Epoch 6 Batch 100 Loss 0.4350\n",
      "Epoch 6 Batch 200 Loss 0.4194\n",
      "Epoch 6 Batch 300 Loss 0.3688\n",
      "Epoch 6 Loss 0.3429\n",
      "Time taken for 1 epoch 16.673864364624023 sec\n",
      "\n",
      "Epoch 7 Batch 0 Loss 0.2914\n",
      "Epoch 7 Batch 100 Loss 0.2792\n",
      "Epoch 7 Batch 200 Loss 0.3043\n",
      "Epoch 7 Batch 300 Loss 0.4285\n",
      "Epoch 7 Loss 0.2339\n",
      "Time taken for 1 epoch 15.876803636550903 sec\n",
      "\n",
      "Epoch 8 Batch 0 Loss 0.1814\n",
      "Epoch 8 Batch 100 Loss 0.1978\n",
      "Epoch 8 Batch 200 Loss 0.2065\n",
      "Epoch 8 Batch 300 Loss 0.2078\n",
      "Epoch 8 Loss 0.1590\n",
      "Time taken for 1 epoch 16.750229835510254 sec\n",
      "\n",
      "Epoch 9 Batch 0 Loss 0.1296\n",
      "Epoch 9 Batch 100 Loss 0.1596\n",
      "Epoch 9 Batch 200 Loss 0.1459\n",
      "Epoch 9 Batch 300 Loss 0.1850\n",
      "Epoch 9 Loss 0.1123\n",
      "Time taken for 1 epoch 15.941059827804565 sec\n",
      "\n",
      "Epoch 10 Batch 0 Loss 0.1046\n",
      "Epoch 10 Batch 100 Loss 0.0870\n",
      "Epoch 10 Batch 200 Loss 0.1620\n",
      "Epoch 10 Batch 300 Loss 0.1324\n",
      "Epoch 10 Loss 0.0844\n",
      "Time taken for 1 epoch 16.72890329360962 sec\n",
      "\n"
     ]
    }
   ],
   "source": [
     "EPOCHS = 10\n",
     "\n",
     "for epoch in range(EPOCHS):\n",
     "  start = time.time()\n",
     "\n",
     "  # Fresh all-zero encoder state at the start of each epoch\n",
     "  enc_hidden = encoder.initialize_hidden_state()\n",
     "  total_loss = 0\n",
     "\n",
     "  for (batch, (inp, targ)) in enumerate(train_dataset.take(steps_per_epoch)):\n",
     "    batch_loss = train_step(inp, targ, enc_hidden)\n",
     "    total_loss += batch_loss\n",
     "\n",
     "    if batch % 100 == 0:\n",
     "      print('Epoch {} Batch {} Loss {:.4f}'.format(epoch + 1,\n",
     "                                                   batch,\n",
     "                                                   batch_loss.numpy()))\n",
     "  # saving (checkpoint) the model every 2 epochs\n",
     "  if (epoch + 1) % 2 == 0:\n",
     "    checkpoint.save(file_prefix = checkpoint_prefix)\n",
     "\n",
     "  print('Epoch {} Loss {:.4f}'.format(epoch + 1,\n",
     "                                      total_loss / steps_per_epoch))\n",
     "  print('Time taken for 1 epoch {} sec\\n'.format(time.time() - start))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "EbQpyYs13jF_"
   },
   "outputs": [],
   "source": [
    "def evaluate(sentence):\n",
    "\n",
    "  sentence = dataset_creator.preprocess_sentence(sentence)\n",
    "\n",
    "  inputs = [inp_lang.word_index[i] for i in sentence.split(' ')]\n",
    "  inputs = tf.keras.preprocessing.sequence.pad_sequences([inputs],\n",
    "                                                         maxlen=max_length_input,\n",
    "                                                         padding='post')\n",
    "  inputs = tf.convert_to_tensor(inputs)\n",
    "\n",
    "  result = ''\n",
    "\n",
    "  hidden = [tf.zeros((1, units))]\n",
    "  enc_out, enc_hidden = encoder(inputs, hidden)\n",
    "\n",
    "  dec_hidden = enc_hidden\n",
    "  dec_input = tf.expand_dims([targ_lang.word_index['<start>']], 0)\n",
    "\n",
    "  for t in range(max_length_output):\n",
    "    predictions, dec_hidden, attention_weights = decoder(dec_input,\n",
    "                                                         dec_hidden,\n",
    "                                                         enc_out)\n",
    "\n",
    "    predicted_id = tf.argmax(predictions[0]).numpy()\n",
    "\n",
    "    result += targ_lang.index_word[predicted_id] + ' '\n",
    "\n",
    "    if targ_lang.index_word[predicted_id] == '<end>':\n",
    "      return result, sentence\n",
    "\n",
    "    # the predicted ID is fed back into the model\n",
    "    dec_input = tf.expand_dims([predicted_id], 0)\n",
    "\n",
    "  return result, sentence"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "sl9zUHzg3jGI"
   },
   "outputs": [],
   "source": [
    "def translate(sentence):\n",
    "  result, sentence = evaluate(sentence)\n",
    "\n",
    "  print('Input: %s' % (sentence))\n",
    "  print('Predicted translation: {}'.format(result))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 34
    },
    "colab_type": "code",
    "id": "UJpT9D5_OgP6",
    "outputId": "d1628fc9-1681-48c1-840e-61677a7e49c3"
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<tensorflow.python.checkpoint.checkpoint.CheckpointLoadStatus at 0x270d7fb6b60>"
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Restore the most recent checkpoint (encoder/decoder weights + optimizer state)\n",
     "checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 51
    },
    "colab_type": "code",
    "id": "WrAM0FDomq3E",
    "outputId": "5104049f-b169-42dd-a26d-63eedcceda2d"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Input: <start> hace mucho frio aqui . <end>\n",
      "Predicted translation: it s very cold here . <end> \n"
     ]
    }
   ],
   "source": [
     "# \"it's very cold here\" (see output below)\n",
     "translate(u'hace mucho frio aqui.')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 51
    },
    "colab_type": "code",
    "id": "zSx2iM36EZQZ",
    "outputId": "5b8dc076-acb5-4d62-d66f-13f8d0234adb"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Input: <start> esta es mi vida . <end>\n",
      "Predicted translation: this is my life . <end> \n"
     ]
    }
   ],
   "source": [
     "# \"this is my life\" (see output below)\n",
     "translate(u'esta es mi vida.')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 51
    },
    "colab_type": "code",
    "id": "A3LLCx3ZE0Ls",
    "outputId": "8c970bcf-97d8-480d-ed79-cfc80c757939"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Input: <start> ¿ todavia estan en casa ? <end>\n",
      "Predicted translation: are you still at home ? <end> \n"
     ]
    }
   ],
   "source": [
     "# \"are you still at home?\" (see output below)\n",
     "translate(u'¿todavia estan en casa?')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 51
    },
    "colab_type": "code",
    "id": "DUQVLVqUE1YW",
    "outputId": "64f3c639-9bd1-41dc-de13-b7cd7f0e3c03"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Input: <start> trata de averiguarlo . <end>\n",
      "Predicted translation: try to figure it out . <end> \n"
     ]
    }
   ],
   "source": [
     "# \"try to figure it out\" (see output below)\n",
     "translate(u'trata de averiguarlo.')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {
    "colab": {},
    "colab_type": "code",
    "id": "92aXUSuvwzOt"
   },
   "outputs": [],
   "source": [
    "def translate_batch(test_dataset):\n",
    "  with open('output_text.txt', 'w') as f:\n",
    "    for (inputs, targets) in test_dataset:\n",
    "      outputs = np.zeros((BATCH_SIZE, max_length_output),dtype=np.int16)\n",
    "      hidden_state = tf.zeros((BATCH_SIZE, units))\n",
    "      enc_output, dec_h = encoder(inputs, hidden_state)\n",
    "      dec_input = tf.expand_dims([targ_lang.word_index['<start>']] * BATCH_SIZE, 1)\n",
    "      for t in range(max_length_output):\n",
    "        preds, dec_h, _ = decoder(dec_input, dec_h, enc_output)\n",
    "        predicted_id = tf.argmax(preds, axis=1).numpy()\n",
    "        outputs[:, t] = predicted_id\n",
    "        dec_input = tf.expand_dims(predicted_id, 1)\n",
    "      outputs = targ_lang.sequences_to_texts(outputs)\n",
    "      for t, item in enumerate(outputs):\n",
    "        try:\n",
    "          i = item.index('<end>')\n",
    "          f.write(\"%s\\n\" %item[:i])\n",
    "        except: \n",
    "          f.write(\"%s \\n\" % item) # For those translated sequences which didn't correctly translated and have <end> token.\n",
    "\n",
    "outputs = translate_batch(val_dataset)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 204
    },
    "colab_type": "code",
    "id": "t2Ci8zsFbL8q",
    "outputId": "a90041ce-b3fa-449c-a410-1ccc08f659ff"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<class 'numpy.ndarray'>\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "['<start> it looks like a duck . <end> <OOV> <OOV> <OOV>',\n",
       " '<start> i m undressing . <end> <OOV> <OOV> <OOV> <OOV> <OOV>',\n",
       " '<start> i am married . <end> <OOV> <OOV> <OOV> <OOV> <OOV>',\n",
       " '<start> ask them . <end> <OOV> <OOV> <OOV> <OOV> <OOV> <OOV>',\n",
       " '<start> i am on duty now . <end> <OOV> <OOV> <OOV>',\n",
       " '<start> i m serious . <end> <OOV> <OOV> <OOV> <OOV> <OOV>',\n",
       " '<start> the roses smell good . <end> <OOV> <OOV> <OOV> <OOV>',\n",
       " '<start> do you hear anything ? <end> <OOV> <OOV> <OOV> <OOV>',\n",
       " '<start> can you believe that ? <end> <OOV> <OOV> <OOV> <OOV>',\n",
       " '<start> take tom inside . <end> <OOV> <OOV> <OOV> <OOV> <OOV>']"
      ]
     },
     "execution_count": 25,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Show the reference target sentences for the first validation batch\n",
     "val_targets = list(val_dataset.take(1))\n",
     "val_targets = np.asarray(val_targets[0][1])\n",
     "print(type(val_targets))\n",
     "targ_lang.sequences_to_texts(val_targets)[:10]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "BLEU Score: 0.3320\n"
     ]
    }
   ],
   "source": [
    "import nltk\n",
    "from nltk.translate.bleu_score import sentence_bleu, corpus_bleu\n",
    "\n",
    "# 生成模型翻译\n",
    "translated_sentences = []\n",
    "reference_sentences = []\n",
    "\n",
    "# 假设 val_dataset 是验证数据集\n",
    "for (inputs, targets) in val_dataset:\n",
    "    outputs = np.zeros((BATCH_SIZE, max_length_output), dtype=np.int16)\n",
    "    hidden_state = tf.zeros((BATCH_SIZE, units))\n",
    "    enc_output, dec_h = encoder(inputs, hidden_state)\n",
    "    dec_input = tf.expand_dims([targ_lang.word_index['<start>']] * BATCH_SIZE, 1)\n",
    "    for t in range(max_length_output):\n",
    "        preds, dec_h, _ = decoder(dec_input, dec_h, enc_output)\n",
    "        predicted_id = tf.argmax(preds, axis=1).numpy()\n",
    "        outputs[:, t] = predicted_id\n",
    "        dec_input = tf.expand_dims(predicted_id, 1)\n",
    "    outputs = targ_lang.sequences_to_texts(outputs)\n",
    "    for i, sentence in enumerate(outputs):\n",
    "        try:\n",
    "            end_index = sentence.index('<end>')\n",
    "            translated_sentences.append(sentence[:end_index].strip().split(' '))\n",
    "        except ValueError:\n",
    "            translated_sentences.append(sentence.strip().split(' '))\n",
    "\n",
    "    targets = targ_lang.sequences_to_texts(targets.numpy())\n",
    "    for target in targets:\n",
    "        try:\n",
    "            end_index = target.index('<end>')\n",
    "            reference_sentences.append([target[:end_index].strip().split(' ')])\n",
    "        except ValueError:\n",
    "            reference_sentences.append([target.strip().split(' ')])\n",
    "\n",
    "# 计算 BLEU 分数\n",
    "bleu_score = corpus_bleu(reference_sentences, translated_sentences)\n",
    "print(f\"BLEU Score: {bleu_score:.4f}\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "accelerator": "GPU",
  "colab": {
   "collapsed_sections": [],
   "name": "4_enc_dec_with_BahdanauAttention.ipynb",
   "provenance": [],
   "toc_visible": true
  },
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
