{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "071c7845",
   "metadata": {},
   "source": [
    "下面介绍基于循环神经网络的编码器和解码器的代码实现。首先是作为编码器的循环神经网络。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "f682c7d8",
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"\n",
    "代码修改自GitHub项目pytorch/tutorials\n",
    "（Copyright (c) 2023, PyTorch, BSD-3-Clause License（见附录））\n",
    "\"\"\"\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "\n",
    "class RNNEncoder(nn.Module):\n",
    "    def __init__(self, vocab_size, hidden_size):\n",
    "        super(RNNEncoder, self).__init__()\n",
    "        # 隐层大小\n",
    "        self.hidden_size = hidden_size\n",
    "        # 词表大小\n",
    "        self.vocab_size = vocab_size\n",
    "        # 词嵌入层\n",
    "        self.embedding = nn.Embedding(self.vocab_size,\\\n",
    "            self.hidden_size)\n",
    "        self.gru = nn.GRU(self.hidden_size, self.hidden_size,\\\n",
    "            batch_first=True)\n",
    "\n",
    "    def forward(self, inputs):\n",
    "        # inputs: batch * seq_len\n",
    "        # 注意门控循环单元使用batch_first=True，因此输入需要至少batch为1\n",
    "        features = self.embedding(inputs)\n",
    "        output, hidden = self.gru(features)\n",
    "        return output, hidden"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0dc8af74",
   "metadata": {},
   "source": [
    "接下来是作为解码器的另一个循环神经网络的代码实现。<!--我们使用编码器最终的输出用作解码器的初始隐状态，这个输出向量有时称作上下文向量（context vector），它编码了整个源序列的信息。解码器最初的输入词元是“\\<sos\\>”（start-of-string）。解码器的目标为，输入编码器隐状态，一步一步解码出整个目标序列。解码器每一步的输入可以是真实目标序列，也可以是解码器上一步的预测结果。-->"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "9beee561",
   "metadata": {},
   "outputs": [],
   "source": [
    "class RNNDecoder(nn.Module):\n",
    "    def __init__(self, vocab_size, hidden_size):\n",
    "        super(RNNDecoder, self).__init__()\n",
    "        self.hidden_size = hidden_size\n",
    "        self.vocab_size = vocab_size\n",
    "        # 序列到序列任务并不限制编码器和解码器输入同一种语言，\n",
    "        # 因此解码器也需要定义一个嵌入层\n",
    "        self.embedding = nn.Embedding(self.vocab_size, self.hidden_size)\n",
    "        self.gru = nn.GRU(self.hidden_size, self.hidden_size,\\\n",
    "            batch_first=True)\n",
    "        # 用于将输出的隐状态映射为词表上的分布\n",
    "        self.out = nn.Linear(self.hidden_size, self.vocab_size)\n",
    "\n",
    "    # 解码整个序列\n",
    "    def forward(self, encoder_outputs, encoder_hidden, target_tensor=None):\n",
    "        batch_size = encoder_outputs.size(0)\n",
    "        # 从<sos>开始解码\n",
    "        decoder_input = torch.empty(batch_size, 1,\\\n",
    "            dtype=torch.long).fill_(SOS_token)\n",
    "        decoder_hidden = encoder_hidden\n",
    "        decoder_outputs = []\n",
    "        \n",
    "        # 如果目标序列确定，最大解码步数确定；\n",
    "        # 如果目标序列不确定，解码到最大长度\n",
    "        if target_tensor is not None:\n",
    "            seq_length = target_tensor.size(1)\n",
    "        else:\n",
    "            seq_length = MAX_LENGTH\n",
    "        \n",
    "        # 进行seq_length次解码\n",
    "        for i in range(seq_length):\n",
    "            # 每次输入一个词和一个隐状态\n",
    "            decoder_output, decoder_hidden = self.forward_step(\\\n",
    "                decoder_input, decoder_hidden)\n",
    "            decoder_outputs.append(decoder_output)\n",
    "\n",
    "            if target_tensor is not None:\n",
    "                # teacher forcing: 使用真实目标序列作为下一步的输入\n",
    "                decoder_input = target_tensor[:, i].unsqueeze(1)\n",
    "            else:\n",
    "                # 从当前步的输出概率分布中选取概率最大的预测结果\n",
    "                # 作为下一步的输入\n",
    "                _, topi = decoder_output.topk(1)\n",
    "                # 使用detach从当前计算图中分离，避免回传梯度\n",
    "                decoder_input = topi.squeeze(-1).detach()\n",
    "\n",
    "        decoder_outputs = torch.cat(decoder_outputs, dim=1)\n",
    "        decoder_outputs = F.log_softmax(decoder_outputs, dim=-1)\n",
    "        # 为了与AttnRNNDecoder接口保持统一，最后输出None\n",
    "        return decoder_outputs, decoder_hidden, None\n",
    "\n",
    "    # 解码一步\n",
    "    def forward_step(self, input, hidden):\n",
    "        output = self.embedding(input)\n",
    "        output = F.relu(output)\n",
    "        output, hidden = self.gru(output, hidden)\n",
    "        output = self.out(output)\n",
    "        return output, hidden\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "38306ed9",
   "metadata": {},
   "source": [
    "下面介绍基于注意力机制的循环神经网络解码器的代码实现。\n",
    "我们使用一个注意力层来计算注意力权重，其输入为解码器的输入和隐状态。\n",
    "这里使用Bahdanau注意力（Bahdanau attention），这是序列到序列模型中应用最广泛的注意力机制，特别是机器翻译任务。该注意力机制使用一个对齐模型（alignment model）来计算编码器和解码器隐状态之间的注意力分数，具体来讲就是一个前馈神经网络。相比于点乘注意力，Bahdanau注意力利用了非线性变换。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "d675aa6c",
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"\n",
    "代码修改自GitHub项目pytorch/tutorials\n",
    "（Copyright (c) 2023, PyTorch, BSD-3-Clause License（见附录））\n",
    "\"\"\"\n",
    "import torch.nn.functional as F\n",
    "\n",
    "class BahdanauAttention(nn.Module):\n",
    "    def __init__(self, hidden_size):\n",
    "        super(BahdanauAttention, self).__init__()\n",
    "        self.Wa = nn.Linear(hidden_size, hidden_size)\n",
    "        self.Ua = nn.Linear(hidden_size, hidden_size)\n",
    "        self.Va = nn.Linear(hidden_size, 1)\n",
    "\n",
    "    def forward(self, query, keys):\n",
    "        # query: batch * 1 * hidden_size\n",
    "        # keys: batch * seq_length * hidden_size\n",
    "        # 这一步用到了广播（broadcast）机制\n",
    "        scores = self.Va(torch.tanh(self.Wa(query) + self.Ua(keys)))\n",
    "        scores = scores.squeeze(2).unsqueeze(1)\n",
    "\n",
    "        weights = F.softmax(scores, dim=-1)\n",
    "        context = torch.bmm(weights, keys)\n",
    "        return context, weights\n",
    "\n",
    "class AttnRNNDecoder(nn.Module):\n",
    "    def __init__(self, vocab_size, hidden_size):\n",
    "        super(AttnRNNDecoder, self).__init__()\n",
    "        self.hidden_size = hidden_size\n",
    "        self.vocab_size = vocab_size\n",
    "        self.embedding = nn.Embedding(self.vocab_size, self.hidden_size)\n",
    "        self.attention = BahdanauAttention(hidden_size)\n",
    "        # 输入来自解码器输入和上下文向量，因此输入大小为2 * hidden_size\n",
    "        self.gru = nn.GRU(2 * self.hidden_size, self.hidden_size,\\\n",
    "            batch_first=True)\n",
    "        # 用于将注意力的结果映射为词表上的分布\n",
    "        self.out = nn.Linear(self.hidden_size, self.vocab_size)\n",
    "\n",
    "    # 解码整个序列\n",
    "    def forward(self, encoder_outputs, encoder_hidden, target_tensor=None):\n",
    "        batch_size = encoder_outputs.size(0)\n",
    "        # 从<sos>开始解码\n",
    "        decoder_input = torch.empty(batch_size, 1, dtype=\\\n",
    "            torch.long).fill_(SOS_token)\n",
    "        decoder_hidden = encoder_hidden\n",
    "        decoder_outputs = []\n",
    "        attentions = []\n",
    "\n",
    "        # 如果目标序列确定，最大解码步数确定；\n",
    "        # 如果目标序列不确定，解码到最大长度\n",
    "        if target_tensor is not None:\n",
    "            seq_length = target_tensor.size(1)\n",
    "        else:\n",
    "            seq_length = MAX_LENGTH\n",
    "        \n",
    "        # 进行seq_length次解码\n",
    "        for i in range(seq_length):\n",
    "            # 每次输入一个词和一个隐状态\n",
    "            decoder_output, decoder_hidden, attn_weights = \\\n",
    "                self.forward_step(\n",
    "                    decoder_input, decoder_hidden, encoder_outputs\n",
    "            )\n",
    "            decoder_outputs.append(decoder_output)\n",
    "            attentions.append(attn_weights)\n",
    "\n",
    "            if target_tensor is not None:\n",
    "                # teacher forcing: 使用真实目标序列作为下一步的输入\n",
    "                decoder_input = target_tensor[:, i].unsqueeze(1)\n",
    "            else:\n",
    "                # 从当前步的输出概率分布中选取概率最大的预测结果\n",
    "                # 作为下一步的输入\n",
    "                _, topi = decoder_output.topk(1)\n",
    "                # 使用detach从当前计算图中分离，避免回传梯度\n",
    "                decoder_input = topi.squeeze(-1).detach()\n",
    "\n",
    "        decoder_outputs = torch.cat(decoder_outputs, dim=1)\n",
    "        decoder_outputs = F.log_softmax(decoder_outputs, dim=-1)\n",
    "        attentions = torch.cat(attentions, dim=1)\n",
    "        # 与RNNDecoder接口保持统一，最后输出注意力权重\n",
    "        return decoder_outputs, decoder_hidden, attentions\n",
    "\n",
    "    # 解码一步\n",
    "    def forward_step(self, input, hidden, encoder_outputs):\n",
    "        embeded =  self.embedding(input)\n",
    "        # 输出的隐状态为1 * batch * hidden_size，\n",
    "        # 注意力的输入需要batch * 1 * hidden_size\n",
    "        query = hidden.permute(1, 0, 2)\n",
    "        context, attn_weights = self.attention(query, encoder_outputs)\n",
    "        input_gru = torch.cat((embeded, context), dim=2)\n",
    "        # 输入的隐状态需要1 * batch * hidden_size\n",
    "        output, hidden = self.gru(input_gru, hidden)\n",
    "        output = self.out(output)\n",
    "        return output, hidden, attn_weights\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "7d63b031",
   "metadata": {},
   "source": [
    "\n",
    "接下来我们实现基于Transformer的编码器和解码器。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "c9a91cf4",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import numpy as np\n",
    "import sys\n",
    "\n",
    "from transformer import *\n",
    "\n",
    "class TransformerEncoder(nn.Module):\n",
    "    def __init__(self, vocab_size, max_len, hidden_size, num_heads,\\\n",
    "            dropout, intermediate_size):\n",
    "        super().__init__()\n",
    "        self.embedding_layer = EmbeddingLayer(vocab_size, max_len,\\\n",
    "            hidden_size)\n",
    "        # 直接使用TransformerLayer作为编码层，简单起见只使用一层\n",
    "        self.layer = TransformerLayer(hidden_size, num_heads,\\\n",
    "            dropout, intermediate_size)\n",
    "        # 与TransformerLM不同，编码器不需要线性层用于输出\n",
    "        \n",
    "    def forward(self, input_ids):\n",
    "        # 这里实现的forward()函数一次只能处理一句话，\n",
    "        # 如果想要支持批次运算，需要根据输入序列的长度返回隐状态\n",
    "        assert input_ids.ndim == 2 and input_ids.size(0) == 1\n",
    "        seq_len = input_ids.size(1)\n",
    "        assert seq_len <= self.embedding_layer.max_len\n",
    "        \n",
    "        # 1 * seq_len\n",
    "        pos_ids = torch.unsqueeze(torch.arange(seq_len), dim=0)\n",
    "        attention_mask = torch.ones((1, seq_len), dtype=torch.int32)\n",
    "        input_states = self.embedding_layer(input_ids, pos_ids)\n",
    "        hidden_states = self.layer(input_states, attention_mask)\n",
    "        return hidden_states, attention_mask"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "5e8cfc9c",
   "metadata": {},
   "outputs": [],
   "source": [
    "class MultiHeadCrossAttention(MultiHeadSelfAttention):\n",
    "    def forward(self, tgt, tgt_mask, src, src_mask):\n",
    "        \"\"\"\n",
    "        tgt: query, batch_size * tgt_seq_len * hidden_size\n",
    "        tgt_mask: batch_size * tgt_seq_len\n",
    "        src: keys/values, batch_size * src_seq_len * hidden_size\n",
    "        src_mask: batch_size * src_seq_len\n",
    "        \"\"\"\n",
    "        # (batch_size * num_heads) * seq_len * (hidden_size / num_heads)\n",
    "        queries = self.transpose_qkv(self.W_q(tgt))\n",
    "        keys = self.transpose_qkv(self.W_k(src))\n",
    "        values = self.transpose_qkv(self.W_v(src))\n",
    "        # 这一步与自注意力不同，计算交叉掩码\n",
    "        # batch_size * tgt_seq_len * src_seq_len\n",
    "        attention_mask = tgt_mask.unsqueeze(2) * src_mask.unsqueeze(1)\n",
    "        # 重复张量的元素，用以支持多个注意力头的运算\n",
    "        # (batch_size * num_heads) * tgt_seq_len * src_seq_len\n",
    "        attention_mask = torch.repeat_interleave(attention_mask,\\\n",
    "            repeats=self.num_heads, dim=0)\n",
    "        # (batch_size * num_heads) * tgt_seq_len * \\\n",
    "        # (hidden_size / num_heads)\n",
    "        output = self.attention(queries, keys, values, attention_mask)\n",
    "        # batch * tgt_seq_len * hidden_size\n",
    "        output_concat = self.transpose_output(output)\n",
    "        return self.W_o(output_concat)\n",
    "\n",
    "# TransformerDecoderLayer比TransformerLayer多了交叉多头注意力\n",
    "class TransformerDecoderLayer(nn.Module):\n",
    "    def __init__(self, hidden_size, num_heads, dropout,\\\n",
    "                 intermediate_size):\n",
    "        super().__init__()\n",
    "        self.self_attention = MultiHeadSelfAttention(hidden_size,\\\n",
    "            num_heads, dropout)\n",
    "        self.add_norm1 = AddNorm(hidden_size, dropout)\n",
    "        self.enc_attention = MultiHeadCrossAttention(hidden_size,\\\n",
    "            num_heads, dropout)\n",
    "        self.add_norm2 = AddNorm(hidden_size, dropout)\n",
    "        self.fnn = PositionWiseFNN(hidden_size, intermediate_size)\n",
    "        self.add_norm3 = AddNorm(hidden_size, dropout)\n",
    "\n",
    "    def forward(self, src_states, src_mask, tgt_states, tgt_mask):\n",
    "        # 掩码多头自注意力\n",
    "        tgt = self.add_norm1(tgt_states, self.self_attention(\\\n",
    "            tgt_states, tgt_states, tgt_states, tgt_mask))\n",
    "        # 交叉多头自注意力\n",
    "        tgt = self.add_norm2(tgt, self.enc_attention(tgt,\\\n",
    "            tgt_mask, src_states, src_mask))\n",
    "        # 前馈神经网络\n",
    "        return self.add_norm3(tgt, self.fnn(tgt))\n",
    "\n",
    "class TransformerDecoder(nn.Module):\n",
    "    def __init__(self, vocab_size, max_len, hidden_size, num_heads,\\\n",
    "                 dropout, intermediate_size):\n",
    "        super().__init__()\n",
    "        self.embedding_layer = EmbeddingLayer(vocab_size, max_len,\\\n",
    "            hidden_size)\n",
    "        # 简单起见只使用一层\n",
    "        self.layer = TransformerDecoderLayer(hidden_size, num_heads,\\\n",
    "            dropout, intermediate_size)\n",
    "        # 解码器与TransformerLM一样，需要输出层\n",
    "        self.output_layer = nn.Linear(hidden_size, vocab_size)\n",
    "        \n",
    "    def forward(self, src_states, src_mask, tgt_tensor=None):\n",
    "        # 确保一次只输入一句话，形状为1 * seq_len * hidden_size\n",
    "        assert src_states.ndim == 3 and src_states.size(0) == 1\n",
    "        \n",
    "        if tgt_tensor is not None:\n",
    "            # 确保一次只输入一句话，形状为1 * seq_len\n",
    "            assert tgt_tensor.ndim == 2 and tgt_tensor.size(0) == 1\n",
    "            seq_len = tgt_tensor.size(1)\n",
    "            assert seq_len <= self.embedding_layer.max_len\n",
    "        else:\n",
    "            seq_len = self.embedding_layer.max_len\n",
    "        \n",
    "        decoder_input = torch.empty(1, 1, dtype=torch.long).\\\n",
    "            fill_(SOS_token)\n",
    "        decoder_outputs = []\n",
    "        \n",
    "        for i in range(seq_len):\n",
    "            decoder_output = self.forward_step(decoder_input,\\\n",
    "                src_mask, src_states)\n",
    "            decoder_outputs.append(decoder_output)\n",
    "            \n",
    "            if tgt_tensor is not None:\n",
    "                # teacher forcing: 使用真实目标序列作为下一步的输入\n",
    "                decoder_input = torch.cat((decoder_input,\\\n",
    "                    tgt_tensor[:, i:i+1]), 1)\n",
    "            else:\n",
    "                # 从当前步的输出概率分布中选取概率最大的预测结果\n",
    "                # 作为下一步的输入\n",
    "                _, topi = decoder_output.topk(1)\n",
    "                # 使用detach从当前计算图中分离，避免回传梯度\n",
    "                decoder_input = torch.cat((decoder_input,\\\n",
    "                    topi.squeeze(-1).detach()), 1)\n",
    "                \n",
    "        decoder_outputs = torch.cat(decoder_outputs, dim=1)\n",
    "        decoder_outputs = F.log_softmax(decoder_outputs, dim=-1)\n",
    "        # 与RNNDecoder接口保持统一\n",
    "        return decoder_outputs, None, None\n",
    "        \n",
    "    # 解码一步，与RNNDecoder接口略有不同，RNNDecoder一次输入\n",
    "    # 一个隐状态和一个词，输出一个分布、一个隐状态\n",
    "    # TransformerDecoder不需要输入隐状态，\n",
    "    # 输入整个目标端历史输入序列，输出一个分布，不输出隐状态\n",
    "    def forward_step(self, tgt_inputs, src_mask, src_states):\n",
    "        seq_len = tgt_inputs.size(1)\n",
    "        # 1 * seq_len\n",
    "        pos_ids = torch.unsqueeze(torch.arange(seq_len), dim=0)\n",
    "        tgt_mask = torch.ones((1, seq_len), dtype=torch.int32)\n",
    "        tgt_states = self.embedding_layer(tgt_inputs, pos_ids)\n",
    "        hidden_states = self.layer(src_states, src_mask, tgt_states,\\\n",
    "            tgt_mask)\n",
    "        output = self.output_layer(hidden_states[:, -1:, :])\n",
    "        return output"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0e58b811",
   "metadata": {},
   "source": [
    "下面以机器翻译（中-英）为例展示如何训练序列到序列模型。这里使用的是中英文Books数据，其中中文标题来源于第4章所使用的数据集，英文标题是使用已训练好的机器翻译模型从中文标题翻译而得，因此该数据并不保证准确性，仅用于演示。\n",
    "\n",
    "首先需要对源语言和目标语言分别建立索引，并记录词频。\n",
    "\n",
    "<!-- 代码来自：\n",
    "\n",
    "https://pytorch.org/tutorials/intermediate/seq2seq_translation_tutorial.html <span style=\"color:blue;font-size:20px\">BSD-3-Clause license</span>\n",
    "\n",
    "下载链接：\n",
    "\n",
    "https://github.com/zwhe99/LLM-MT-Eval/blob/main/data/raw/wmt22.zh-en.en\n",
    "\n",
    "https://github.com/zwhe99/LLM-MT-Eval/blob/main/data/raw/wmt22.zh-en.zh\n",
    " -->"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "6b258fc5",
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"\n",
    "代码修改自GitHub项目pytorch/tutorials\n",
    "（Copyright (c) 2023, PyTorch, BSD-3-Clause License（见附录））\n",
    "\"\"\"\n",
    "SOS_token = 0\n",
    "EOS_token = 1\n",
    "\n",
    "class Lang:\n",
    "    def __init__(self, name):\n",
    "        self.name = name\n",
    "        self.word2index = {}\n",
    "        self.word2count = {}\n",
    "        self.index2word = {0: \"<sos>\", 1: \"<eos>\"}\n",
    "        self.n_words = 2  # Count SOS and EOS\n",
    "\n",
    "    def addSentence(self, sentence):\n",
    "        for word in sentence.split(' '):\n",
    "            self.addWord(word)\n",
    "\n",
    "    def addWord(self, word):\n",
    "        if word not in self.word2index:\n",
    "            self.word2index[word] = self.n_words\n",
    "            self.word2count[word] = 1\n",
    "            self.index2word[self.n_words] = word\n",
    "            self.n_words += 1\n",
    "        else:\n",
    "            self.word2count[word] += 1\n",
    "            \n",
    "    def sent2ids(self, sent):\n",
    "        return [self.word2index[word] for word in sent.split(' ')]\n",
    "    \n",
    "    def ids2sent(self, ids):\n",
    "        return ' '.join([self.index2word[idx] for idx in ids])\n",
    "\n",
    "import unicodedata\n",
    "import string\n",
    "import re\n",
    "import random\n",
    "\n",
    "# 文件使用unicode编码，我们将unicode转为ASCII，转为小写，并修改标点\n",
    "def unicodeToAscii(s):\n",
    "    return ''.join(\n",
    "        c for c in unicodedata.normalize('NFD', s)\n",
    "        if unicodedata.category(c) != 'Mn'\n",
    "    )\n",
    "\n",
    "def normalizeString(s):\n",
    "    s = unicodeToAscii(s.lower().strip())\n",
    "    # 在标点前插入空格\n",
    "    s = re.sub(r\"([,.!?])\", r\" \\1\", s)\n",
    "    return s.strip()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "01ae1e7f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 读取文件，一共有两个文件，两个文件的同一行对应一对源语言和目标语言句子\n",
    "def readLangs(lang1, lang2):\n",
    "    # 读取文件，分句\n",
    "    lines1 = open(f'{lang1}.txt', encoding='utf-8').read()\\\n",
    "        .strip().split('\\n')\n",
    "    lines2 = open(f'{lang2}.txt', encoding='utf-8').read()\\\n",
    "        .strip().split('\\n')\n",
    "    print(len(lines1), len(lines2))\n",
    "    \n",
    "    # 规范化\n",
    "    lines1 = [normalizeString(s) for s in lines1]\n",
    "    lines2 = [normalizeString(s) for s in lines2]\n",
    "    if lang1 == 'zh':\n",
    "        lines1 = [' '.join(list(s.replace(' ', ''))) for s in lines1]\n",
    "    if lang2 == 'zh':\n",
    "        lines2 = [' '.join(list(s.replace(' ', ''))) for s in lines2]\n",
    "    pairs = [[l1, l2] for l1, l2 in zip(lines1, lines2)]\n",
    "\n",
    "    input_lang = Lang(lang1)\n",
    "    output_lang = Lang(lang2)\n",
    "    return input_lang, output_lang, pairs\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "d6a561e0",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2157 2157\n",
      "读取 2157 对序列\n",
      "过滤后剩余 2003 对序列\n",
      "统计词数\n",
      "zh 1368\n",
      "en 3287\n",
      "['大 河 风 光 摄 影 笔 记', \"the river's luminous photographic notes .\"]\n"
     ]
    }
   ],
   "source": [
    "# 为了快速训练，过滤掉一些过长的句子\n",
    "MAX_LENGTH = 30\n",
    "\n",
    "def filterPair(p):\n",
    "    return len(p[0].split(' ')) < MAX_LENGTH and \\\n",
    "        len(p[1].split(' ')) < MAX_LENGTH\n",
    "\n",
    "def filterPairs(pairs):\n",
    "    return [pair for pair in pairs if filterPair(pair)]\n",
    "\n",
    "def prepareData(lang1, lang2):\n",
    "    input_lang, output_lang, pairs = readLangs(lang1, lang2)\n",
    "    print(f\"读取 {len(pairs)} 对序列\")\n",
    "    pairs = filterPairs(pairs)\n",
    "    print(f\"过滤后剩余 {len(pairs)} 对序列\")\n",
    "    print(\"统计词数\")\n",
    "    for pair in pairs:\n",
    "        input_lang.addSentence(pair[0])\n",
    "        output_lang.addSentence(pair[1])\n",
    "    print(input_lang.name, input_lang.n_words)\n",
    "    print(output_lang.name, output_lang.n_words)\n",
    "    return input_lang, output_lang, pairs\n",
    "\n",
    "input_lang, output_lang, pairs = prepareData('zh', 'en')\n",
    "print(random.choice(pairs))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "cfa6874b",
   "metadata": {},
   "source": [
    "为了便于训练，对每一对源-目标句子需要准备一个源张量（源句子的词元索引）和一个目标张量（目标句子的词元索引）。在两个句子的末尾会添加“\\<eos\\>”。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "92baa7c9",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2157 2157\n",
      "读取 2157 对序列\n",
      "过滤后剩余 2003 对序列\n",
      "统计词数\n",
      "zh 1368\n",
      "en 3287\n"
     ]
    }
   ],
   "source": [
    "def get_train_data():\n",
    "    input_lang, output_lang, pairs = prepareData('zh', 'en')\n",
    "    train_data = []\n",
    "    for idx, (src_sent, tgt_sent) in enumerate(pairs):\n",
    "        src_ids = input_lang.sent2ids(src_sent)\n",
    "        tgt_ids = output_lang.sent2ids(tgt_sent)\n",
    "        # 添加<eos>\n",
    "        src_ids.append(EOS_token)\n",
    "        tgt_ids.append(EOS_token)\n",
    "        train_data.append([src_ids, tgt_ids])\n",
    "    return input_lang, output_lang, train_data\n",
    "        \n",
    "input_lang, output_lang, train_data = get_train_data()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "411c9ed3",
   "metadata": {},
   "source": [
    "<!--训练时，我们使用编码器编码源句子，并且保留每一步的输出向量和最终的隐状态。然后解码器以“\\<sos\\>”作为第一个输入，以及编码器的最终隐状态作为它的第一个隐状态。-->\n",
    "接下来是训练代码。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "8b85d1ba",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "epoch-19, loss=0.2351: 100%|█| 20/20 [18:48<00:00, 56.44s/it\n"
     ]
    },
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAioAAAGwCAYAAACHJU4LAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/GU6VOAAAACXBIWXMAAA9hAAAPYQGoP6dpAABM2UlEQVR4nO3deVxUVf8H8M/MAMO+yeaCK64o5h5amblranuP2ZO22KNhVqalT1maP8OWpyw127Xdylwq9w1Mc1/BBcVAUEGUfZEBZs7vD+TKwAzDDDPcGfi8Xy9ezdx77uV7HYjP69xzz1EIIQSIiIiI7JBS7gKIiIiIjGFQISIiIrvFoEJERER2i0GFiIiI7BaDChEREdktBhUiIiKyWwwqREREZLec5C6gLnQ6Ha5cuQIvLy8oFAq5yyEiIqJaEEIgPz8fzZo1g1JZc5+JQweVK1euIDQ0VO4yiIiIyAKpqalo0aJFjW0cOqh4eXkBKL9Qb29vmashIiKi2sjLy0NoaKj0d7wmDh1UKm73eHt7M6gQERE5mNoM2+BgWiIiIrJbDCpERERktxhUiIiIyG4xqBAREZHdYlAhIiIiu8WgQkRERHaLQYWIiIjsFoMKERER2S0GFSIiIrJbDCpERERktxhUiIiIyG4xqBAREZHdcuhFCW2luFSLzMISKBVAUx83ucshIiJqtNijYsCfJ9MwYNFOvLL6pNylEBERNWoMKgZ4qss7mgo1ZTJXQkRE1LjJGlS0Wi3mzp2LNm3awM3NDe3atcOCBQsghJCzrEpBRStrHURERI2drGNU3nnnHSxfvhzffPMNwsPDcfjwYTz55JPw8fHB9OnTZavLQ60CABSwR4WIiEhWsgaVv//+G+PGjcPo0aMBAK1bt8ZPP/2EgwcPylnWrR6VEgYVIiIiOcl666d///7YsWMHzp07BwA4ceIE9uzZg5EjRxpsr9FokJeXp/dlCx4co0JERGQXZO1RmT17NvLy8tCpUyeoVCpotVosXLgQEyZMMNg+Ojoa8+fPt3ldzqry/Famk3esDBERUWMna4/KL7/8gh9++AE//vgjjh49im+++Qbvv/8+vvnmG4Pt58yZg9zcXOkrNTXVJnUpFeX/FQKyD+wlIiJqzGTtUZk1axZmz56Nf/3rXwCAbt264eLFi4iOjsbEiROrtVer1VCr1TavS6lQSK+FACq9JSIionoka49KUVERlEr9ElQqFXQ6nUwVlascVHTsUSEiIpKNrD0qY8aMwcKFC9GyZUuEh4fj2LFj+OCDD/DUU0/JWRYUlbITh6kQERHJR9agsmTJEsydOxfPPfccMjIy0KxZM/znP//BG2+8IWdZ7FEhIiKyE7IGFS8vLyxevBiLFy+Ws4xqlJXGpDCnEBERyYdr/RjAHhUiIiL7wKBiQOWnfBhUiIiI5MOgYoB+j4qMhRARETVyDCoG6M+jwqRCREQkFwYVA5R6t37kq4OIiKixY1AxQMHBtERERHaBQcWIil4VBhUiIiL5MKgYUTFOhTmFiIhIPgwqRlQEFfaoEBERyYdBxQiFdOtH3jqIiIgaMwYVI6QeFSYVIiIi2TCoGFExmJZ3foiIiOTDoGIEx6gQERHJj0HFCAUfTyYiIpIdg4oRSmVFj4rMhRARETViDCpG3JpHhUmFiIhILgwqRij5eDIREZHsGFSMUHAwLRERkewYVIzgWj9ERETyY1Axgmv9EBERyY9BxQjOo0JERCQ/BhUjuNYPERGR/BhUjGCPChERkfwYVIy4tdYPgwoREZFcGFSMuNWjInMhREREjRiDihHSGBUmFSIiItkwqBjBHhUiIiL5MagYwbV+iIiI5MegYgQfTyYiIpIfg4oRfDyZiIhIfrIGldatW0OhUFT7ioqKkrMsAIDy5r8MgwoREZF8nOT85ocOHYJWq5Xex8fHY+jQoXj44YdlrKoc1/ohIiKSn6xBJTAwUO/9okWL0K5dOwwcOFCm
im5R8NYPERGR7GQNKpWVlJTg+++/x4wZM6SQUJVGo4FGo5He5+Xl2aweJQfTEhERyc5uBtOuW7cOOTk5mDRpktE20dHR8PHxkb5CQ0NtVg8H0xIREcnPboLKV199hZEjR6JZs2ZG28yZMwe5ubnSV2pqqs3q4Vo/RERE8rOLWz8XL17E9u3bsWbNmhrbqdVqqNXqeqlJwZlpiYiIZGcXPSorVqxAUFAQRo8eLXcpkltjVJhUiIiI5CJ7UNHpdFixYgUmTpwIJye76OABwLV+iIiI7IHsQWX79u1ISUnBU089JXcperjWDxERkfxk78IYNmyYXYYBBW/9EBERyU72HhV7Jd360clcCBERUSPGoGIEB9MSERHJj0HFCK71Q0REJD8GFSO41g8REZH8GFSM4Fo/RERE8mNQMYJr/RAREcmPQcUI5c1/GXt8dJqIiKixYFAxgmv9EBERyY9BxQje+iEiIpIfg4oRHExLREQkPwYVI7jWDxERkfwYVIzgWj9ERETyY1AxQsnBtERERLJjUDGCa/0QERHJj0HFCK71Q0REJD8GFSOkeVR474eIiEg2DCpG8PFkIiIi+TGoGFFx6+fD7eew+9w1mashIiJqnBhUjKh4PBkAnvj6oPS6TKuToRoiIqLGiUHFiIT0/GrbPolJRMe5m3EsJVuGioiIiBofBhUjNGXVe07e3ZwArU7gjfWnZKiIiIio8WFQsYBSYboNERER1R2DihFVH/b551qB9FqhYFIhIiKqDwwqxlSZ6e3nw6nSa+YUIiKi+sGgYkTVHpXPYv+RXjOnEBER1Q8GFSOcahiIwls/RERE9YNBxQhnFf9piIiI5Ma/xkZw1WQiIiL5MagYoeUiP0RERLJjUDGippxy5CJnpiUiIqoPDCpGlOm4pg8REZHcZA8qly9fxuOPP44mTZrAzc0N3bp1w+HDh+UuC8WlDCpERERyc5Lzm2dnZ2PAgAEYNGgQNm3ahMDAQJw/fx5+fn5ylgUAuFGilbsEIiKiRk/WoPLOO+8gNDQUK1askLa1adNGxopuuadTEL7bf1HuMoiIiBo1WW/9/P777+jduzcefvhhBAUFoUePHvjiiy+MttdoNMjLy9P7spU5ozoh+oFuRvefupILcfMR5oz8Ygz7MBZf7UmyWT1ERESNkaxB5Z9//sHy5cvRvn17bNmyBVOnTsX06dPxzTffGGwfHR0NHx8f6Ss0NNRmtbm7OGF835ZYFzXA4P7RH+9BzLlrAIAPt53DuasFWPDnaZvVQ0RE1BjJGlR0Oh169uyJt99+Gz169MCzzz6LyZMn49NPPzXYfs6cOcjNzZW+UlNTDbazpttCfY3u++PEFQAcz0JERGQrsgaVpk2bokuXLnrbOnfujJSUFIPt1Wo1vL299b7ktOboZQCAlnPDERER2YSsQWXAgAFISEjQ23bu3Dm0atVKpoosU3m6fSEENGXsYSEiIrIGWYPKSy+9hP379+Ptt99GYmIifvzxR3z++eeIioqSs6xqvF2NPxwlhMCGk2nS+5d+Po4ub2xBWu6N+iiNiIioQZM1qPTp0wdr167FTz/9hK5du2LBggVYvHgxJkyYIGdZ1bg4qYzu25WQofd+3fEr0OoEfthv+PYVERER1Z6s86gAwL333ot7771X7jJqpHYynufiLhl+RFqpVNiqHCIiokZD9in0HUHnpsYH7R6+mGVw+9k0283xQkRE1FgwqNTCW+PCje776/x1g9u3nr4KbU1LMBMREZFJDCq14O/hYtFxPx3kOBUiIqK6YFCpBScLx5tsOZVu5UqIiIgaFwaVWnBSKfH1pN5496EIs45TKDigloiIqC4YVGrpnk7BeKS3eWsLMaYQERHVDYOKDbFDhYiIqG4YVGxI8KEfIiKiOmFQsaHYc9ew/vhlucsgIiJyWAwqNvbCquNyl0BEROSwGFTqwSOf7sOWU+kQvBdERERkFgaVenAwOQv/+e4IYs5dk7sUIiIih8KgUo/W
HuV4FSIiInMwqNSj309ckbsEIiIih8KgQkRERHaLQaWefb//Igo0ZXKXQURE5BAYVOrZ6+vi8cb6eLnLICIicggMKjJYw0G1REREtcKgQkRERHaLQYWIiIjsFoMKERER2S0GFTN99K/b0D7IU+4yiIiIGgUGFTONu605ts0YWOfzcN0fIiIi0xhUZHIp+4bcJRAREdk9BhWZbDt9Ve4SiIiI7B6DioWGdQmu0/FaHW/9EBERmcKgYiGlQmF0X2TbJiaP13GMChERkUkMKhaqIafgYHKWyeMZU4iIiExjULFQvzb+RvfV5rYOe1SIiIhMY1Cx0ITbW2Hh/V0tPr6Jh4sVqyEiImqYZA0q8+bNg0Kh0Pvq1KmTnCXVmrNKiQn9Wll8/JZTfOqHiIjIFNl7VMLDw5GWliZ97dmzR+6SzPLsXW0tOq6GIS5ERER0k5PsBTg5ISQkRO4yLPbfUZ3x4pD2yCkqRf9FO2t93IlLuTasioiIqGGQvUfl/PnzaNasGdq2bYsJEyYgJSXFaFuNRoO8vDy9L3vg7uIED/WtzOftajr/RbYz/QgzERFRYydrUOnXrx9WrlyJzZs3Y/ny5UhKSsKdd96J/Px8g+2jo6Ph4+MjfYWGhtZzxcYpK93LeXWk6XE2Y7s3s2E1REREDYOsQWXkyJF4+OGHERERgeHDh2Pjxo3IycnBL7/8YrD9nDlzkJubK32lpqbWc8XGVZ4ArqW/u8n2fDyZiIjINNnHqFTm6+uLDh06IDEx0eB+tVoNtVpdz1XVTuWgolKaHiqr4xT6REREJsk+RqWygoICXLhwAU2bNpW7FLMpK/1LOilN/7Nq2aNCRERkkqxBZebMmYiNjUVycjL+/vtv3H///VCpVBg/frycZVlEv0fFdHt2qBAREZkm662fS5cuYfz48cjMzERgYCDuuOMO7N+/H4GBgXKWZZHKN3sUNS0EdBNv/RAREZkma1BZtWqVnN/eqlRKBbq38EFecRnaNPEw2b426wERERE1dnY1mNaRKRQKrH1uAASAwpIyk+05RoWIiMg0BhUrUt582sfNWWWyrWBQISIiMsmunvppKJxVSmycfmeNbUq1Am/9cRqb4tLqqSoiIiLHw6BiI12aeUuvm/u6Vdv/+4kr+HpvEqb+cLQ+yyIiInIoDCr1oH2wZ7Vt6bnFMlRCRETkWDhGxYa2vHgXsotK8NuRS9X21WYK/ZIyHZyUCmnsCxERUWPDHhUb6hjihdvbNkGJVldt36XsGzUee6NEi14LtmHcsr1624UQHIhLRESNBoNKPdCUVg8qphxLzUa+pgxxl3OlbUIItJmzEW3mbOSEcURE1CgwqNSDpr6uNe4vM9DjYsjptDzpdUpWUZ1qIiIicgQMKvUgyKvmoFKi1UFTpjV5nrScWwNwy3Tm99IQERE5Gg6mrQfNTPSoxCZcw9QfjuKJyFbwVDvh8dtbGWxXeVHmUi1v/RARUcPHoFIP7o1ohgsZBejd2h9PfH2w2v5XVp8EAHy77yIAYPuZq5g3JlzaX6gpg4faCYpKSx+WMagQEVEjwKBSD1RKBWYM62h0f75Gf22gc1cLUHmsbFruDYQFeaHyosylvPVDRESNAMeo2Cn9eVaqz6PCHhUiImoMGFTsVOXVlSt6UpwqDVLhXCpERNQYMKjYKW2lHpOcolIAgLfbrTt1Lk786IiIqOHjXzs7tfV0uvT6weV/4/zVfJRVGrjy29Fb0/KXlHG8ChERNUwMKnYq/nKe3vuhH+7Ga2vjpfdqJxUA4IOtCejw+iYcTcmu1/qIiIjqA4OKnao8C22FM5W2DQ8PAQB8vDMRALDgz9P1UxgREVE9YlBxUIoqDwJlFpTIUwgREZENMajUs++e7muV8wgBTFpxa/I4rv1DREQNEYNKPevT2t8q57mWr0FMwjWrnIuIiMheMajUs6q3bCwV9eNR65yIiIjIjlkUVL755hts2LBBev/KK6/A19cX/fv3x8WLF61WXEOk
tFZSISIiagQsCipvv/023NzcAAD79u3DsmXL8O677yIgIAAvvfSSVQtsaGwZUwqrrBlERETk6CwKKqmpqQgLCwMArFu3Dg8++CCeffZZREdH46+//rJqgQ2NLXtUxi7dY7NzExERycGioOLp6YnMzEwAwNatWzF06FAAgKurK27cuGG96hogW975uXCt0HYnJyIikoFFQWXo0KF45pln8Mwzz+DcuXMYNWoUAODUqVNo3bq1NetrcBQ2HqPy9sYzNj0/ERFRfbIoqCxbtgyRkZG4du0afvvtNzRp0gQAcOTIEYwfP96qBZJ5Pt/9j9wlEBERWY2T6SbV+fr6YunSpdW2z58/v84FEREREVWwqEdl8+bN2LPn1sDNZcuW4bbbbsNjjz2G7GwujkdERETWYVFQmTVrFvLyyhfIi4uLw8svv4xRo0YhKSkJM2bMsKiQRYsWQaFQ4MUXX7ToeCIiImp4LLr1k5SUhC5dugAAfvvtN9x77714++23cfToUWlgrTkOHTqEzz77DBEREZaUQ0RERA2URT0qLi4uKCoqXwRv+/btGDZsGADA399f6mmprYKCAkyYMAFffPEF/Pz8LCnH4fRprX+dXz7RW6ZKiIiI7JtFQeWOO+7AjBkzsGDBAhw8eBCjR48GAJw7dw4tWrQw61xRUVEYPXo0hgwZYrKtRqNBXl6e3pcjCvFx03vfPthTpkqIiIjsm0VBZenSpXBycsLq1auxfPlyNG/eHACwadMmjBgxotbnWbVqFY4ePYro6OhatY+OjoaPj4/0FRoaakn5RERE5CAsGqPSsmVL/Pnnn9W2f/jhh7U+R2pqKl544QVs27YNrq6utTpmzpw5eoN18/LyHDKsCCH03of41O76iYiIGhuLggoAaLVarFu3DmfOlM+EGh4ejrFjx0KlUtXq+CNHjiAjIwM9e/bUO+fu3buxdOlSaDSaaudSq9VQq9WWlmw3quQUOCst6tgiIiJq8CwKKomJiRg1ahQuX76Mjh07Aii/LRMaGooNGzagXbt2Js8xePBgxMXF6W178skn0alTJ7z66qu1DjyOSEA/qeiqJhciIiICYGFQmT59Otq1a4f9+/fD398fAJCZmYnHH38c06dPx4YNG0yew8vLC127dtXb5uHhgSZNmlTb3tBUzSU65hQiIiKDLAoqsbGxeiEFAJo0aYJFixZhwIABViuuoYoaFIZN8enSe2eVbRcqJCIiclQWBRW1Wo38/Pxq2wsKCuDi4mJxMTExMRYf60i6NvfRe2/rFZWJiIgclUWjOO+99148++yzOHDgAIQQEEJg//79mDJlCsaOHWvtGomIiKiRsiiofPzxx2jXrh0iIyPh6uoKV1dX9O/fH2FhYVi8eLGVS2yYFtxXPg5n6WM9qu0b39fxHrkmIiKyBYtu/fj6+mL9+vVITEyUHk/u3LkzwsLCrFpcQ/bv21vh4V4t4Opc/nSTQlE+yLaJhwu6NffFT0iVuUIiIiL51TqomFoVedeuXdLrDz74wPKKGpGKkAIAa6b2xwfbzuG10Z2RXVgqY1VERET2o9ZB5dixY7Vqx4GhlunR0g/fPd1P7jKIiIjsSq2DSuUeE2r4hBCY+v1RaIXA5//uxQBKRESysHgKfWrYcm+UYvOp8rlerheUINDL8ZcuICIix8NFZuzc15N6m31M1UUPLcHZcomIyB4wqNg5pQW3XEq15SmjTKtDblHdB+byrg8REcmFQcXOWRJUKhY9vP+Tv9H9ra1IySyydllERET1gkGlAaq48xN3ORcA8GfcFbOOj0nIwMXMQmuXRUREZDYOprVzHmqV6UZVVB2iYs6QlQP/ZGLSikNmf08iIiJbYFCxU3NGdkJyZiF6tvSrtq93Kz8cvpht9FhdHQbT7kq4Vm2bFcbmEhERWYRBxU79Z2A7o/sGhAWYHVSEENXmQtHpBBSKW5P0lZTpcDSl+nmt8RQRERGRJThGxQEFe7tKrzuFeFXbX/XR4k9jLqDP
wu1Ivn5r3EmpVoc+C7fjX5/vBwBM/f4IOry+CQeTskyej4iIqL4wqDggX3dn6bWzqvpH2H3+Vvx6+NaihvmaMlwvKMHCjWekbTvPZiCzsAQHbgaTTfHpRr9fXW4lERER1QVv/Tig5r5u+PzfveCpdsLbm84YbDNr9clq24QQKNCU4f5le3E+o0Da/tH28zV+PwYVIiKSC4OKAxgd0RQbTqYBAB6/vSUiWvhAofAFYN5A1/MZBXho+d96IQUAPtx+rsbjmFOIiEguvPXjAKbcdWtg7f/d101vUGzlNXiUJuaGu5hZhLPp+WZ/fwYVIiKSC4OKA2gX5AF/DxeDA2ffvr8b7ggLwNeTettsheMCTZlNzktERGSKQjjws6d5eXnw8fFBbm4uvL295S7HpopLtQAAV2fjE8C1f22jtM6PtSUuHAknAwN3iYiIzGXO32/+5XEQrs6qGkMKAJv1qABAzo26L25IRERkLgaVBsTUGJW60HIyFSIikgGDSgNiyUrLtVWq1dns3ERERMYwqDQgtgwqJWUMKkREVP8YVBqQwhLbPZ1jq0G6RERENWFQaUBs+fzWp7EXuDghERHVOwYVqpW1xy5jy6mrcpdBRESNDIMK1dpHO2peE4iIiMjaGFSo1s6k5cldAhERNTKyBpXly5cjIiIC3t7e8Pb2RmRkJDZt2iRnSURERGRHZA0qLVq0wKJFi3DkyBEcPnwY99xzD8aNG4dTp07JWRYRERHZCSc5v/mYMWP03i9cuBDLly/H/v37ER4eLlNVREREZC9kDSqVabVa/PrrrygsLERkZKTBNhqNBhqNRnqfl8cxE0RERA2Z7INp4+Li4OnpCbVajSlTpmDt2rXo0qWLwbbR0dHw8fGRvkJDQ+u52satVRN3uUsgIqJGRvag0rFjRxw/fhwHDhzA1KlTMXHiRJw+fdpg2zlz5iA3N1f6Sk1NredqGzc3E6s3ExERWZvst35cXFwQFhYGAOjVqxcOHTqEjz76CJ999lm1tmq1Gmq1ur5LJCIiIpnI3qNSlU6n0xuHQkRERI2XrD0qc+bMwciRI9GyZUvk5+fjxx9/RExMDLZs2SJnWWSEt5uz3CUQEVEjI2tQycjIwBNPPIG0tDT4+PggIiICW7ZswdChQ+Usi4wY2CFQ7hKIiKiRkTWofPXVV3J+ezKTQiF3BURE1NjY3RgVIiIiogoMKg3UkwNaG+0BCfF2teicQtShICIiIgswqDRQ/7mrHRIWjMRzd7dD71Z+0vZQfzc82Ku5RefU6phUiIiofjGoNCBrnusvvVYqABcnJV4Z0Qmrp97aLgTg7mLZ0CQGFSIiqm8MKg1Iu0BP6bWzyvhH++fJNOl19APd9PZ1CvEyepzOwns/xaVarDqYgvTcYsRfzkVRSZlF5yEiosZH9plpyXp83JwxbVAYlEoF/DxcDLYRAkjPvSG9H9+3JUL93PH4VwcAAD9Ovh3f77+Iy9k3cCg5C/9cL5TaWtqj8s7ms1ixN1l676xS4OyCkVAp+RgRERHVjEGlgZk5vKPJNlXzxoCwJhjfNxQdgr3g7+GC6YPbAwAe+WyfVYLK9jNX9d6XagXm/3EK3Zr7YN3xy/jksV7wcedkckREVB1v/TRCvlVCgUKhQPQDEXhyQBu97bNHdtJ7b05QSckswuHkLACGnxb6dt9FzFp9EnsTM7F01/lan5eIiBoXBpVGaHh4SK3a9WzphxNvDpNu0ZjToXLXe7vw0Kf7kJhRYPKx5vxijlkhIiLDGFQaIXNmmPVxc8aUgW0B1DyYtqikDNmFJfhi9z9IzSqStp9JyzP5PTg/CxERGcMxKo1E91BfnEjNwQM9m6Nbcx+zjlWaSDbFpVp0eePWQpJLdt66laNSKiBMJJGfD6finYcizKqJiIgaBwaVRuK7p/vicHIW7mwfCCelAp9M6InOTb1rdaxCUXHrx3DgOH+1QO99XqVbOU5KBdhhQkRElmJQaSS8XZ1xT6dg6f2obk1r
fWzFU8TGgsrMX08YPba8R6XW34qIiEgPx6iQSUrFrcG0/1wrQOvZGzDx64PS/oSr+UaPfW9LAtLzim1eIxERNUwMKmRSxQgVIQTu+V8sACD23DUAgM7Eo0Bn042HGCIiIlN464dMUt6891NUotXbvnDDaXzxV5IcJRERUSPBHhUyqeKhn/XHr+htZ0ghIiJbY1Ahk0w9nkxERGQrDCpkEtcOJCIiuTCokEnpuRq5S6g1TZkW2YUlcpdBRERWwqBCJu0+f03uEmpt4Lsx6LFgGzLy+Ug0EVFDwKBCJtW0xo+9qZizZd+FTJkrISIia2BQIdMcJ6dIHChbERFRDRhUyCRH6lGpoDUxER0RETkGBhUyyRH/5jtiuCIiouoYVMgk4YD3fhhUiIgaBgYVMkmr5R99IiKSB4MKmRTs4yp3CbUiKvWiKMBZ6oiIGgIGFTLJz91F7hJqJTGj4NYb5hQiogaBQYVMEg4y3qOs8qhfxyiZiIhMYFAhkxzlb37ltRMdcQAwERFVJ2tQiY6ORp8+feDl5YWgoCDcd999SEhIkLMkMkDt5Bh5Vqe79VqrM96OiIgch6x/gWJjYxEVFYX9+/dj27ZtKC0txbBhw1BYWChnWVTF66O72Px7pGQW1fkclR9J5uPJREQNg5Oc33zz5s1671euXImgoCAcOXIEd911l0xVUVWh/u5oH+SJ8xkFuO+2Zlh3/IrVv4emTFvncygr3ftRKTmaloioIZA1qFSVm5sLAPD39ze4X6PRQKPRSO/z8vLqpS4CfpjcD1tPXcV9PZrXOai09HdHSpZ+D4o1+j881bd+nH3dnK1wRiIikpvdDD7Q6XR48cUXMWDAAHTt2tVgm+joaPj4+EhfoaGh9Vxl4xXk5YrHb2+lFwYqGx4eXM8VVVd5AG16XjFKOVCFiMjh2U1QiYqKQnx8PFatWmW0zZw5c5Cbmyt9paam1mOFVJMuTX1q3XbaoDAAQM+WvtI2S+7UlJTpcDQlG1O+O4LUrCK9NYnm/3Ea9368x/yTEhGRXbGLWz/Tpk3Dn3/+id27d6NFixZG26nVaqjV6nqsjGrLx612P0qf/7sXhnYJRu/WflAqFLj7/RgAgEJhXlJJvl4oHQsAV/OL8f7D3fXaJFzNR1ZhCfw9HGPCOiIiqk7WHhUhBKZNm4a1a9di586daNOmjZzlUB0EeKmx9rn+RvcffG0wkheNxrDwECgUCrQN9NQb8Gpuh8qH28/pvb+QUYAZv5yo1u6V1SfNPDMREdkTWYNKVFQUvv/+e/z444/w8vJCeno60tPTcePGDTnLIgsooECPln7Se38PF0y+81bwNPS0cAs/N+m1s8q8H8WNcWl67/OKy3AiNadau4NJmWadl4iI7IusQWX58uXIzc3F3XffjaZNm0pfP//8s5xlkQnj+5oexKwAMHtkZ+m9odsvCoUC7i4qAIaDTE1KuaIzEVGjIOsYFUdZQ4b0tQv0rLYtwFM/iCgUCqiUCpx+aziEMN5jUjH3iTkTtN0o0aJTiBfOpuebUTURETkiu3nqhxyH2lklvR4RHoIXh7RH3zblc9880KM5AOD5e8qf7HF3cYKHkUeagVtP+1QNKhtOpmHKd0dQoCmDEAJPrjiIOWviAADDFsfWOqSYO0iXiIjsi1089UOOpfLaP5/+u5fevvce7o6oe8LQNsCjVudSKg33qET9eBQA0HKHO+7uEIhdCdcAANMHhyE1i2OYiIgaC/aokNma+7oZ3adSKtAu0LPWPRkVrRLSCwAAJy/lYMKX+6X9n+/+B499eUB6v+og584hImpM2KNCZuvfrgleGtIBYUHVx6qYK7uoFEB5D8roiNEYu3Rvje2dVebdyuGdHyIix8agQmZTKBR4YUh7q5/3uIHHi6tSKc3rBFQyqRAROTTe+iG7cd+ymntTAOCdzWfNOieDChGRY2NQoQbNkjWEiIjIfjCoUIPGHhUiIsfGoEINWnpesdwlEBFRHTCoEBERkd1i
UCEiIiK7xaBCZIZNcWn4z3eHkVdcKncpRESNAoMKyWpQx0C5SzDL1B+OYsupq3hvc4LcpRARNQoMKiSrIV2CrX7O8wtHWv2cVV3KLrL59yAiIgYVklmwl6tVzzciPATOqls/1k19rHv+CgWaMpucl4iI9DGokKwiWvhY9XyzR3YCANzVofyWUp/W/lY9f4VDydk2OS8REeljUCFZlWh1VjvX2uf6o3WAB4BbY190Qljt/EREVP8YVEhWId7WuTUzpHMwurfwld5XzEhrzZxyo0RrvZMREVGtcPVkkpXKCovx7Hl1EFr4uettU948r6EelbziUkz/6RjGdm+GB3q2qPHcX/71D3RC4KkBbdB34fY610pEROZhUCFZKcxYi6d9kCfOZxTobesU4lUtpAC3FiM0FFSWx1xATMI1xCRcQ4dgL4QFecLVWaXXJiE9Hycv5eD/NpwBANzTKQj5HEBLRFTveOuHHMaXE3vrvQ/yUuN/j3Q32Lbi1o+hITBZBSXS63uX7MHjXx4AABRqyrDvQia0OoHhi3dj1uqTUrshH+yua/lERGQBBhVyGK2aeKBHS18AwPDwYBz472CENzP81JBKGqNS3qOy4WQaXlh1DEKIar0shy+WP8Hz5IpDGP/Ffnz51z+1qken40BdIiJb460fsjteaiejt1m+mtgHG+PSMPa2ZjXeNqrYpb0ZSqJ+PAoAaOnvjl+PXDJ4zMHkLABA9KaztaqzTCfgYoUxNkREZBx7VMjuvDk2HAAwpnszPNo7VG+fv4cLHr+9FbxdnWs8h0oaTAuUVrr/s2RnosH2y2MumF0nJ30jIrI99qiQXZkxtAMe7NkcY7o3hYtKiQJNGdTOSozp3sys81SMUdHpBMYu3Wuy/Tuba9eLov89zD6EiIjMxKBCdmX64PYAALVT+VM4Xq7OeGtcV7PPU/F4slYncCYtz3oFVqLlGBUiIpvjrR9qkCoG09pyZlotZ70lIrI5BhWyG2/c28Vq56ppHhVr0Vlv9n8iIjKCt35Idv8d1QlHL+ZgUv/WVjtnxa0fWy4eyB4VIiLbY1Ah2T17Vzurn1Npxoy3luI8KkREtifrrZ/du3djzJgxaNasfE6MdevWyVkONSCiHno7OJiWiMj2ZA0qhYWF6N69O5YtWyZnGdQA1UeI4K0fIiLbk/XWz8iRIzFy5Eg5S6AGqrQeggpv/RAR2Z5DjVHRaDTQaDTS+7w828yPQY5PWw+P5FizR0VTpkXS9UJ0DPYya0VpIqKGzqGCSnR0NObPny93GeQAyrSOMUZFpxPILCxBn4XbpW1J0aMYVoiIbnKoeVTmzJmD3Nxc6Ss1NVXukshO1ccYlbp22mh1AgPf36UXUgAgI19j5AgiosbHoYKKWq2Gt7e33heRIWVWCio/PNPP6L66TCa3/fRVvPjzcaRm3bDqeYmIGhqHCipEtWWtP/YDwgKM7qvNGJXcG6X49XAq8opLpW0J6fl45tvD+OPEFYPHnE3PN79QIqIGStagUlBQgOPHj+P48eMAgKSkJBw/fhwpKSlylkUNgI+bs8XHXnh7FJ6+ow2+e7pvje1qc3vphVXHMGv1Scz4+YS0Lel6QY3HPLniUO0KJSJqBGQNKocPH0aPHj3Qo0cPAMCMGTPQo0cPvPHGG3KWRQ3A6G5N8UDP5ibbvfdQBA6+NhhhQZ4AgLAgT6iUCsy9twvubB9Yrf3QLsHS69oM2I1JuAYA2H7mKpbtSkTr2Rtw/mrNQYWIiG6R9amfu+++u15mEKXGx0mlxAeP3IY1Ry/X2M7FSYkgL1dsnH4ndpy5itvbNqmx/RdP9MaIxbtxNj0fZTdH02rKtPh4x3kM6hiE3q39jR773pYEAMD/tp0z82qIiBovjlGhRq0iJ7s4KTGyW1P4ebiYPMZZVf5rU9GjsnJvMpbtuoCHPt2HQk0ZvvzrH1zJqT5IloiIzMegQo2agPk9ek6q8jlOSrU6CCEQvemstO/Rz/fh/zacQf9F
O61WIxFRY8agQo2Cl9rwXU5XJ5XZ53JWlv/apGQV4UBSlt6++Mu3ZkvOr/SkT30TQuD0lTwUl2plq4GIyBoYVKhBe/fBCIQFeWLD9Dv1tk+/Jwz3dArCkEqDY2tLpSzvUfm/DWfwWewFo+26zdtq9rmtZd3xyxj18V8YsXi3bDUQEVmDQ02hT2SuR/qE4pE+oQAADxcVCku0CPBUY8awjrU+x4on++DJFYdwZ/vyOVUqbv0AwK6bT/XYE51OYNmu8gCVnFkkczVERHXDoEKNxvaXB2L14UsY36+lWccN6hiEPa8OQoi3K4Bbg2nt0eb4dLz8y3EUlvCWDxE1DPb7f1wiK2vq44bnB7dHgKfa7GNb+LnD6WZAcVJab8HATiFe0utWTdzNOlYIgROpOSjUlAEAjqVkY8r3R6qFFE2ZFmuPXUJGfnHdCyYiqmcMKkRmsmaPyhv3dpFe/3dUZwBA56Y1r2GVVViCmIQMzP/jNMYt24tHP9+Hs+l5uP+Tvw22X7IjES/9fAIPLje8n4jInvHWD5GZMgutt7pxZLsmuKtDIDzVKni5lv86lmlrXpZ52Ie7cb3gVg3xl/Pww37jy04s3ZUIAAYXQCQisncMKkRmunCt0GrnUigU+Pap8jWFDiWXP+psauXnyiGlwnf7L1qtJiIie8KgQmSmQE81ruXXvVfljiorM1eMfSk10KNSUqZDanYRvtj9T52+pxACCoX1xtgQEdkagwqRmaz1d3754z313ledmv9MWh6e+eYwXh7WAeuPX0Hsubo/Cn0+owAdgr1MNyQishMcTEtkpvzishr3x8y8G/PHhuttOzp3KJKiR+lt83J11ntfMT9LxWKHr6w+ics5NzDjlxNWCSlAec8MEZEjYVAhMlNKlv4kat1b+Oi9D/Fxxb9vb6W3zd/DRe+Wy+ujO1c7r9PNqfkLNGU4lJyFuMu51ipZ4upsnV95IQQ0ZZyrhYhsj0GFqA6SF43G2ucG6G1TOymhVCqw9rn+UCkVWDK+R7XjerT0q7bN+WaPSnGpDg9/us8m9Woq9agcSs7CqSvmhaHrBRpMWnEQneZuRsfXNyMmIUPWNY2IqOHjGBWiOlJWmQCuouekR0s/XHhb/3bP15N649zVAvRs6VvtPFoTT/tYQ8Wtn8SMAikMJS8abbT9m+vj8c2+ixjdrSk2xKVV2z9pxSE093XD3tn32KZgImr0GFSIzOTn7ozsIst6Ee7pFIx7OhleCLHi1o8tVQSV/66Jk7ZpdUJaaBEAjlzMRoCnC9ROKnyzr/yxZ0MhpcLlHM7PQkS2w6BCZKZtMwbi/S0JeMzMNYNM8XS1/a9jyc1Hn7OLSqRthSVl8L45sDf5eqFFM9j+b2sCXjZjoUciotriGBUiMwV4qrHowQhEtPCVtnW8+cjvj5P7WXxefw8Xi499sGeLWrXTlJYHlco9KBWPQwshMPWHoxZ9/yU7Ey06jojIFPaoEFnBxhfuRKlWB1dnVb19z16t/HDhWgFyikoxZWBbLLy/K5beDAy/n7iCb5/qi7vfj9E7Zs2xS7inU5DeekXbTqfj0T4tsSshA2fS8uqt/gqx565h4tcHpfcKBfDW2HAUaLQY3zcUvu6WBzgicnwMKkRWoFIqoFLWX0gBgNVTInE2PR8FmjK0v9mjM3N4R73/zh8bjhulWvx1/hr2JmZiY1w61nS6jGBvNeIul59n66mrGB3RDNN+PGaTOotLtSgq0RrsMdLphF5IAQAhgLnrTwEAVh1KQeysQTapi4gcA4MKkYPY8+og7DiTgTd/L/8jrlAoTK60PLF/awDA3sTr0raZv57Qa7PjbAa6vrnFusVW0mnuZgDAT5NvR2S7JriYWYifD6WiR0s/TP72cI3HXswsqnE/ETV8DCpEdmT7jIEY8kFste0rJvVBCz93TOzfGiO7hiDQS23Wef86f910ozrKLNDgp4Mp8HJ1RrtAT9zRPgBC3HrkevwX+/HeQxGYtfqkzWsByud8
2XchE8PDQ+DidOtW186zVzH/j9NY9lhPdG3uU8MZiMgeMKgQ2ZGwIM9q26rOcxLk7Vpf5dRoXdQAPPrZPmkSuTfWn9J7jLlbc59qs+vWV0gBgN7/tx0AMKpbCD6Z0AtAeXh5amV5L869S/bUOIeMuQs4ZhWWYP8/mRjaJVhvDBAR1Q1/m4jszPmFI6XXC+/vKmMl+qYNCtN77+GiQseQWwscVp1rxVpLAFzKNu/2T+6NUr3ZcjfGpaP7/K349XCqFF5MeWj532gzZyO2nb5qcH+hpgzbT19FcWn5MgJFJWXouWAbnvvhKD7ZdcGsemtDCMF1mhxcoabM4MroZBp7VIjsjLNKifMLRyKrsATBVuo9CQvyRGJGQa3bn10wAgeSsvQGuvZv1wT/GdgW3eZtBQC0C/RE7g3bT58/5INYFJfq8FCvFnj/4e44fzUf/h4uyC8uw9z18Zh6dzv0bxeArMISvLY2Dpvi06udI/dGqVm9OYcvZgMAJn972GCvS/jNMT0BnmpcL9Do7ftw+zm8MKS9OZcIoLxHxsVJCU919f8tt5mzEUB5cJ3Qr1W1/WTfMgs06HUzJNfUi0eGMagQ2SFnldJqIQUAtr10F/q+vQPX8jVG2/i5O6OwRIt9s++Bq7MKAzsEYv+cwXhtbRySMgvRq7Uf1E4qaRVohUKBF4e0x0s/nzB6Tmsovjn3y+ojl7D6yKVq++tj/E1lJy/lSK+rhpQKtbltJITA8tgLGNAuAC383KQ/ZEnRo6BQKHDuaj7eXH8KT93RRjrmtbXxZgUVIQTKdALOKiV0OlFtuQdruF6ggafaqV4fzXc0d7yzS+4SHBqDClEjoFAocOi1IWg9ewMA4Lep/XHkYhaa+boh70YZhnQOMjj2JcTHFV9O7A0hbq1pVPkP8H23Na9zUPnw0e565zjw38G4lq/BvUv21Om85vjjxBXEJFzD/x7pjnc2n9Xb13r2BnRp6g0XJyWOp+bU6nzrjl/G/T1qnoTv3iV7cOpKHoAEPBF5K3xU9J5U2PdPZq2+pyFVz/XJhJ4Y1a0pAODjHefxwbZzeLR3KNoHe+KZO9uaff6UzCLc9V75H+F5Y7pgYMcg+Lg5QwHgznd3wd1FhV0z74aHgV6ixkAIga2nr+JGqf2vNF5UUgZ3F/v8nBSi8rB8B5OXlwcfHx/k5ubC27vmxzSJyDYqwo8lKnoPFvx5Gl/tSQJwq2v8mW8OYfuZDKvUKJcXh7RHqybuKNMK6dbTvjn3QKVUoO/CHRads22AB3bOvBtA+Rw1rs4qCCEQfzkP7YM9pZ6Nb/cl442b89FUFv1ANzT3dcMTVeav+XVKJPq09q9VDeev5uOTmAtIzizEsZScGtsO6hiIRQ9GIMhLXavByUIIvL4uHj8cSMHxN4biekEJdp69irc3nsV3T/fFne0DpesGytea+vlQKl4a0t6swc8VcotK4eaigouT0uwB1ED5H3hNqQ5+BuYJMvS7ET9/uMHbe6aUlOn0nl4DgCs5N/DFX/9g2qAwNPE070nAyub9fgor/07GuqgBuC3U1+LzmMOcv98MKkRUJ5X/Z/z7tAFYdSgVPx5IwbRBYZg0oDUuZBSgX9smAMr/sMZfzkWrJh56j1iXaXXYEJeGfm2aIMSnvGdn2o9H8edJ44sh1sYdYQHYk1i/t4bqQws/N1zKLl8M8oNHumPP+etYc+yytP/NMV0w/4/TZp83edFofLT9PD7cfg6xs+5GsLerFIQq/oBfL9DUelByZWO7N8PIriGY+sNRzBreEVMHtsONUi3UTkqM/2I/8ovLkJ5XjBwLF/ysqB8ASrU6OKuUeGN9PIpLtZg3NhyXs28gMaNAWiZi/thwnLiUgzVHL6NVE3dM6t9a+jfz93DB0blDa/U9+729HVfzNDj8+hAE3AwLQghsjk83uiTF8TeGmpxxOTWrCE19XFGmE/jr/HVM/vYw7u/RHB8+ehuA8jFNPRdsk9p/NbE3Bnc2vOCpKRW/
w16uToibN9yic5iLQYWI6k3loGLNgYIH/snEo5/vN+uYB3o0l/5gL3usJ0ZHNMX0n46hVRN3PHtXW1zL16BEq8OIxX9ZrU7pe/dsjjVHL5tuaMdMrQz+87O3m/2ZOLL/u68rBICHe7WAq7MKa45ewoxfTmBAWBOMu6057mofiNujLesZmzW8I0Z2DUF+cRnGLduLqEHt8K8+LXHnu6bHs/Rt7Y+DyVnVtv/5/B01zg2UWaCBl6sznFUK6bbg2QUjpEkZAaBvG38cTMrCmuf6Y92xy/BQO+HJAa0R5GXdaREcLqgsW7YM7733HtLT09G9e3csWbIEffv2NXkcgwqRfTibnodgL1eD3d+WupJzA/0X7ay2fcbQDricfQMuTkpEDQqDr3v5ys/mDOasy+2qCm7OKmx/eSAG3KwxedFoJF8vrLa+Ul092jsUT/RvhdEf19+YHXJcXq5O+GpiH8QkZOCTGOs9Km/tp5UcKqj8/PPPeOKJJ/Dpp5+iX79+WLx4MX799VckJCQgKCioxmMZVIgatt9PXIGrkxKDOwdj19kMdGvhY7WnofZdyESovxvGLt2LrMKSavtHhIegiacL5o0Nh5NSAU2ZDoWaMjiplPBxc5ba5d4ohbuLSprkbf4fp7Bib3Ktavjyid7oEOyFXw6nIqKFD4aFh0CnEyjR6rDn/HX4e7qgZ0s/ANYJV1WN6d4Mf5y4YvXzUsPTqINKv3790KdPHyxduhQAoNPpEBoaiueffx6zZ8+u8VgGFSKyhhOpOQj2doW/hwuKSsqsvmKzJYM0q/os9gKiN5U/kaRQAO891B0ju4bgw23n8OWeJKyLGgB3FxWGfbhb77jEhSPhpFLieoEGp67kIelaASYNuPXIc0J6PoYv1j/GlCGdg9CqiQe+2pOEsd2b4aN/3XazrvJrjL+ciwBPNUZ8tLtOY04qrJ4SiY92nMdf56+jZ0tf/Da1P3afv15tQcvamjYoDEt3JUrvH+vXEoeTszBjaEdM+f5InesFgH/1CcWqQ6kAgDXP9UdxqRaPfXHAKueub8fmDrVqbyngQEGlpKQE7u7uWL16Ne677z5p+8SJE5GTk4P169frtddoNNBobs1bkJeXh9DQUAYVImo0dDoBrRBGp+nX6gSuF2gs6nnKLSrFoeQs3NMpCEqlAoWaMrg4KeGsKn8iJquwxOynS9Jzi1GgKUOApwt83V1QXKqFpkwHV2clhADyikuRd6MUn8b+gycHtEZ4s9qvv7QpLg1TfziKh3u1wLHUHKyY1AfNfd0AAJmFJdXWxDqakg1NqQ6R7ZoYPWdecSnGLd2Lq3nFKCrRomtzb8RfzqvW7skBreF2c76hinE7cfOGYcySPfi/+7rhjvYB1Y75LPYCcm6UIiNPg/t7NMdPh1Kw4WQalj7WA6uPXEJMwjW0D/LEsPBguDqpsCk+Hf+ObIXerfzQzNcNbs4qpOcVw9fdGaVaIfXsFZdq9caZGHN/j+YoKinDllPlMy6vjxqA+Cu56NXKD2OW7MHTd7TF5DvbID2vGKM/3oOoQe0wa3gnk+e1hMMElStXrqB58+b4+++/ERkZKW1/5ZVXEBsbiwMH9NPnvHnzMH/+/GrnYVAhIiJyHOYEFYda62fOnDnIzc2VvlJTU+UuiYiIiGxI1mnoAgICoFKpcPWq/sJfV69eRUhISLX2arUaarXlk9oQERGRY5G1R8XFxQW9evXCjh23nkPX6XTYsWOH3q0gIiIiapxkn9h/xowZmDhxInr37o2+ffti8eLFKCwsxJNPPil3aURERCQz2YPKo48+imvXruGNN95Aeno6brvtNmzevBnBwZZNBUxEREQNh+zzqNQF51EhIiJyPA32qR8iIiJqXBhUiIiIyG4xqBAREZHdYlAhIiIiu8WgQkRERHaLQYWIiIjsFoMKERER2S0GFSIiIrJbss9MWxcVc9Xl5eXJXAkRERHVVsXf7drMOevQQSU/Px8AEBoaKnMlREREZK78
/Hz4+PjU2Mahp9DX6XS4cuUKvLy8oFAorHruvLw8hIaGIjU1tUFOz8/rc3wN/Rp5fY6toV8f0PCv0ZbXJ4RAfn4+mjVrBqWy5lEoDt2jolQq0aJFC5t+D29v7wb5A1iB1+f4Gvo18vocW0O/PqDhX6Otrs9UT0oFDqYlIiIiu8WgQkRERHaLQcUItVqNN998E2q1Wu5SbILX5/ga+jXy+hxbQ78+oOFfo71cn0MPpiUiIqKGjT0qREREZLcYVIiIiMhuMagQERGR3WJQISIiIrvFoGLAsmXL0Lp1a7i6uqJfv344ePCg3CUZNG/ePCgUCr2vTp06SfuLi4sRFRWFJk2awNPTEw8++CCuXr2qd46UlBSMHj0a7u7uCAoKwqxZs1BWVqbXJiYmBj179oRarUZYWBhWrlxpk+vZvXs3xowZg2bNmkGhUGDdunV6+4UQeOONN9C0aVO4ublhyJAhOH/+vF6brKwsTJgwAd7e3vD19cXTTz+NgoICvTYnT57EnXfeCVdXV4SGhuLdd9+tVsuvv/6KTp06wdXVFd26dcPGjRttfn2TJk2q9nmOGDHCYa4vOjoaffr0gZeXF4KCgnDfffchISFBr019/kxa+/e4Ntd39913V/sMp0yZ4hDXBwDLly9HRESENMFXZGQkNm3aJO135M+vNtfn6J9fVYsWLYJCocCLL74obXPIz1CQnlWrVgkXFxfx9ddfi1OnTonJkycLX19fcfXqVblLq+bNN98U4eHhIi0tTfq6du2atH/KlCkiNDRU7NixQxw+fFjcfvvton///tL+srIy0bVrVzFkyBBx7NgxsXHjRhEQECDmzJkjtfnnn3+Eu7u7mDFjhjh9+rRYsmSJUKlUYvPmzVa/no0bN4rXXntNrFmzRgAQa9eu1du/aNEi4ePjI9atWydOnDghxo4dK9q0aSNu3LghtRkxYoTo3r272L9/v/jrr79EWFiYGD9+vLQ/NzdXBAcHiwkTJoj4+Hjx008/CTc3N/HZZ59Jbfbu3StUKpV49913xenTp8Xrr78unJ2dRVxcnE2vb+LEiWLEiBF6n2dWVpZeG3u+vuHDh4sVK1aI+Ph4cfz4cTFq1CjRsmVLUVBQILWpr59JW/we1+b6Bg4cKCZPnqz3Gebm5jrE9QkhxO+//y42bNggzp07JxISEsR///tf4ezsLOLj44UQjv351eb6HP3zq+zgwYOidevWIiIiQrzwwgvSdkf8DBlUqujbt6+IioqS3mu1WtGsWTMRHR0tY1WGvfnmm6J79+4G9+Xk5AhnZ2fx66+/StvOnDkjAIh9+/YJIcr/cCqVSpGeni61Wb58ufD29hYajUYIIcQrr7wiwsPD9c796KOPiuHDh1v5avRV/UOu0+lESEiIeO+996RtOTk5Qq1Wi59++kkIIcTp06cFAHHo0CGpzaZNm4RCoRCXL18WQgjxySefCD8/P+n6hBDi1VdfFR07dpTeP/LII2L06NF69fTr10/85z//sdn1CVEeVMaNG2f0GEe6PiGEyMjIEABEbGysEKJ+fybr4/e46vUJUf6HrvIfhaoc6foq+Pn5iS+//LLBfX5Vr0+IhvP55efni/bt24tt27bpXZOjfoa89VNJSUkJjhw5giFDhkjblEolhgwZgn379slYmXHnz59Hs2bN0LZtW0yYMAEpKSkAgCNHjqC0tFTvWjp16oSWLVtK17Jv3z5069YNwcHBUpvhw4cjLy8Pp06dktpUPkdFm/r+90hKSkJ6erpeLT4+PujXr5/e9fj6+qJ3795SmyFDhkCpVOLAgQNSm7vuugsuLi5Sm+HDhyMhIQHZ2dlSG7muOSYmBkFBQejYsSOmTp2KzMxMaZ+jXV9ubi4AwN/fH0D9/UzW1+9x1eur8MMPPyAgIABdu3bFnDlzUFRUJO1zpOvTarVYtWoVCgsLERkZ2eA+v6rXV6EhfH5RUVEYPXp0tToc9TN06EUJre369evQarV6HxAABAcH4+zZ
szJVZVy/fv2wcuVKdOzYEWlpaZg/fz7uvPNOxMfHIz09HS4uLvD19dU7Jjg4GOnp6QCA9PR0g9dasa+mNnl5ebhx4wbc3NxsdHX6KuoxVEvlWoOCgvT2Ozk5wd/fX69NmzZtqp2jYp+fn5/Ra644h62MGDECDzzwANq0aYMLFy7gv//9L0aOHIl9+/ZBpVI51PXpdDq8+OKLGDBgALp27Sp9//r4mczOzrb577Gh6wOAxx57DK1atUKzZs1w8uRJvPrqq0hISMCaNWsc5vri4uIQGRmJ4uJieHp6Yu3atejSpQuOHz/eID4/Y9cHNIzPb9WqVTh69CgOHTpUbZ+j/g4yqDiwkSNHSq8jIiLQr18/tGrVCr/88ku9BQiynn/961/S627duiEiIgLt2rVDTEwMBg8eLGNl5ouKikJ8fDz27Nkjdyk2Yez6nn32Wel1t27d0LRpUwwePBgXLlxAu3bt6rtMi3Ts2BHHjx9Hbm4uVq9ejYkTJyI2NlbusqzG2PV16dLF4T+/1NRUvPDCC9i2bRtcXV3lLsdqeOunkoCAAKhUqmojoK9evYqQkBCZqqo9X19fdOjQAYmJiQgJCUFJSQlycnL02lS+lpCQEIPXWrGvpjbe3t71GoYq6qnpswkJCUFGRobe/rKyMmRlZVnlmuv7Z6Bt27YICAhAYmKiVJcjXN+0adPw559/YteuXWjRooW0vb5+Jm39e2zs+gzp168fAOh9hvZ+fS4uLggLC0OvXr0QHR2N7t2746OPPmown5+x6zPE0T6/I0eOICMjAz179oSTkxOcnJwQGxuLjz/+GE5OTggODnbIz5BBpRIXFxf06tULO3bskLbpdDrs2LFD7x6mvSooKMCFCxfQtGlT9OrVC87OznrXkpCQgJSUFOlaIiMjERcXp/fHb9u2bfD29pa6QiMjI/XOUdGmvv892rRpg5CQEL1a8vLycODAAb3rycnJwZEjR6Q2O3fuhE6nk/6HExkZid27d6O0tFRqs23bNnTs2BF+fn5SG3u45kuXLiEzMxNNmzaV6rLn6xNCYNq0aVi7di127txZ7RZUff1M2ur32NT1GXL8+HEA0PsM7fX6jNHpdNBoNA7/+Zm6PkMc7fMbPHgw4uLicPz4cemrd+/emDBhgvTaIT9Ds4ffNnCrVq0SarVarFy5Upw+fVo8++yzwtfXV28EtL14+eWXRUxMjEhKShJ79+4VQ4YMEQEBASIjI0MIUf4YWsuWLcXOnTvF4cOHRWRkpIiMjJSOr3gMbdiwYeL48eNi8+bNIjAw0OBjaLNmzRJnzpwRy5Yts9njyfn5+eLYsWPi2LFjAoD44IMPxLFjx8TFixeFEOWPJ/v6+or169eLkydPinHjxhl8PLlHjx7iwIEDYs+ePaJ9+/Z6j+/m5OSI4OBg8e9//1vEx8eLVatWCXd392qP7zo5OYn3339fnDlzRrz55ptWeXy3puvLz88XM2fOFPv27RNJSUli+/btomfPnqJ9+/aiuLjYIa5v6tSpwsfHR8TExOg93llUVCS1qa+fSVv8Hpu6vsTERPHWW2+Jw4cPi6SkJLF+/XrRtm1bcddddznE9QkhxOzZs0VsbKxISkoSJ0+eFLNnzxYKhUJs3bpVCOHYn5+p62sIn58hVZ9kcsTPkEHFgCVLloiWLVsKFxcX0bdvX7F//365SzLo0UcfFU2bNhUuLi6iefPm4tFHHxWJiYnS/hs3bojnnntO+Pn5CXd3d3H//feLtLQ0vXMkJyeLkSNHCjc3NxEQECBefvllUVpaqtdm165d4rbbbhMuLi6ibdu2YsWKFTa5nl27dgkA1b4mTpwohCh/RHnu3LkiODhYqNVqMXjwYJGQkKB3jszMTDF+/Hjh6ekpvL29xZNPPiny8/P12pw4cULccccdQq1Wi+bNm4tFixZVq+WXX34RHTp0EC4uLiI8PFxs2LDBptdXVFQkhg0bJgIDA4Wzs7No1aqVmDx5crVfanu+PkPX
BkDv56U+fyat/Xts6vpSUlLEXXfdJfz9/YVarRZhYWFi1qxZevNw2PP1CSHEU089JVq1aiVcXFxEYGCgGDx4sBRShHDsz8/U9TWEz8+QqkHFET9DhRBCmN8PQ0RERGR7HKNCREREdotBhYiIiOwWgwoRERHZLQYVIiIislsMKkRERGS3GFSIiIjIbjGoEBERkd1iUCEiIiK7xaBCRHXSunVrLF68uNbtY2JioFAoqi2MRkRkCGemJWpk7r77btx2221mhYuaXLt2DR4eHnB3d69V+5KSEmRlZSE4OBgKhcIqNZgrJiYGgwYNQnZ2Nnx9fWWpgYhqx0nuAojI/gghoNVq4eRk+n8RgYGBZp3bxcWlzsvZE1HjwVs/RI3IpEmTEBsbi48++ggKhQIKhQLJycnS7ZhNmzahV69eUKvV2LNnDy5cuIBx48YhODgYnp6e6NOnD7Zv3653zqq3fhQKBb788kvcf//9cHd3R/v27fH7779L+6ve+lm5ciV8fX2xZcsWdO7cGZ6enhgxYgTS0tKkY8rKyjB9+nT4+vqiSZMmePXVVzFx4kTcd999Rq/14sWLGDNmDPz8/ODh4YHw8HBs3LgRycnJGDRoEADAz88PCoUCkyZNAlC+FH10dDTatGkDNzc3dO/eHatXr65W+4YNGxAREQFXV1fcfvvtiI+Pt/ATISJTGFSIGpGPPvoIkZGRmDx5MtLS0pCWlobQ0FBp/+zZs7Fo0SKcOXMGERERKCgowKhRo7Bjxw4cO3YMI0aMwJgxY5CSklLj95k/fz4eeeQRnDx5EqNGjcKECROQlZVltH1RURHef/99fPfdd9i9ezdSUlIwc+ZMaf8777yDH374AStWrMDevXuRl5eHdevW1VhDVFQUNBoNdu/ejbi4OLzzzjvw9PREaGgofvvtNwBAQkIC0tLS8NFHHwEAoqOj8e233+LTTz/FqVOn8NJLL+Hxxx9HbGys3rlnzZqF//3vfzh06BACAwMxZswYlJaW1lgPEVnIojWXichhVV32XYjyJdsBiHXr1pk8Pjw8XCxZskR636pVK/Hhhx9K7wGI119/XXpfUFAgAIhNmzbpfa/s7GwhhBArVqwQAERiYqJ0zLJly0RwcLD0Pjg4WLz33nvS+7KyMtGyZUsxbtw4o3V269ZNzJs3z+C+qjUIIURxcbFwd3cXf//9t17bp59+WowfP17vuFWrVkn7MzMzhZubm/j555+N1kJEluMYFSKS9O7dW+99QUEB5s2bhw0bNiAtLQ1lZWW4ceOGyR6ViIgI6bWHhwe8vb2RkZFhtL27uzvatWsnvW/atKnUPjc3F1evXkXfvn2l/SqVCr169YJOpzN6zunTp2Pq1KnYunUrhgwZggcffFCvrqoSExNRVFSEoUOH6m0vKSlBjx499LZFRkZKr/39/dGxY0ecOXPG6LmJyHIMKkQk8fDw0Hs/c+ZMbNu2De+//z7CwsLg5uaGhx56CCUlJTWex9nZWe+9QqGoMVQYai/q+EDiM888g+HDh2PDhg3YunUroqOj8b///Q/PP/+8wfYFBQUAgA0bNqB58+Z6+9RqdZ1qISLLcYwKUSPj4uICrVZbq7Z79+7FpEmTcP/996Nbt24ICQlBcnKybQuswsfHB8HBwTh06JC0TavV4ujRoyaPDQ0NxZQpU7BmzRq8/PLL+OKLLwCU/xtUnKdCly5doFarkZKSgrCwML2vyuN4AGD//v3S6+zsbJw7dw6dO3eu03USkWHsUSFqZFq3bo0DBw4gOTkZnp6e8Pf3N9q2ffv2WLNmDcaMGQOFQoG5c+fW2DNiK88//zyio6MRFhaGTp06YcmSJcjOzq5xHpYXX3wRI0eORIcOHZCdnY1du3ZJYaJVq1ZQKBT4888/MWrUKLi5ucHLywszZ87ESy+9BJ1OhzvuuAO5ubnYu3cvvL29MXHiROncb731Fpo0aYLg4GC89tprCAgIqPEJJCKyHHtUiBqZmTNnQqVSoUuX
LggMDKxxvMkHH3wAPz8/9O/fH2PGjMHw4cPRs2fPeqy23Kuvvorx48fjiSeeQGRkJDw9PTF8+HC4uroaPUar1SIqKgqdO3fGiBEj0KFDB3zyyScAgObNm2P+/PmYPXs2goODMW3aNADAggULMHfuXERHR0vHbdiwAW3atNE796JFi/DCCy+gV69eSE9Pxx9//CH10hCRdXFmWiJyODqdDp07d8YjjzyCBQsW1Nv35Yy2RPWPt36IyO5dvHgRW7duxcCBA6HRaLB06VIkJSXhsccek7s0IrIx3vohIrunVCqxcuVK9OnTBwMGDEBcXBy2b9/OAaxEjQBv/RAREZHdYo8KERER2S0GFSIiIrJbDCpERERktxhUiIiIyG4xqBAREZHdYlAhIiIiu8WgQkRERHaLQYWIiIjs1v8Dg8z/ALe9yNkAAAAASUVORK5CYII=",
      "text/plain": [
       "<Figure size 640x480 with 1 Axes>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "from tqdm import trange\n",
    "import matplotlib.pyplot as plt\n",
    "from torch.optim import Adam\n",
    "import numpy as np\n",
    "\n",
    "# 训练序列到序列模型\n",
    "def train_seq2seq_mt(train_data, encoder, decoder, epochs=20,\\\n",
    "        learning_rate=1e-3):\n",
    "    # 准备模型和优化器\n",
    "    encoder_optimizer = Adam(encoder.parameters(), lr=learning_rate)\n",
    "    decoder_optimizer = Adam(decoder.parameters(), lr=learning_rate)\n",
    "    criterion = nn.NLLLoss()\n",
    "\n",
    "    encoder.train()\n",
    "    decoder.train()\n",
    "    encoder.zero_grad()\n",
    "    decoder.zero_grad()\n",
    "\n",
    "    step_losses = []\n",
    "    plot_losses = []\n",
    "    with trange(n_epochs, desc='epoch', ncols=60) as pbar:\n",
    "        for epoch in pbar:\n",
    "            np.random.shuffle(train_data)\n",
    "            for step, data in enumerate(train_data):\n",
    "                # 将源序列和目标序列转为 1 * seq_len 的tensor\n",
    "                # 这里为了简单实现，采用了批次大小为1，\n",
    "                # 当批次大小大于1时，编码器需要进行填充\n",
    "                # 并且返回最后一个非填充词的隐状态，\n",
    "                # 解码也需要进行相应的处理\n",
    "                input_ids, target_ids = data\n",
    "                input_tensor, target_tensor = \\\n",
    "                    torch.tensor(input_ids).unsqueeze(0),\\\n",
    "                    torch.tensor(target_ids).unsqueeze(0)\n",
    "\n",
    "                encoder_optimizer.zero_grad()\n",
    "                decoder_optimizer.zero_grad()\n",
    "\n",
    "                encoder_outputs, encoder_hidden = encoder(input_tensor)\n",
    "                # 输入目标序列用于teacher forcing训练\n",
    "                decoder_outputs, _, _ = decoder(encoder_outputs,\\\n",
    "                    encoder_hidden, target_tensor)\n",
    "\n",
    "                loss = criterion(\n",
    "                    decoder_outputs.view(-1, decoder_outputs.size(-1)),\n",
    "                    target_tensor.view(-1)\n",
    "                )\n",
    "                pbar.set_description(f'epoch-{epoch}, '+\\\n",
    "                    f'loss={loss.item():.4f}')\n",
    "                step_losses.append(loss.item())\n",
    "                # 实际训练批次为1，训练损失波动过大\n",
    "                # 将多步损失求平均可以得到更平滑的训练曲线，便于观察\n",
    "                plot_losses.append(np.mean(step_losses[-32:]))\n",
    "                loss.backward()\n",
    "\n",
    "                encoder_optimizer.step()\n",
    "                decoder_optimizer.step()\n",
    "\n",
    "    plot_losses = np.array(plot_losses)\n",
    "    plt.plot(range(len(plot_losses)), plot_losses)\n",
    "    plt.xlabel('training step')\n",
    "    plt.ylabel('loss')\n",
    "    plt.show()\n",
    "\n",
    "    \n",
    "hidden_size = 128\n",
    "n_epochs = 20\n",
    "learning_rate = 1e-3\n",
    "\n",
    "encoder = RNNEncoder(input_lang.n_words, hidden_size)\n",
    "decoder = AttnRNNDecoder(output_lang.n_words, hidden_size)\n",
    "\n",
    "train_seq2seq_mt(train_data, encoder, decoder, n_epochs, learning_rate)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "be47e115",
   "metadata": {},
   "source": [
     "下面实现贪心搜索解码。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "678192b3",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "input： 跨 境 电 商 基 础 与 实 务 （ 微 课 版 ）\n",
      "target： cross-border power provider basics and practices (microcurricular version)\n",
      "pred： cross-border power provider basics and practices (microcurricular version)\n",
      "\n",
      "input： 引 爆 品 牌 3 3 课\n",
      "target： detonating brand 33 lessons .\n",
      "pred： detonating brand 33 lessons .\n",
      "\n",
      "input： 图 解 语 音 识 别\n",
      "target： diagonal voice recognition\n",
      "pred： diagonal voice recognition\n",
      "\n",
      "input： g i t l a b c i / c d 从 入 门 到 实 战\n",
      "target： gitlab ci/cd from the entry point to the field .\n",
      "pred： arduino from entry to field .\n",
      "\n",
      "input： 计 算 机 与 互 联 网\n",
      "target： computers and the internet\n",
      "pred： computers and the internet theory , internet and the implementation of information security and maintenance\n",
      "\n"
     ]
    }
   ],
   "source": [
     "\"\"\"\n",
     "Code adapted from the GitHub project pytorch/tutorials\n",
     "(Copyright (c) 2023, PyTorch, BSD-3-Clause License (see appendix))\n",
     "\"\"\"\n",
     "def greedy_decode(encoder, decoder, sentence, input_lang, output_lang):\n",
     "    # Greedily decode a translation for `sentence`: at every step keep\n",
     "    # only the single most probable token. Returns the decoded sentence\n",
     "    # and the decoder attention weights.\n",
     "    with torch.no_grad():\n",
     "        # Convert the source sentence to a 1 * seq_length tensor\n",
     "        input_ids = input_lang.sent2ids(sentence)\n",
     "        input_tensor = torch.tensor(input_ids).unsqueeze(0)\n",
     "        \n",
     "        encoder_outputs, encoder_hidden = encoder(input_tensor)\n",
     "        decoder_outputs, decoder_hidden, decoder_attn = \\\n",
     "            decoder(encoder_outputs, encoder_hidden)\n",
     "        \n",
     "        # Take the highest-probability token at each step\n",
     "        _, topi = decoder_outputs.topk(1)\n",
     "        \n",
     "        decoded_ids = []\n",
     "        for idx in topi.squeeze():\n",
     "            # Stop at the end-of-sequence token\n",
     "            if idx.item() == EOS_token:\n",
     "                break\n",
     "            decoded_ids.append(idx.item())\n",
     "    return output_lang.ids2sent(decoded_ids), decoder_attn\n",
     "            \n",
     "encoder.eval()\n",
     "decoder.eval()\n",
     "# Spot-check the model on a few random training pairs\n",
     "for i in range(5):\n",
     "    pair = random.choice(pairs)\n",
     "    print('input：', pair[0])\n",
     "    print('target：', pair[1])\n",
     "    output_sentence, _ = greedy_decode(encoder, decoder, pair[0],\n",
     "        input_lang, output_lang)\n",
     "    print('pred：', output_sentence)\n",
     "    print('')\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e38320f0",
   "metadata": {},
   "source": [
    "\n",
    "接下来使用束搜索解码来验证模型。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "3496efa3",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "input： h t m l 5 / c s s 3 / j a v a s c r i p t 技 术 大 全\n",
      "target： html5/css3/javascript technology majority\n",
      "pred： html5/css3/javascript technology majority\n",
      "\n",
      "input： 势 不 可 挡 3 4 位 成 功 的 职 业 教 练 和 企 业 家 教 你 如 何 达 成 高 绩 效\n",
      "target： unstoppable . 34 successful career coaches and entrepreneurs taught you how to achieve high performance .\n",
      "pred： how to find more efficiently and entrepreneurs taught you how to achieve high performance . ed .\n",
      "\n",
      "input： 计 算 机 组 装 与 维 护 案 例 教 程 （ 微 课 版 ）\n",
      "target： computer assembly and maintenance case curriculum (micro-pedagogical version)\n",
      "pred： computer assembly and maintenance case studies (micro-pedagogical version)\n",
      "\n",
      "input： 高 效 阅 读 培 养 终 身 受 用 的 阅 读 力\n",
      "target： to read efficiently , to develop the ability to read for the rest of your life .\n",
      "pred： to read efficiently , to develop the ability to read for the rest of your life .\n",
      "\n",
      "input： 胖 胖 集 趣 味 水 墨 诗 画 绘\n",
      "target： fat , fat , fun , ink , poetry .\n",
      "pred： fat , fat , fun , ink , poetry .\n",
      "\n"
     ]
    }
   ],
   "source": [
     "# Container class managing all candidate hypotheses of the beam\n",
     "class BeamHypotheses:\n",
     "    def __init__(self, num_beams, max_length):\n",
     "        # maximum decoding length for any hypothesis\n",
     "        self.max_length = max_length\n",
     "        # beam width: at most this many hypotheses are kept\n",
     "        self.num_beams = num_beams\n",
     "        # list of (score, token_id_list, decoder_hidden) tuples\n",
     "        self.beams = []\n",
     "        # score of the worst hypothesis currently kept\n",
     "        self.worst_score = 1e9\n",
     "\n",
     "    def __len__(self):\n",
     "        return len(self.beams)\n",
     "    \n",
     "    # Add one candidate and update the worst score\n",
     "    def add(self, sum_logprobs, hyp, hidden):\n",
     "        # length-normalized score (max guards the empty hypothesis)\n",
     "        score = sum_logprobs / max(len(hyp), 1)\n",
     "        if len(self) < self.num_beams or score > self.worst_score:\n",
     "            # Accept when the beam is not full or the new score\n",
     "            # beats the current worst\n",
     "            self.beams.append((score, hyp, hidden))\n",
     "            if len(self) > self.num_beams:\n",
     "                # Beam is over-full: drop the worst candidate;\n",
     "                # sorted_scores[0] is removed, so [1] is the new worst\n",
     "                sorted_scores = sorted([(s, idx) for idx,\\\n",
     "                    (s, _, _) in enumerate(self.beams)])\n",
     "                del self.beams[sorted_scores[0][1]]\n",
     "                self.worst_score = sorted_scores[1][0]\n",
     "            else:\n",
     "                self.worst_score = min(score, self.worst_score)\n",
     "    \n",
     "    # Pop one unfinished candidate. The first return value indicates\n",
     "    # success; on success the second value is the candidate.\n",
     "    def pop(self):\n",
     "        if len(self) == 0:\n",
     "            return False, None\n",
     "        for i, (s, hyp, hid) in enumerate(self.beams):\n",
     "            # An unfinished candidate is shorter than the maximum\n",
     "            # decoding length and does not end with <eos>\n",
     "            if len(hyp) < self.max_length and (len(hyp) == 0\\\n",
     "                    or hyp[-1] != EOS_token):\n",
     "                del self.beams[i]\n",
     "                if len(self) > 0:\n",
     "                    # recompute the worst score over the remainder\n",
     "                    sorted_scores = sorted([(s, idx) for idx,\\\n",
     "                        (s, _, _) in enumerate(self.beams)])\n",
     "                    self.worst_score = sorted_scores[0][0]\n",
     "                else:\n",
     "                    self.worst_score = 1e9\n",
     "                return True, (s, hyp, hid)\n",
     "        return False, None\n",
     "    \n",
     "    # Pop the best-scoring candidate. The first return value indicates\n",
     "    # success; on success the second value is the candidate.\n",
     "    def pop_best(self):\n",
     "        if len(self) == 0:\n",
     "            return False, None\n",
     "        sorted_scores = sorted([(s, idx) for idx, (s, _, _)\\\n",
     "            in enumerate(self.beams)])\n",
     "        return True, self.beams[sorted_scores[-1][1]]\n",
     "\n",
     "\n",
     "def beam_search_decode(encoder, decoder, sentence, input_lang,\n",
     "        output_lang, num_beams=3):\n",
     "    # Translate `sentence` with beam search of width `num_beams`,\n",
     "    # returning the highest-scoring decoded sentence ('' on failure).\n",
     "    with torch.no_grad():\n",
     "        # Convert the source sentence to a 1 * seq_length tensor\n",
     "        input_ids = input_lang.sent2ids(sentence)\n",
     "        input_tensor = torch.tensor(input_ids).unsqueeze(0)\n",
     "\n",
     "        # Seed the container with a single empty hypothesis\n",
     "        encoder_outputs, encoder_hidden = encoder(input_tensor)\n",
     "        init_hyp = []\n",
     "        hypotheses = BeamHypotheses(num_beams, MAX_LENGTH)\n",
     "        hypotheses.add(0, init_hyp, encoder_hidden)\n",
     "\n",
     "        while True:\n",
     "            # Take one unfinished hypothesis at a time\n",
     "            flag, item = hypotheses.pop()\n",
     "            if not flag:\n",
     "                break\n",
     "                \n",
     "            score, hyp, decoder_hidden = item\n",
     "            \n",
     "            # Current decoder input: the last generated token,\n",
     "            # or <sos> for the empty hypothesis\n",
     "            if len(hyp) > 0:\n",
     "                decoder_input = torch.empty(1, 1,\\\n",
     "                    dtype=torch.long).fill_(hyp[-1])\n",
     "            else:\n",
     "                decoder_input = torch.empty(1, 1,\\\n",
     "                    dtype=torch.long).fill_(SOS_token)\n",
     "\n",
     "            # Decode a single step\n",
     "            decoder_output, decoder_hidden, _ = decoder.forward_step(\n",
     "                decoder_input, decoder_hidden, encoder_outputs\n",
     "            )\n",
     "\n",
     "            # Take the top-k tokens from the output distribution\n",
     "            topk_values, topk_ids = decoder_output.topk(num_beams)\n",
     "            # Extend the hypothesis and push the new candidates\n",
     "            for logp, token_id in zip(topk_values.squeeze(),\\\n",
     "                    topk_ids.squeeze()):\n",
     "                # Recover the log-prob sum from the normalized score,\n",
     "                # then accumulate this step's log-probability\n",
     "                sum_logprobs = score * len(hyp) + logp.item()\n",
     "                new_hyp = hyp + [token_id.item()]\n",
     "                hypotheses.add(sum_logprobs, new_hyp, decoder_hidden)\n",
     "\n",
     "        flag, item = hypotheses.pop_best()\n",
     "        if flag:\n",
     "            hyp = item[1]\n",
     "            # Strip a trailing <eos> before detokenizing\n",
     "            if hyp[-1] == EOS_token:\n",
     "                del hyp[-1]\n",
     "            return output_lang.ids2sent(hyp)\n",
     "        else:\n",
     "            return ''\n",
     "\n",
     "encoder.eval()\n",
     "decoder.eval()\n",
     "# Spot-check beam-search decoding on a few random training pairs\n",
     "for i in range(5):\n",
     "    pair = random.choice(pairs)\n",
     "    print('input：', pair[0])\n",
     "    print('target：', pair[1])\n",
     "    output_sentence = beam_search_decode(encoder, decoder,\\\n",
     "        pair[0], input_lang, output_lang)\n",
     "    print('pred：', output_sentence)\n",
     "    print('')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "29a7afd2",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
