{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "f2ff7f77",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "sys.version_info(major=3, minor=12, micro=3, releaselevel='final', serial=0)\n",
      "matplotlib 3.10.0\n",
      "numpy 1.26.4\n",
      "pandas 2.2.3\n",
      "sklearn 1.6.0\n",
      "torch 2.5.1+cpu\n",
      "cpu\n"
     ]
    }
   ],
   "source": [
    "import matplotlib as mpl\n",
    "import matplotlib.pyplot as plt\n",
    "%matplotlib inline\n",
    "import numpy as np\n",
    "import sklearn\n",
    "import pandas as pd\n",
    "import os\n",
    "import sys\n",
    "import time\n",
    "from tqdm.auto import tqdm\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "\n",
    "print(sys.version_info)\n",
    "for module in mpl, np, pd, sklearn, torch:\n",
    "    print(module.__name__, module.__version__)\n",
    "\n",
    "device = torch.device(\"cuda:0\") if torch.cuda.is_available() else torch.device(\"cpu\")\n",
    "print(device)\n",
    "\n",
    "seed = 42\n",
    "torch.manual_seed(seed)\n",
    "torch.cuda.manual_seed_all(seed)\n",
    "np.random.seed(seed)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4a7fe48a",
   "metadata": {},
   "source": [
    "# 1. preprocessing data "
   ]
  },
  {
   "cell_type": "markdown",
   "id": "492f2369",
   "metadata": {},
   "source": [
    "## 数据加载"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "initial_id",
   "metadata": {
    "collapsed": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "May I borrow, this book?\n",
      "¿Puedo tomar prestado este libro?\n"
     ]
    }
   ],
   "source": [
    "import unicodedata\n",
    "import re\n",
    "\n",
    "\n",
    "# Spanish text contains accented characters; converting Unicode to plain\n",
    "# ASCII keeps the set of distinct characters (and later the vocabulary) small.\n",
    "def unicode_to_ascii(s):\n",
    "    # NFD decomposition splits each accented character into a base character\n",
    "    # plus combining marks; category 'Mn' (nonspacing mark) is the accent, so drop it.\n",
    "    return ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn')\n",
    "\n",
    "# Quick sanity check on one sample sentence per language.\n",
    "# The u prefix marks a Unicode string literal (redundant in Python 3).\n",
    "en_sentence = u\"May I borrow, this book?\"\n",
    "sp_sentence = u\"¿Puedo tomar prestado este libro?\"\n",
    "\n",
    "print(unicode_to_ascii(en_sentence))\n",
    "print(unicode_to_ascii(sp_sentence))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "ae160f03",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "may i borrow , this book ?\n",
      "¿ puedo tomar prestado este libro ?\n",
      "b'\\xc2\\xbf puedo tomar prestado este libro ?'\n"
     ]
    }
   ],
   "source": [
    "def preprocess_sentence(w):\n",
    "    \"\"\"Normalize one sentence: lowercase, strip accents, pad punctuation\n",
    "    with spaces, drop characters outside (a-z, ?, ., !, ,, ¿), collapse spaces.\"\"\"\n",
    "    w = unicode_to_ascii(w.lower().strip())\n",
    "\n",
    "    # Insert a space between a word and the punctuation following it,\n",
    "    # e.g. \"he is a boy.\" => \"he is a boy . \"\n",
    "    # Reference:- https://stackoverflow.com/questions/3645931/python-padding-punctuation-with-white-spaces-keeping-punctuation\n",
    "    w = re.sub(r\"([?.!,¿])\", r\" \\1 \", w)\n",
    "\n",
    "    # Replace every run of characters outside (a-z, A-Z, \".\", \"?\", \"!\", \",\", \"¿\") with a space.\n",
    "    w = re.sub(r\"[^a-zA-Z?.!,¿]+\", \" \", w)\n",
    "\n",
    "    # Collapse runs of spaces into a single space.\n",
    "    # (The previous pattern ['\" \"']+ was a character class matching quotes as well\n",
    "    # as spaces; \" +\" states the intent directly. Quotes were already removed by\n",
    "    # the substitution above, so behavior is unchanged.)\n",
    "    w = re.sub(r\" +\", \" \", w)\n",
    "\n",
    "    w = w.strip()  # remove leading/trailing spaces\n",
    "\n",
    "    return w\n",
    "\n",
    "print(preprocess_sentence(en_sentence))\n",
    "print(preprocess_sentence(sp_sentence))\n",
    "print(preprocess_sentence(sp_sentence).encode('utf-8'))  # ¿ occupies two bytes in UTF-8"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "8ebb0b91",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array(['train', 'test', 'train', 'train', 'train', 'train', 'train',\n",
       "       'train', 'train', 'train', 'train', 'test', 'train', 'train',\n",
       "       'train', 'train', 'train', 'train', 'train', 'train', 'train',\n",
       "       'train', 'train', 'train', 'train', 'train', 'train', 'train',\n",
       "       'train', 'train', 'train', 'train', 'train', 'test', 'test',\n",
       "       'train', 'train', 'train', 'train', 'train', 'train', 'train',\n",
       "       'train', 'test', 'train', 'train', 'train', 'train', 'train',\n",
       "       'train', 'test', 'train', 'test', 'train', 'train', 'test',\n",
       "       'train', 'train', 'train', 'train', 'train', 'train', 'train',\n",
       "       'train', 'train', 'train', 'train', 'train', 'train', 'test',\n",
       "       'train', 'train', 'train', 'train', 'train', 'train', 'train',\n",
       "       'train', 'train', 'train', 'train', 'train', 'train', 'train',\n",
       "       'train', 'train', 'train', 'train', 'train', 'train', 'train',\n",
       "       'train', 'train', 'train', 'train', 'train', 'train', 'train',\n",
       "       'train', 'train'], dtype='<U5')"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# One way to split data into train/test: draw a random label per example,\n",
    "# \"train\" with probability 0.9 and \"test\" with probability 0.1.\n",
    "split_index1 = np.random.choice(a=[\"train\", \"test\"], replace=True, p=[0.9, 0.1], size=100)\n",
    "split_index1"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "12de193a",
   "metadata": {},
   "source": [
    "## Dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "4165b75f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "从缓存文件加载train数据...\n",
      "从缓存文件加载test数据...\n"
     ]
    }
   ],
   "source": [
    "# A dataset class inheriting from torch.utils.data.Dataset\n",
    "import torch\n",
    "from torch.utils.data import Dataset\n",
    "import os\n",
    "\n",
    "class TranslationDataset(Dataset):\n",
    "    \"\"\"English/Spanish sentence-pair dataset with on-disk .npy caching.\n",
    "\n",
    "    Args:\n",
    "        path: tab-separated text file, one English/Spanish pair per line.\n",
    "        num_examples: optional cap on the number of lines read.\n",
    "        split: 'train', 'test', or None for all examples.\n",
    "    \"\"\"\n",
    "    def __init__(self, path, num_examples=None, split=None):\n",
    "        # Cache file names depend on the split so train/test don't collide.\n",
    "        cache_file_en = f'{split}_en_sentences.npy' if split else 'all_en_sentences.npy'\n",
    "        cache_file_sp = f'{split}_sp_sentences.npy' if split else 'all_sp_sentences.npy'\n",
    "        \n",
    "        # If cached arrays exist, load them instead of re-reading the text file.\n",
    "        if os.path.exists(cache_file_en) and os.path.exists(cache_file_sp):\n",
    "            print(f\"从缓存文件加载{split}数据...\")\n",
    "            self.trg = np.load(cache_file_en)\n",
    "            self.src = np.load(cache_file_sp)\n",
    "        else:\n",
    "            print(f\"从{path}读取数据并创建{split}数据集...\")\n",
    "            # Read the file; the context manager closes the handle\n",
    "            # (the original open(...).read() leaked the file object).\n",
    "            with open(path, encoding='UTF-8') as f:\n",
    "                lines = f.read().strip().split('\\n')\n",
    "            \n",
    "            # Accumulate the English and Spanish sides of each kept pair.\n",
    "            self.en_sentences = []\n",
    "            self.sp_sentences = []\n",
    "            \n",
    "            # Use all lines unless num_examples caps the count, then draw a\n",
    "            # random train/test label per example (90% / 10%).\n",
    "            total_examples = len(lines) if num_examples is None else min(num_examples, len(lines))\n",
    "            split_index = np.random.choice(a=[\"train\", \"test\"], replace=True, p=[0.9, 0.1], size=total_examples)\n",
    "            \n",
    "            for i, line in enumerate(lines[:total_examples]):\n",
    "                # Keep only the examples belonging to the requested split.\n",
    "                if split is not None and split_index[i] != split:\n",
    "                    continue\n",
    "                    \n",
    "                # Split on tab; take the first two fields so lines with extra\n",
    "                # tab-separated columns don't raise an unpacking error.\n",
    "                en, sp = line.split('\\t')[:2]\n",
    "                \n",
    "                # Normalize both sides.\n",
    "                en = preprocess_sentence(en)\n",
    "                sp = preprocess_sentence(sp)\n",
    "                \n",
    "                self.en_sentences.append(en)\n",
    "                self.sp_sentences.append(sp)\n",
    "            \n",
    "            self.trg = np.array(self.en_sentences)  # English (target)\n",
    "            self.src = np.array(self.sp_sentences)  # Spanish (source)\n",
    "            \n",
    "            # Cache as .npy so later runs skip the preprocessing step.\n",
    "            np.save(cache_file_en, self.trg)\n",
    "            np.save(cache_file_sp, self.src)\n",
    "    \n",
    "    def __len__(self):\n",
    "        return len(self.trg)\n",
    "    \n",
    "    def __getitem__(self, idx):\n",
    "        # Returns (source, target) == (Spanish, English).\n",
    "        return self.src[idx], self.trg[idx]\n",
    "\n",
    "\n",
    "# Build the train/test datasets from spa.txt.\n",
    "train_dataset = TranslationDataset('spa.txt', split='train')\n",
    "test_dataset = TranslationDataset('spa.txt', split='test')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "639e0829",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "source: si quieres sonar como un hablante nativo , debes estar dispuesto a practicar diciendo la misma frase una y otra vez de la misma manera en que un musico de banjo practica el mismo fraseo una y otra vez hasta que lo puedan tocar correctamente y en el tiempo esperado .\n",
      "target: if you want to sound like a native speaker , you must be willing to practice saying the same sentence over and over in the same way that banjo players practice the same phrase over and over until they can play it correctly and at the desired tempo .\n"
     ]
    }
   ],
   "source": [
    "print(\"source: {}\\ntarget: {}\".format(*train_dataset[-1]))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "066c556f",
   "metadata": {},
   "source": [
    "### Tokenizer\n",
    "\n",
    "这里有两种处理方式，分别对应着 encoder 和 decoder 的 word embedding 是否共享，这里实现不共享的方案。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "6f347923",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "英语词典大小: 12479\n",
      "西班牙语词典大小: 23729\n"
     ]
    }
   ],
   "source": [
    "from collections import Counter\n",
    "import numpy as np\n",
    "\n",
    "# Build a word-to-index vocabulary for one language.\n",
    "def build_vocab(sentences, min_freq=1):\n",
    "    \"\"\"Map each word appearing in `sentences` to an integer id.\n",
    "\n",
    "    Special tokens occupy ids 0-3; the remaining ids are assigned in\n",
    "    descending frequency order. Words occurring fewer than `min_freq`\n",
    "    times are left out (they will map to [UNK] at encode time).\n",
    "    \"\"\"\n",
    "    # Special tokens come first so their ids are fixed.\n",
    "    word2idx = {\n",
    "        \"[PAD]\": 0,     # padding token\n",
    "        \"[BOS]\": 1,     # begin of sentence\n",
    "        \"[UNK]\": 2,     # unknown token\n",
    "        \"[EOS]\": 3,     # end of sentence\n",
    "    }\n",
    "    \n",
    "    # Count token frequencies over the whole corpus in one pass.\n",
    "    freq = Counter(token for sentence in sentences for token in sentence.split())\n",
    "    \n",
    "    # most_common() yields words by descending count (ties keep first-seen order);\n",
    "    # each kept word gets the next free id.\n",
    "    for token, count in freq.most_common():\n",
    "        if count >= min_freq:\n",
    "            word2idx[token] = len(word2idx)\n",
    "    \n",
    "    return word2idx\n",
    "\n",
    "# Build a separate vocabulary per language (smaller embedding tables than one shared vocab).\n",
    "trg_word2idx = build_vocab(train_dataset.trg)  # English (target)\n",
    "src_word2idx = build_vocab(train_dataset.src)  # Spanish (source)\n",
    "\n",
    "# Inverse mappings (index -> word), used when decoding model output.\n",
    "trg_idx2word = {idx: word for word, idx in trg_word2idx.items()}\n",
    "src_idx2word = {idx: word for word, idx in src_word2idx.items()}\n",
    "\n",
    "# Report vocabulary sizes (labels: English / Spanish vocabulary size).\n",
    "print(f\"英语词典大小: {len(trg_word2idx)}\")\n",
    "print(f\"西班牙语词典大小: {len(src_word2idx)}\")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8e3d95f7",
   "metadata": {},
   "source": [
    "# Tokenizer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "f82f9095",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "batch_text----------\n",
      "['hello', 'world']\n",
      "['tokenize', 'text', 'datas', 'with', 'batch']\n",
      "['this', 'is', 'a', 'test']\n",
      "mask----------\n",
      "tensor([0, 0, 0, 0, 1, 1, 1])\n",
      "tensor([0, 0, 0, 0, 0, 0, 0])\n",
      "tensor([0, 0, 0, 0, 0, 0, 1])\n",
      "indices----------\n",
      "tensor([   1, 1825,  308,    3,    0,    0,    0])\n",
      "tensor([   1,    2, 2088,    2,   39,    2,    3])\n",
      "tensor([  1,  24,  12,  10, 894,   3,   0])\n"
     ]
    }
   ],
   "source": [
    "class Tokenizer:\n",
    "    \"\"\"Convert between whitespace-tokenized text and padded id tensors.\"\"\"\n",
    "\n",
    "    def __init__(self, word2idx, idx2word, max_length=500, pad_idx=0, bos_idx=1, eos_idx=3, unk_idx=2):\n",
    "        self.word2idx = word2idx      # word -> index mapping\n",
    "        self.idx2word = idx2word      # index -> word mapping\n",
    "        self.max_length = max_length  # hard cap on sequence length\n",
    "        self.pad_idx = pad_idx        # [PAD] index\n",
    "        self.bos_idx = bos_idx        # [BOS] index\n",
    "        self.eos_idx = eos_idx        # [EOS] index\n",
    "        self.unk_idx = unk_idx        # [UNK] index\n",
    "\n",
    "    def encode(self, text_list, padding_first=False, add_bos=True, add_eos=True, return_mask=False):\n",
    "        \"\"\"Encode a batch of token lists into a [batch, max_len] id tensor.\n",
    "\n",
    "        padding_first: pad at the front of each sequence instead of the back.\n",
    "        return_mask: also return an int64 mask (1 = padding, 0 = real token).\n",
    "        \"\"\"\n",
    "        # Longest sequence in the batch (plus room for BOS/EOS), capped at max_length.\n",
    "        max_length = min(self.max_length, add_eos + add_bos + max([len(text) for text in text_list]))\n",
    "        indices_list = []\n",
    "        for text in text_list:\n",
    "            # Map words to ids (unknown words -> unk_idx), truncating to leave room for BOS/EOS.\n",
    "            indices = [self.word2idx.get(word, self.unk_idx) for word in text[:max_length - add_bos - add_eos]]\n",
    "            if add_bos:\n",
    "                indices = [self.bos_idx] + indices\n",
    "            if add_eos:\n",
    "                indices = indices + [self.eos_idx]\n",
    "            if padding_first:\n",
    "                indices = [self.pad_idx] * (max_length - len(indices)) + indices\n",
    "            else:\n",
    "                indices = indices + [self.pad_idx] * (max_length - len(indices))\n",
    "            indices_list.append(indices)\n",
    "        input_ids = torch.tensor(indices_list)\n",
    "        masks = (input_ids == self.pad_idx).to(dtype=torch.int64)  # 1 marks padding positions\n",
    "        return input_ids if not return_mask else (input_ids, masks)\n",
    "\n",
    "    def decode(self, indices_list, remove_bos=True, remove_eos=True, remove_pad=True, split=False):\n",
    "        \"\"\"Decode batches of id sequences back to text.\n",
    "\n",
    "        Accepts rows of Python ints or tensor rows. Returns joined strings,\n",
    "        or word lists if split=True.\n",
    "        \"\"\"\n",
    "        text_list = []\n",
    "        for indices in indices_list:\n",
    "            text = []\n",
    "            for index in indices:\n",
    "                # int() unwraps 0-dim tensors before the dict lookup; the previous\n",
    "                # direct get(index, ...) always missed for tensor elements (tensors\n",
    "                # hash by identity), so every id decoded as [UNK].\n",
    "                word = self.idx2word.get(int(index), \"[UNK]\")\n",
    "                if remove_bos and word == \"[BOS]\":\n",
    "                    continue\n",
    "                if remove_eos and word == \"[EOS]\":\n",
    "                    break\n",
    "                if remove_pad and word == \"[PAD]\":\n",
    "                    break  # NOTE(review): assumes padding-last; front-padded input decodes empty\n",
    "                text.append(word)\n",
    "            text_list.append(\" \".join(text) if not split else text)\n",
    "        return text_list\n",
    "\n",
    "# Two tokenizers (instead of one shared) keep each embedding table smaller.\n",
    "src_tokenizer = Tokenizer(word2idx=src_word2idx, idx2word=src_idx2word)  # source (Spanish) tokenizer\n",
    "trg_tokenizer = Tokenizer(word2idx=trg_word2idx, idx2word=trg_idx2word)  # target (English) tokenizer\n",
    "\n",
    "batch_text = [\"hello world\".split(), \"tokenize text datas with batch\".split(), \"this is a test\".split()]\n",
    "indices,mask = trg_tokenizer.encode(batch_text, padding_first=False, add_bos=True, add_eos=True,return_mask=True)\n",
    "\n",
    "print(\"batch_text\"+'-'*10)\n",
    "for raw in batch_text:\n",
    "    print(raw)\n",
    "print(\"mask\"+'-'*10)\n",
    "for m in mask:\n",
    "    print(m)\n",
    "print(\"indices\"+'-'*10)\n",
    "for index in indices:\n",
    "    print(index)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9eb52f7d",
   "metadata": {},
   "source": [
    "# DataLoader"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "d074472c",
   "metadata": {},
   "outputs": [],
   "source": [
    "def collate_fn(batch):\n",
    "    \"\"\"\n",
    "    Collate (source, target) string pairs into model-ready tensors.\n",
    "    \n",
    "    Note: relies on the module-level globals `src_tokenizer`, `trg_tokenizer`\n",
    "    and `device` — they are not parameters of this function.\n",
    "    \n",
    "    Args:\n",
    "        batch: list of (source_sentence, target_sentence) string pairs\n",
    "    \n",
    "    Returns:\n",
    "        dict with encoder inputs/mask, decoder inputs (BOS + tokens),\n",
    "        decoder labels (tokens + EOS) and the label mask\n",
    "    \"\"\"\n",
    "    src_texts = [pair[0].split() for pair in batch]  # column 0: source sentences, whitespace-tokenized\n",
    "    trg_texts = [pair[1].split() for pair in batch]  # column 1: target sentences, whitespace-tokenized\n",
    "    \n",
    "    # Encode the source side.\n",
    "    encoder_inputs, encoder_inputs_mask = src_tokenizer.encode(\n",
    "        src_texts, \n",
    "        padding_first=True, # pad at the front\n",
    "        add_bos=True, \n",
    "        add_eos=True, \n",
    "        return_mask=True\n",
    "    )\n",
    "    \n",
    "    # Decoder inputs for teacher forcing: BOS + tokens, no EOS.\n",
    "    decoder_inputs= trg_tokenizer.encode(\n",
    "        trg_texts, \n",
    "        padding_first=False, # pad at the back\n",
    "        add_bos=True, \n",
    "        add_eos=False, \n",
    "        return_mask=False\n",
    "    )\n",
    "    \n",
    "    # Decoder labels for the loss: tokens + EOS, no BOS (inputs shifted by one).\n",
    "    decoder_labels, decoder_labels_mask = trg_tokenizer.encode(\n",
    "        trg_texts, \n",
    "        padding_first=False, \n",
    "        add_bos=False, \n",
    "        add_eos=True, \n",
    "        return_mask=True\n",
    "    )\n",
    "    \n",
    "    result = {\n",
    "        \"encoder_inputs\": encoder_inputs,\n",
    "        \"encoder_inputs_mask\": encoder_inputs_mask,\n",
    "        \"decoder_inputs\": decoder_inputs,\n",
    "        \"decoder_labels\": decoder_labels,\n",
    "        \"decoder_labels_mask\": decoder_labels_mask\n",
    "    }\n",
    "    \n",
    "    # Move every tensor to the globally configured device.\n",
    "    if device is not None:\n",
    "        result = {k: v.to(device=device) for k, v in result.items()}\n",
    "    \n",
    "    return result\n",
    "\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "8d33abe2",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "encoder_inputs\n",
      "tensor([[    0,     1, 16264,   147,     9,    62,  1068,     4,     3],\n",
      "        [    1,    29,    50,  6324,    21,    11,   261,     4,     3]])\n",
      "encoder_inputs_mask\n",
      "tensor([[1, 0, 0, 0, 0, 0, 0, 0, 0],\n",
      "        [0, 0, 0, 0, 0, 0, 0, 0, 0]])\n",
      "decoder_inputs\n",
      "tensor([[   1, 9093,  566,   13,  421,  292,    4,    0],\n",
      "        [   1,   28,   20, 1149,   82,    6,  391,    4]])\n",
      "decoder_labels\n",
      "tensor([[9093,  566,   13,  421,  292,    4,    3,    0],\n",
      "        [  28,   20, 1149,   82,    6,  391,    4,    3]])\n",
      "decoder_labels_mask\n",
      "tensor([[0, 0, 0, 0, 0, 0, 0, 1],\n",
      "        [0, 0, 0, 0, 0, 0, 0, 0]])\n"
     ]
    }
   ],
   "source": [
    "from torch.utils.data import DataLoader\n",
    "sample_dl = DataLoader(train_dataset, batch_size=2, shuffle=True, collate_fn=collate_fn)\n",
    "\n",
    "# Re-running this cell prints a different batch because shuffle=True reshuffles each pass.\n",
    "for batch in sample_dl:\n",
    "    for key, value in batch.items():\n",
    "        print(key)\n",
    "        print(value)\n",
    "    break"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "87714908",
   "metadata": {},
   "source": [
    "# 定义模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "806adc98",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "\n",
    "class Encoder(nn.Module):\n",
    "    \"\"\"\n",
    "    Encoder half of the sequence-to-sequence model: embedding + GRU.\n",
    "    \"\"\"\n",
    "    def __init__(self, vocab_size, embedding_dim, hidden_size, num_layers=1, dropout=0.0):\n",
    "        \"\"\"\n",
    "        Args:\n",
    "        - vocab_size: source-language vocabulary size\n",
    "        - embedding_dim: word-embedding dimension\n",
    "        - hidden_size: GRU hidden-state dimension\n",
    "        - num_layers: number of stacked GRU layers\n",
    "        - dropout: dropout rate (on embeddings; between GRU layers only when num_layers > 1)\n",
    "        \"\"\"\n",
    "        super().__init__()\n",
    "        \n",
    "        # Word-embedding layer\n",
    "        self.embedding = nn.Embedding(vocab_size, embedding_dim)\n",
    "        \n",
    "        # GRU; batch_first=True means inputs are [batch_size, seq_len, ...]\n",
    "        self.gru = nn.GRU(\n",
    "            input_size=embedding_dim,\n",
    "            hidden_size=hidden_size,\n",
    "            num_layers=num_layers,\n",
    "            batch_first=True,\n",
    "            dropout=dropout if num_layers > 1 else 0\n",
    "        )\n",
    "        \n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "        self.hidden_size = hidden_size\n",
    "        self.num_layers = num_layers\n",
    "    \n",
    "    def forward(self, src, src_mask=None, src_lengths=None):\n",
    "        \"\"\"\n",
    "        Run the encoder over a batch of source sequences.\n",
    "        \n",
    "        Args:\n",
    "        - src: source token ids [batch_size, seq_len]\n",
    "        - src_mask: padding mask [batch_size, seq_len] — accepted but currently unused\n",
    "        - src_lengths: true sequence lengths [batch_size] — accepted but currently unused\n",
    "        \n",
    "        Returns:\n",
    "        - encoder_outputs: per-step outputs [batch_size, seq_len, hidden_size]\n",
    "        - hidden: final hidden state [num_layers, batch_size, hidden_size]\n",
    "        \"\"\"\n",
    "        \n",
    "        # Embed + dropout: [batch_size, seq_len] -> [batch_size, seq_len, embedding_dim]\n",
    "        embedded = self.dropout(self.embedding(src))\n",
    "        \n",
    "        # GRU: outputs [batch_size, seq_len, hidden_size],\n",
    "        # hidden [num_layers, batch_size, hidden_size]\n",
    "        encoder_outputs, hidden = self.gru(embedded) \n",
    "        \n",
    "        # All time-step outputs plus the state the decoder starts from.\n",
    "        return encoder_outputs, hidden\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "bb7290e4",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "源序列形状: torch.Size([64, 20])\n",
      "编码器输出形状: torch.Size([64, 20, 512])\n",
      "隐藏状态形状: torch.Size([2, 64, 512])\n",
      "Encoder测试通过!\n"
     ]
    }
   ],
   "source": [
    "# Smoke-test the Encoder output shapes\n",
    "import torch\n",
    "\n",
    "# Test hyperparameters\n",
    "vocab_size = len(src_tokenizer.word2idx)\n",
    "embedding_dim = 256\n",
    "hidden_size = 512\n",
    "num_layers = 2\n",
    "dropout = 0.3\n",
    "batch_size = 64\n",
    "seq_len = 20\n",
    "\n",
    "# Instantiate the encoder\n",
    "encoder = Encoder(vocab_size, embedding_dim, hidden_size, num_layers, dropout)\n",
    "\n",
    "# Random token ids as input\n",
    "src = torch.randint(0, vocab_size, (batch_size, seq_len))  # [batch_size, seq_len]\n",
    "\n",
    "# Forward pass\n",
    "encoder_outputs, hidden = encoder(src)\n",
    "\n",
    "# Print the resulting shapes\n",
    "print(f\"源序列形状: {src.shape}\")\n",
    "print(f\"编码器输出形状: {encoder_outputs.shape}\")\n",
    "print(f\"隐藏状态形状: {hidden.shape}\")\n",
    "\n",
    "# Check the output dimensions match expectations\n",
    "assert encoder_outputs.shape == (batch_size, seq_len, hidden_size)\n",
    "assert hidden.shape == (num_layers, batch_size, hidden_size)\n",
    "\n",
    "print(\"Encoder测试通过!\")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0d68b24b",
   "metadata": {},
   "source": [
    "# Bahdanau注意力"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "8a86ea51",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[-inf, -inf, 3., 4.]])"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Toy logits and mask (same convention as the tokenizer masks: 1 = padding)\n",
    "logits = torch.tensor([[1.0, 2.0, 3.0, 4.0]])\n",
    "mask = torch.tensor([[1, 1, 0, 0]])  # first two positions are padding; last two are the valid ones\n",
    "\n",
    "# Set the padded (invalid) positions to -inf so softmax would give them weight 0\n",
    "masked_logits = logits.masked_fill(mask == 1, float('-inf'))\n",
    "masked_logits"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "ce1b8f5f",
   "metadata": {},
   "outputs": [],
   "source": [
    "class BahdanauAttention(nn.Module):\n",
    "    \"\"\"\n",
    "    Bahdanau (additive) attention: score = V(tanh(Wq·query + Wk·keys)).\n",
    "    \n",
    "    Args:\n",
    "        hidden_size: dimension of the decoder hidden state (query)\n",
    "        key_size: dimension of the keys; defaults to hidden_size\n",
    "    \"\"\"\n",
    "    def __init__(self, hidden_size, key_size=None):\n",
    "        super().__init__()\n",
    "        \n",
    "        # Keys default to the same dimension as the hidden state.\n",
    "        if key_size is None:\n",
    "            key_size = hidden_size\n",
    "            \n",
    "        # Attention projections\n",
    "        self.Wq = nn.Linear(hidden_size, hidden_size)\n",
    "        self.Wk = nn.Linear(key_size, hidden_size)\n",
    "        self.V = nn.Linear(hidden_size, 1, bias=False)\n",
    "        \n",
    "    def forward(self, query, keys, values, attn_mask=None):\n",
    "        \"\"\"\n",
    "        Args:\n",
    "            query: decoder hidden state [batch_size, hidden_size]\n",
    "            keys: encoder outputs used as keys [batch_size, src_len, key_size]\n",
    "            values: encoder outputs used as values [batch_size, src_len, value_size]\n",
    "            attn_mask: padding mask [batch_size, src_len], 1 = padding\n",
    "            \n",
    "        Returns:\n",
    "            context_vector: context vector [batch_size, value_size]\n",
    "            attention_weights: attention weights [batch_size, src_len]\n",
    "        \"\"\"\n",
    "        src_len = keys.size(1)\n",
    "        \n",
    "        # Project the query and broadcast it across source positions:\n",
    "        # [batch_size, hidden_size] -> [batch_size, src_len, hidden_size]\n",
    "        query = self.Wq(query).unsqueeze(1).repeat(1, src_len, 1)\n",
    "        \n",
    "        # Project keys: [batch_size, src_len, hidden_size]\n",
    "        keys = self.Wk(keys)\n",
    "        \n",
    "        # Additive attention energies\n",
    "        energy = torch.tanh(keys+query)\n",
    "        # [batch_size, src_len, hidden_size] -> [batch_size, src_len, 1] -> [batch_size, src_len]\n",
    "        attention = self.V(energy).squeeze(2)\n",
    "        \n",
    "        # Mask out padded positions (if a mask is provided)\n",
    "        if attn_mask is not None:\n",
    "            attn_mask = attn_mask * -1e16 \n",
    "            attention += attn_mask # large negative value, so padded positions get ~0 after softmax\n",
    "        \n",
    "        # Normalize over source positions: [batch_size, src_len]\n",
    "        attention_weights = F.softmax(attention, dim=1)\n",
    "        \n",
    "        # Weight each position's value by its score and sum over src_len.\n",
    "        context_vector = torch.mul(attention_weights.unsqueeze(-1), values).sum(dim=1)\n",
    "        # context_vector: [batch_size, value_size]\n",
    "        # attention_weights is also returned for plotting attention maps later\n",
    "        return context_vector, attention_weights\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "520ba85e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Query shape: torch.Size([2, 8])\n",
      "Keys shape: torch.Size([2, 4, 8])\n",
      "Values shape: torch.Size([2, 4, 8])\n",
      "Context vector shape: torch.Size([2, 8])\n",
      "Attention weights shape: torch.Size([2, 4])\n",
      "\n",
      "Attention weights:\n",
      "tensor([[0.0000, 0.3273, 0.3856, 0.2871],\n",
      "        [0.3577, 0.2613, 0.2053, 0.1757]], grad_fn=<SoftmaxBackward0>)\n"
     ]
    },
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAbYAAAFQCAYAAADTFFvvAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQAAP7hJREFUeJzt3Ql4FFXWBuDTBEOAEBaRHVmVTfZNUEBkFxUUHUD9WUQcRZFNEVQ2wUEREAUGBAYVhIFxAxVEEAVBQHYFFEcQ2XdkXwLp+p/vaPVUJ52kq5JOOtXf61OSrq6qrq50+tS999x7PYZhGEJEROQS2TL7BIiIiNITAxsREbkKAxsREbkKAxsREbkKAxsREbkKAxsREbkKAxsREbkKAxsREblK9sw+ASIiSh+XL1+W+Ph4x/tHR0dLTEyMZHUMbERELglqZUrFypFjCY6PUaRIEdmzZ0+WD24MbERELoCSGoLank2lJC6P/Vams+e8Uqb2Xj0OAxsREYWN3LF/LnYluGjUYAY2IiIX8Yqhi5P93IKBjYjIRbz6n7P93ILp/kRE5CossRERuUiCYejiZD+3YGAjInIRL9vYGNiIiNzEK4YkMLAREZFbeFliY2AjInKTBLaxMSuSiIjchSU2IiIX8f61ONnPLRjYiIhcJMFh8oiTfcIVAxsRkYskGM7GfeRYkUREFJa8rIpkYCMichOveCRBPI72cwtmRRIRkauwxEZE5CJe48/FyX5uwcBGROQiCQ6rIp3sE64Y2IiIXCSBgY2BjYjITbyGRxcn+7kFAxsRkYsksMTGrEgiInIXBjYiIhdJkGyOF7smT54spUuXlpiYGKlfv76sX78+2W0//vhjqVOnjuTLl09y584tNWrUkNmzZ/tt061bN/F4PH5L69atbZ8XqyKJiFzEcNjGhv3smD9/vvTv31+mTp2qQW3ChAnSqlUr+eWXX6RQoUJJti9QoIC8+OKLUrFiRYmOjpbPP/9cunfvrttiPxMC2TvvvON7nCNHDtvvxWMYLpqEh4goQp09e1by5s0rS7eVktx57Je+LpzzSsuqe+XMmTMSFxeX6vYIZnXr1pVJkybpY6/XKyVLlpTevXvLoEGDgnrNWrVqSdu2bWXkyJG+Etvp06dlwYIFkhasiiQicpEEI5vjxQyQ1uXKlStJXiM+Pl42bdokzZs3963Lli2bPl67dm2q54jy1PLly7V017hxY7/nVqxYoaW4ChUqyJNPPiknT560fQ0Y2IiIXMQrHvFKNgfLn1WRKHWh5Gcuo0ePTvIaJ06ckISEBClcuLDfejw+cuRIsueG0mBsbKxWRaKkNnHiRGnRooVfNeSsWbM06L322muycuVKadOmjb6WHWxjIyIin/379/tVRTpp40pOnjx5ZOvWrXL+/HkNXmijK1u2rNxxxx36fKdOnXzbVq1aVapVqyblypXTUlyzZs2Cfh0GNiIiF0lIYz82BLXU2tgKFiwoUVFRcvToUb/1eFykSJFk90N1Zfny5fVnZEX+/PPPWiI0A1tiCHp4rV27dtkKbKyKJCJykYQ0trEFA1WJtWvX1lKXCckjeNygQYOgj4N9ArXhmQ4cOKBtbEWLFhU7WGIjInJdG5vH0X52oBqxa9eu2jetXr16mu5/4cIFTeGHLl26SPHixX1tdPgX26JqEcFs8eLF2o9typQp+jyqJ0eMGCEdOnTQUt/u3btl4MCBWsKzdgcIBgMbEZGLeB12tvaKvZ5fHTt2lOPHj8vQoUM1YQRVi0uWLPEllOzbt0+rHk0Ier169dJSWM6cObU/2/vvv6/HAVRt/vjjj/Lee+9pyn+xYsWkZcuW2hXAbjsf+7EREbmoH9u8rZUlV54o2/tfPJcgnWr8FHQ/tnDGNjYiInIVVkUSEbmI969+afb3c0/lHQMbEZGLJBgeXZzs5xYMbERELpLgMHkkgSU2IiIKR14jmy7292NgIyKiMJTAEhuzIomI
yF1YYiMichGvw0QQ7OcWDGxERC7idZzu754KPAY2IiIXSbA5oLF1P7dgYCMichFvBg2CHM4Y2IiIXCSBJTYXVaoSERGxxEZE5C4Jjvuxuaecw8BGROQiXsOji5P93IKBjYjIRbyOJxpliY2IiFw1VmQ2cQsGNiIiF0kQjy5O9nML94RoIiIiltiIiNzFy6pIBjYiIjdJcFitiP3cgoGNiMhFvCyxMbAREblJAofUYmAjInITw+EgyNjPLdwToomIiFhiIyJylwRWRbLERpHN4/HI8OHDxa3uuOMOXZzue8stt6T7OVHGjBXpdbC4BQMbOfbPf/5TA0P9+vUDPv/TTz9p0Pj9998D7vvuu+9mwFmKLF68OKyC15gxY/S6bdmyxW+9YRiSP39+fW7Pnj1+z12+fFly5MghDz30kISbQ4cO6fXdunVrZp8KWUb3T3CwuIV73glluDlz5kjp0qVl/fr1smvXroCBbcSIEWER2HAegVy6dEleeuklyUi33367/rt69Wq/9Tt27JDTp09L9uzZ5bvvvvN7bsOGDRIfH+/bN1hLly7VJdSBDdeXgS08eFliY2AjZ1CiWLNmjYwfP15uuOEGDXJZUUxMjAaSjFSnTh193cSBDcHs+uuvl2bNmiV5znxsN7BFR0frQpHDK9kcL3ZNnjxZb27xeUbNDW5yk/Pxxx/rZz9fvnySO3duqVGjhsyePTtJrcXQoUOlaNGikjNnTmnevLn8+uuvts+LgY0cQSBDtVnbtm3lgQceSBLYUBp78MEH9eemTZtq9RqWFStW6B8CSicrV670rbe2A6HU0rdvXylZsqRWv5UvX15ee+018Xq9vm1QCsR+Y8eOlWnTpkm5cuV027p162rpxtStWzf94wPztbCk1MaGKsI2bdpIXFycxMbGaqBZt25dkveHfRGM+vfvr8Edf6z33XefHD9+PMVrh0CD80xcKsPjBg0ayG233RbwOXwhmG1euBYTJkyQKlWq6JdK4cKF5e9//7v88ccfqbax7d27V+69914930KFCkm/fv3kyy+/9P1+ApW88TvMlSuXFC9eXKtSTdge7wW6d+/uu75maRxfSh06dJAiRYroeZYoUUI6deokZ86cSfEaUfibP3++fvaHDRsmmzdvlurVq0urVq3k2LFjAbcvUKCAvPjii7J27Vr58ccf9fOCBZ89Ez5bb731lkydOlW+//57/YzimKiKt4NZkeQIAtn999+vX9KdO3eWKVOmaEAxv+QaN24szzzzjH5IX3jhBalUqZKux7/4Qu7du7cGDXzQAV/McPHiRWnSpIkcPHhQv6hvvPFGLRkOHjxYDh8+rPtazZ07V86dO6fb4gsVfxg4r99++02uu+46XY+qsmXLliW5OwwEAbdRo0Ya1AYOHKjHePvttzU4IBAnbk/E+0CAxx83gi3O7+mnn9Y/+pSg5LVq1SrdB4HeDF6PPfaY1KtXT4+HAI9ghrtYXAMEvWzZ/rwXxftC8MAXA64zStCTJk3SoIzj4LwDuXDhgtx55516Lfv06aMBB9fwm2++Cbg9AmXr1q31mv7tb3+TDz/8UJ5//nmpWrWqBn/8Pl9++WW9y3788cf12kHDhg216hRfSleuXNHrhNfC7/Xzzz/X95Y3b95Ufx9kX4Lh0cXJfnagtqZnz576GQQEo0WLFsnMmTNl0KBBSbZPfIOFz997772ntRH4nOBzjr8fNA20a9dOt5k1a5Z+NyxYsEBviIJmENm0ceNGAx+dZcuW6WOv12uUKFHC6NOnj992H3zwgW73zTffJDlGlSpVjCZNmiRZP3LkSCN37tzGf//7X7/1gwYNMqKioox9+/bp4z179uixr7/+euPUqVO+7RYuXKjrP/vsM9+6p556StcFgvXDhg3zPW7fvr0RHR1t7N6927fu0KFDRp48eYzGjRv71r3zzju6b/PmzfX9m/r166fnefr0aSMlixYt0v1nz56tjw8fPqyPV65caZw7d06PgW1g
+/bt+twrr7yij1etWqWP58yZ43fMJUuWJFmPa2y9zuPGjdNtFixY4Ft36dIlo2LFikl+V9gP62bNmuVbd+XKFaNIkSJGhw4dfOs2bNig2+GaWG3ZskXX43NAoXfmzBm93n//toPxzOZOthfsh/3379+vxzKXy5cvJ3ktfA7wGf3kk0/81nfp0sW49957Uz1X/M189dVXRq5cuYylS5fqOvzN4fXxubHC390zzzxj61qwKpIcldZwF4XqKUBJqWPHjjJv3jxJSEjbUKoffPCB3vWjFHTixAnfgrp2HPvbb7/12x6vi21NZokBJTa7cHwkWrRv317Kli3rW4/6fmQj4s7y7NmzfvuglGKt2sTr4zio7ksJSjQofZltZ2YpCyVelGSrVavmq440/zXb13CNUNpp0aKF3zWqXbu27ptc6QuWLFmi1YmoijShihB33oHgeI888ojvMUroKFEGc33NEhmqmlASp4xh/DVWpN3FDAdoAsDvzlxGjx6d5DXwecPn3KxpMeHxkSNHkj03VEHjM4XPEZoxJk6cqJ9jMPeze8xAWBVJtuDDjACGoGZNSUcV3bhx42T58uXSsmVLx8dHmwzq39FmFUji+ntUVVqZQS5xW1Mw0DaGL+AKFSokeQ5VbmjX2r9/v7ZrpfX1UcWI41iDV82aNbXB3Ax81ufMgGJeI3xBoH0skOTaOAABF+2R1mAMaMcMBG1iibfFe8TvKDVlypTRNhhUWeFmCEEfARWBktWQ4TvR6P79+7Uq3oS26/SSJ08ezZ49f/68flfg84GbSKd9LZPDwEa2fP3119o+g+CGJTF8gaUlsCF44A4O7VuB3HzzzX6Po6KiAm73Zy1j6KXl9VECQ7sE2psQvBDMTPgZbRVXr17VUh1KYyhZmdcIQS25TNTkbgqcSOv1xc0OEngWLlyopWG0B6IEgGQcBE1Kf17jz5R/J/sBgpo1sAVSsGBB/WwcPXrUbz0eoy01OailMG+ikBX5888/6+cBgc3cD8dALYn1mNjWDgY2sgVfpvhSNTMNE6fzfvLJJ/pljZJH4jt9q+SeQ2kCd3OoekwvKZ1H4oCAzL9ffvklyXM7d+7UP0pU06QXBDYk3Xz11Vea9PHcc8/5BTb0sUNjPKr9kFlovUbYB9mTZgkvWKVKldIsRwQm63UJ1A8xva4vEk2wICkASTA4b3xGRo0a5fg1KXNFR0frzRZKXai6N2+48BjJU8HCPkguMkv4CG44hhnIUPWP7Mgnn3zS1vmxjY2Chi9aBK+7775bU/wTL/hAI0Px008/1e2RqgsokSSG5wKtR+Yd0oGtKcAmbH/t2jXb553SeVjhDhSlTZQurJ3KcceIzEEEotTuZO0w28xQVYeSmbXEhkxJ3LWaqfXW/mu4RqgSHjlyZJJj4vqk9D6RfYbMRPN3BEilnj59uuP3kdz1xZdS4t8XAhxuEMwvM0p/XodtbHbnY0M1Ij43yGxEyQvBB1m3ZpZkly5dNJvZhJIZspNxo4btUZpHprLZhosbJHTzwQ0PPp/btm3TYxQrVswXPIPFEhsFDR82BC5r4oHVrbfe6uusjaQO3HUhWKAPGtqEUFePVHOU+HC3h9IKPsSomsA6PIdSC14HwRNVWNgOfyz4kCPVHAEH1SB24BiAajB8seOckksdxvngjw+BpFevXtp5G+n++CK29t9KD2ifQwkQgRyBDH/AVgh0H330kf7Bo5RjQncIpPvjiwLtFQjGSDxB2xsSS95880290QgE+6FbALpoIN0awRO/L7OaM9jSrRVKkGgzRCkMbSgIdGhz/eGHH/RmB/0ZUYWMIIcvMlx/awmU0pfX4bQ1dvfB3zjapdHVA8kd+HtHcpKZ/LFv3z5f9xTA3zH+pg4cOKA1DRUrVpT3339fj2NCEwS2Q1IWbpTwd4hjmp/PoNnKoaSIds899xgxMTHGhQsXkt2mW7duxnXXXWecOHFCH0+fPt0oW7aspgZb08mPHDlitG3bVtPosd6a
ko5098GDBxvly5fX1PuCBQsaDRs2NMaOHWvEx8f7pfu//vrrqabwX7t2zejdu7dxww03GB6Pxy/1P/G2sHnzZqNVq1ZGbGyspiM3bdrUWLNmjd82Zro/Ut2t8P6S6+IQSOfOnXX7hx56KMlz48eP1+cqVaoUcN9p06YZtWvXNnLmzKnXsWrVqsbAgQO1e0Jy6f7w22+/6bXHfrgmAwYMMD766CN9rXXr1vnti24ZiXXt2tUoVaqU3zp0s6hcubKRPXt2X+o/XufRRx81ypUrp5+bAgUK6LVEmjeFLt3/oa8fMrqt72Z7wX7YH8fJ6jz4n71QSERug46xGIEEd9PoDkBZD6p+kW3aafkjEh1rfxi1+PPxMq/Z+1q7kp5V7pmBbWxEEdhWaoU2NlS33nTTTQxq5ApsYyOKMBgeC+17aBPB3TnaOZD1mVUHsqYAbWxG6NvYwhkDG1GEQQLNjBkzNJAhu7Jy5craJ9HaiE9Zl+EweQT7uUWWqYo8deqUPPzww1r3iwysHj16aH+nlKDTn3VEdyxPPPFEhp0zUThCSvX27dv17wfVkps2bWJQcxEv52PLOiU2BDWMeIFUbPT5QV8JpISif1FKMAYeRh83oQMuEZFbeR30STP3c4ssEdjQmQ99GTAtCiaqAwyeedddd+l8XIn7/1ghkKU0xAsRkZt4HZa+WGLLYOjAiupHM6gBhlxC5z8Mt4LJHZODdgQ0jiO43XPPPTJkyJAUS23oiGsdFQFDvqAaFDMbO+m8SkSUGvS6wuAHuEm3dmomFwc29GpPPJI5RoTAjKwpTWeAqUYwNh4+LBiNHBMkYhxADAuVHIzmMGLEiHQ9fyKiYGBk/bQODu3NoJFHwlmmBjbMsorhllKrhnQKbXDWMeowfFCzZs1k9+7dOgxQIBjbDGOgmZAOjdTo2+UuyS6BZyWOVPdt9B/Zm/6nS1zyU8dEsp77/jfmJf3P1QvxsrDdPB2SLK28rIrM3MA2YMAAHQ8wJZirB9WIieeYwrhzqCK0036G8evMkcyTC2wYzzDQ/EMIatk9DGxWOWOzRIE/U8TlYXVSINfltj8iRiRJj+YOLwNb5gY2DJgbzNxRDRo00AExkZZsDmiLecHQ/mUGq2BgwFiwzvVDROQmXga2rNGPDbMXt27dWlP3169fr5MyYtRwjNBuZkRiKg6MFo3nAdWNmNYDwRAjwmPEeEyB0LhxY6lWrVomvyMiotDwsh9b1ghsZnYjAhfayJDmj+kMpk2b5nsefduQGHLx4kXfRHiYjBFTemA/VHtiqozPPvssE98FERGFWpZpJEEGZEqdsTGflXWiAsxztXLlygw6OyKi8GA4zHB00zQvWSawERFR6rxsY2NgIyJyEy8DGwMbEZGbeBnYGNiIiNzEy8CWdbIiiYiIgsESGxGRixiGRxcn+7kFAxsRkYt4OQgyAxsRkZt42cbGwEZE5CYGqyIZ2IiI3MTLEhuzIomIyF1YYiMichGDVZEMbEREbmI4rIpkYCMiorBkaJBytp9bsI2NiMiF/di8Dha7Jk+erFOGxcTESP369X0TPQcyffp0adSokeTPn1+X5s2bJ9m+W7du4vF4/BZMMm0XAxsRkQvb2AwHix3z58+X/v37y7Bhw2Tz5s1SvXp1adWqlRw7dizg9itWrJDOnTvLN998I2vXrtU5MzER9MGDB/22QyA7fPiwb/n3v/9t+xowsBERkW3jx4+Xnj17Svfu3aVy5coydepUyZUrl8ycOTPg9nPmzJFevXpJjRo1pGLFijJjxgzxer2yfPlyv+1y5MghRYoU8S0o3WVIYJs9e7bcdtttUqxYMdm7d6+umzBhgixcuNDJ4YiIKJ37sXkdLHD27Fm/5cqVK0leIz4+XjZt2qTViaZs2bLpY5TGgnHx4kW5evWqFChQIEnJrlChQlKhQgV58skn5eTJk6EPbFOmTNHi51133SWnT5+WhIQEXZ8vXz4NbkRElHkMw/kCqCLMmzevbxk9
enSS1zhx4oR+9xcuXNhvPR4fOXIkqPN8/vnntXBkDY6ohpw1a5aW4l577TVZuXKltGnTxhdnQpYVOXHiRG0EbN++vbz66qu+9XXq1JFnn33W7uGIiCiM+rHt379f4uLi/KoG0xtix7x587R0hsQTU6dOnXw/V61aVapVqyblypXT7Zo1axa6EtuePXukZs2aSdbjzV+4cMHu4YiIKIySR+Li4vyWQIGtYMGCEhUVJUePHvVbj8doF0vJ2LFjNbAtXbpUA1dKypYtq6+1a9cuW9fAdmArU6aMbN26Ncn6JUuWSKVKlewejoiIwqiNLRjR0dFSu3Ztv8QPMxGkQYMGye43ZswYGTlypMYL1PKl5sCBA9rGVrRoUQlpVSTa15566im5fPmyGIah/RCQjol6WGS5EBGR+/Xv31+6du2qAapevXqaY4FaO2RJQpcuXaR48eK+Njq0mQ0dOlTmzp2rfd/MtrjY2Fhdzp8/LyNGjJAOHTpoqW/37t0ycOBAKV++vHYjCGlge+yxxyRnzpzy0ksvaVbLQw89pA2Ab775pl/9KBERZTzDkghidz87OnbsKMePH9dghSCFNH6UxMyEkn379mmmpDXxENmUDzzwgN9x0A9u+PDhWrX5448/ynvvvaeJiYgr6OeGEp7ddj5HQ2o9/PDDuiCwIcoiNTOjoKf766+/rhcSHQKRzIK7heR88MEHMmTIEPn999/lpptu0rsGZHQSEbk3sHkc7WfX008/rUsgSPiwwndwSlBg+vLLLyU9pKmDNjrjZWRQs9vTfc2aNdrTvUePHrJlyxbN5MSyffv2DDtnIiI3jjwSzoIqsSELEmN2BQMBJyN6ugN6ui9atEh7ug8aNCjJ9qgeRb+I5557Th+jSLts2TKZNGmS7ktE5MpBkMXZfhEV2FDKyWxmT/fBgwcH3dMd61HCs0IJb8GCBcm+DnrZW3vao+c9EVFWYXA+tuACG6r+MltKPd137twZcB+0w9ntGY8MHmTmEBFRhM3HtnHjRvn555/1ZwyAiT4NboASobWUhxIbhpghIsoSDNZF2g5s6DCHhIzvvvtOx4cEpGY2bNhQh0gpUaJEKM7TUU93rLfbMx5ppaEYQoaIKEMYDhNBXFQVmc1JPzaMyIzS2qlTp3TBz+h1judCxUlPd6xPPCUCkkdS6hlPRBTJgyBHZIkNoy0jjR5TCpjwM/qTYXbUcOrp3qdPH2nSpImMGzdO2rZtqyVKVKFOmzYtpOdJRJRZDCaP2A9saG9CiS0xJHagp3go2e3pjupRDN+CUVJeeOEF7aCNjMhbbrklpOdJRJRpDI+zasVIDmwY9aN37946Aog5iCVKQSgdYdTmULPT0x0efPBBXYiIKDLYDmzdunXTobTq168v2bP/ufu1a9f050cffVQXE9rfiIjIfWNFuiqwcZZsIqIwZjDd33ZgQ/IGERGFJ4PJI847aGPgYSxIubdKbUZUIiIKMUMimu3AhvEaUWpD3zVMNGqFgZKRHUlERJnDYInNfmBDcsjNN98s//rXvzTNPthR/4mIiMIysP3222/y0Ucf6XTdREQUZgwmj9geUqtZs2byww8/hOZsiIgojTxpWCK0xDZjxgxtY8Ms1BjB47rrrvN7/t57703P8yMiIjsMlthsBzZM3omR/b/44oskzzF5hIgokxkMbLarIjGc1iOPPCKHDx/WVH/rwqBGRBQmY0UaDpZIDWwnT56Ufv36JZmZmoiIKEsGtvvvv1+++eab0JwNERGlicH52Oy3saEP2+DBg2X16tVStWrVJMkjzzzzTHqeHxER2WGwjc1RVmRsbKxOOIolcfIIAxsRUSYyOB+b7cC2Z8+e0JwJERGlmcf4c3Gyn0T6IMhERBSGDFZFOgpsBw4ckE8//VT27dsn8fHxfs+NHz8+vc6NiIgo9IFt+fLlOrpI2bJlZefOnTr6yO+//64j/deqVcv+GRARUfox2MZmO90fGZHPPvusbNu2TWJiYnRA5P37
90uTJk3kwQcfDM1ZEhGRvapIw8Fi0+TJk6V06dIaC+rXry/r169Pdtvp06dLo0aNJH/+/Lo0b948yfYoIA0dOlSKFi0qOXPm1G1+/fXX0Ac2zMPWpUsX/Tl79uxy6dIlzZJ8+eWX5bXXXrN9AkRElPUC2/z586V///4ybNgw2bx5s1SvXl1atWqlE1AHsmLFCuncubP2g8bQjCVLlpSWLVvKwYMHfduMGTNG3nrrLZk6dap8//33kjt3bj3m5cuXQxvY8EJmuxqi6u7du33PnThxwu7hiIgoCwa28ePHS8+ePaV79+5SuXJlDUa5cuWSmTNnBtx+zpw50qtXL6lRo4ZUrFhRu45hKEY0b+lpG4ZMmDBBXnrpJWnXrp1Uq1ZNZs2aJYcOHZIFCxaENrDdeuut2jkb7rrrLhkwYIC88sorOgEpniMioqw7VuTZs2f9litXriR5CRRuNm3apFWFpmzZsuljlMaCcfHiRbl69aoUKFDA15XsyJEjfsfMmzevVnEGe0zfudja+q8ojReCESNG6PxsKJKinhWzahMRUdZVsmRJDSjmMnr06CTboHYOg94nHjMYjxGcgvH8889LsWLFfIHM3C8tx3ScFYlsSGu1JIqfRETkjg7a+/fvl7i4ON/6HDlySHp79dVXZd68edruhsST9Ga7xGaFBr333ntPpkyZIrt27ZKMYCcL591339VhvqxLKC4iEZFb2tji4uL8lkCBrWDBghIVFSVHjx71W4/HRYoUSfH0xo4dq4Ft6dKl2o5mMvdzckzHgQ3ZL5iLzVrH2qBBA208RBcANAjarQe1y24WDuAXg7njzGXv3r0hPUciIreLjo6W2rVr+xI/wEwEQVxIDrIeR44cKUuWLJE6der4PVemTBkNYNZjoo0P2ZEpHTNNgQ3RtUWLFn4ZLggS6GPwxx9/aB+2UaNGSSjZzcIBlNJwscyF88gRkZt5LNWRthabr4NCBvqmodYO3cCefPJJuXDhgn4/A7qFodBjQnewIUOG6Pc1at3Qbobl/Pnzf563xyN9+/bVOIKRrdBXGsdAO1z79u1D08aG4bMQTKyB7oEHHpBSpUrp4z59+miWZKiYWTjWCxVMFg4uGs4RdxMYGeUf//iHVKlSJdntkQFkzQLCHQOM3L5BYvOkqebWdXrtfCizTyFsjdpTMLNPISxFn4zK7FMIS16b/bTCQceOHeX48ePaoRoBCrV2KImZhQfEDHxHm9Bkhe9xxA0r1MANHz5cfx44cKAGx8cff1xOnz4tt99+ux7TbhNS0IENJ4h+BqZ169Zp9DXly5dPS26hklIWDob2CqRChQp6d4B63DNnzmjdbsOGDWXHjh1SokSJgPsgAwjZnkREWZKRcUNqPf3007oEgsQQKwy9mBqU2jDYB5a0CLoIUqlSJfnss8/0ZwQGROOmTZv6nke1ZLhV86FeFkVZ3ElgyK+PP/5YbrjhBnn77beT3QclQgRBc0GGEBFRlmFk3JBa4SroEhuKiJ06dZJFixZpYEO1Ixr7TIsXL5Z69eqF6jzTlIVjwmzfNWvWTDGDExlAoUhvJSLKEAanrQm6xHbfffdp8EK1Xr9+/TRD0QpJHBguJdyycKxQlYkGSQwFRkTkRh4niSMO+76FK1sdtDHKCJZA0AAYasjC6dq1q6aJonSIccUSZ+EUL17c11Me9bQY5qt8+fLaEPn6669rleljjz0W8nMlIsoUBkts2d2chYNkFnQPwLaYJgElvjVr1vhldxIRkbtkqcBmNwvnjTfe0IWIKGIYLLFlucBGREShGyvSDRjYiIjcxMi4fmzhioGNiMhNDFZF2h4jCv3G/u///k/H78qePbv2LbMuRESUeTxM97dfYuvWrZtmH2I4LfQHwxAoREREWTawrV69WlatWqWp9kREFGYMVkVmdzJtuHUwZCIiCiOGw2rFSG5jw2gfgwYNCmqkZiIiymAGB0EOqsSGUTusbWkYxqpcuXI6PiQGFrY6depU+p8l
EREFx2BVZPZgS2lERBT+POygHVxgw8DDRERErmxjw9Q1X375ZZL1S5culS+++CK9zouIiChjAhsSRzCvWWKYGw3PERFRJjKYPGI73f/XX38NOO1LxYoVU5yZmoiIQs/DNjb7Jba8efPKb7/9lmQ9glru3LnT67yIiMgpI3JLa44CW7t27aRv376ye/duv6A2YMAAuffee9P7/IiIyA6DVZG2A9uYMWO0ZIaqxzJlyuhSqVIluf7662Xs2LGhOUsiIqJQtbGhKnLNmjWybNky+eGHHyRnzpxSrVo1ady4sd1DERFROvOwjc1+YJs1a5Z07NhRWrZsqYspPj5e5s2bJ126dEnvcyQiomAZHHnEdlVk9+7d5cyZM0nWnzt3Tp8jIqLM4+F8bPZLbBjZP9AcbAcOHNBqSiIiykQGS2xBB7aaNWtqQMPSrFkznT3bhA7be/bskdatW4fqPImIKBgGA1vQga19+/b679atW6VVq1YSGxvrey46OlpKly4tHTp0CM1ZEhERpXdgGzZsmP6LAIbkkZiYmGB3JSIiF2ZFTp48WV5//XU5cuSIVK9eXSZOnCj16tULuO2OHTtk6NChsmnTJtm7d6+88cYb2ifaavjw4TJixAi/dRUqVJCdO3eGNnkEI/1nVlD79ttv5Z577pFixYppleiCBQtS3WfFihVSq1YtyZEjh5QvX17efffdDDlXIiI3d9CeP3++9O/fXws9mzdv1sCG2rxjx44F3P7ixYtStmxZefXVV6VIkSLJHrdKlSpy+PBh37J69Wq7V8B+YEN7GjpiIyrj5AoUKOC3hBImOMXFw11CMNDu17ZtW2natKlWoeLu4LHHHgs4OwERkSsYGRPYxo8fLz179tRseIwfPHXqVJ18eubMmQG3r1u3rpbuOnXqpAWN5CB/A7HFXAoWLBj6wIZiIt4QqiOR9o+Iff/990u2bNm0GBlKbdq0kVGjRsl9990X1Pa40BgZZdy4cTo6ytNPPy0PPPCAFoGJiNzIk8Z0/7Nnz/otV65cSfIa6LeMKsXmzZv71iEG4PHatWvTdP4YaB+1cijdPfzww7Jv377QB7Y5c+bI9OnTdWxIRNbOnTvLjBkztO503bp1Ek5wga0XHlBUTunC45eY+BdLRBQpJbaSJUtq1y1zGT16dJKXOHHihNbeFS5c2G89HqO9zan69etrc9GSJUtkypQpWuvWqFEj7Scd0n5sOOmqVavqz8iMNDtr33333TJkyBAJJzjXQBcewerSpUs6HFhi+CUmbrwkIooU+/fvl7i4ON/jlKoNQ1ErZ8JQjQh0pUqVkv/85z/So0eP0JXYSpQooQ16UK5cOZ05GzZs2JChFyBUBg8erMHaXPBLJiKKlKrIuLg4vyXQ9zravaKiouTo0aN+6/E4pcQQu/Llyyc333yz7bk+bQc2tG8tX75cf+7du7eW0m666SYdI/LRRx+VcIILHOjC45cVqLQG+CUm/sUSEWUZRuiTR9B3uXbt2r5YAF6vVx83aNAg3d7K+fPndYq0okWLhrYqEqmaJiSQ3HjjjdpmheCGVPxwggu8ePFiv3WYlSA9LzwRUSSOPNK/f3/t/lWnTh3Nkp8wYYJmrptjBqOwU7x4cV8bHRJOfvrpJ9/PBw8e1Gx1NGmhKxY8++yzGkdQ/Xjo0CHtSoCSIXI5QhrYEkOQyKhAgehtLZKiYREXBt0MEGBRjYiLhRkI4IknnpBJkybJwIEDtTT59ddfa13tokWLMuR8iYgymuevxcl+dqBgc/z4cU0cRD5DjRo1NOnDzGtANiMyJU0IVBia0YRuY1iaNGmi/Y3NMYcRxE6ePCk33HCD3H777ZqUiJ9DGtjwgphUFND+hAxJJGJg9mxkr4TSxo0btU+a9Y4BcNeATBq0/VlTQ5HqjyDWr18/efPNN7V9EBmcyIwkInIlI+PGikQXKiyBmMHKhFGrMIh+SjD1WXoIOrBt27ZNi4gIZqh2xAlg0GMUPRGV0Tfsww8/
9I0pGQp33HFHihcm0Kgi2GfLli0hOyciIgovQSePoDoPaf4Y1grBAun9GNUDmYN//PGH/P3vf/drfyMioozn4XxswZfYkM6PNir0LcCwVtOmTZNevXr56lCRIXnrrbeG8lyJiCg1BqetCTqwnTp1ytc/AVksuXPnlvz58/uex892e4cTEVEIGBLRbCWPJJ45O9BM2kREFBnT1rgisHXr1s3XC/3y5cuaTo+SGwQaKJOIiDKYwarIoAMbUuqtHnnkkSTboEMeERFRlghs77zzTmjPhIiI0szDqsi0jzxCRERhxGBVJAMbEZGLeFhiY2AjInIVgyU2BjYiIjcxGNhsz8dGREQUzlhiIyJyEQ/b2BjYiIhcxWBVJAMbEZGLeAxDFyf7uQUDGxGRmxgssTGwERG5iIdtbMyKJCIid2GJjYjITQxWRTKwERG5iIdVkQxsRESuYrDExsBGROQiHpbYGNiIiFzFYImNWZFEROQqWSqwffvtt3LPPfdIsWLFxOPxyIIFC1LcfsWKFbpd4uXIkSMZds5ERJlVHemxsbhJlgpsFy5ckOrVq8vkyZNt7ffLL7/I4cOHfUuhQoVCdo5ERJnKMJwvLpGl2tjatGmji10IZPny5QvJORERhRMPk0eyVmBzqkaNGnLlyhW55ZZbZPjw4XLbbbcluy22w2I6c+aM/nvhvDdDzjUruXbhf9eJ/HkvXc7sUwhL3stRmX0KYcl75c/Pi5EepSaDySOuDmxFixaVqVOnSp06dTRYzZgxQ+644w75/vvvpVatWgH3GT16tIwYMSLJ+ta3sl0uqTcz+wSIXOXcuXOSN2/eNB3D4/1zcbKfXWgWev311zVvAc1EEydOlHr16gXcdseOHTJ06FDZtGmT7N27V9544w3p27dvmo4ZkYGtQoUKupgaNmwou3fv1gs6e/bsgPsMHjxY+vfv73vs9Xrl1KlTcv3112viSWY6e/aslCxZUvbv3y9xcXGZei7hhNclebw2WeO6oKSGoIbEuKxi/vz5+l2JwkP9+vVlwoQJ0qpVK81pCJTHcPHiRSlbtqw8+OCD0q9fv3Q5ZkQGtkAQ+VevXp3s8zly5NDFKtza5/CHGA5/jOGG1yV5vDbhf13SWlLL6KrI8ePHS8+ePaV79+76GMFo0aJFMnPmTBk0aFCS7evWrasLBHreyTFdkRWZHrZu3apVlEREbuRxkOpvTThBada6WHMOTPHx8Vql2Lx5c9+6bNmy6eO1a9c6Ou/0PGaWKrGdP39edu3a5Xu8Z88eDVQFChSQG2+8UasRDx48KLNmzdLnUYwtU6aMVKlSRS5fvqxtbF9//bUsXbo0E98FEVEIGQ5T9//aB1W0VsOGDdOkO6sTJ05IQkKCFC5c2G89Hu/cudPJWafrMbNUYNu4caM0bdrU99hsC+vatau8++672kdt3759fncAAwYM0GCXK1cuqVatmnz11Vd+x8hKUEWKD1niqtJIx+uSPF6byLsunjSm+ydud8yK1yhLBTZkNKaUDovgZjVw4EBd3AIfsMR3TsTrkhJem8B4XdLW7liwYEGJioqSo0eP+q3H4yJFiqS4b0YcM+La2IiIXM1IwxKk6OhoqV27tixfvtwvgxyPGzRo4Oi00/OYWarERkRE4THyCJqC0AyEfsLINkdOA4Y9NDMau3TpIsWLF9e+wWbT0E8//eT7GU1EyJGIjY2V8uXLB3XMYDGwERG5iZG25JFgdezYUY4fP66drtGZGiM8LVmyxJf8gXwHZDWaDh06JDVr1vQ9Hjt2rC5NmjTRAeuDOWawPEa6jOFCRESZCan56AvXoM3Lkv26GNv7X7t6WdZ+MVSHEQyXvn1OsY0tC8FQM6VLl5aYmBjtlb9+/XqJdHanMooUqP5BZ9g8efLoiA3t27fX0Rsi3ZQpUzQ72kyQQNvNF198Ia5ihL6NLdwxsGUR5lAzSFHevHmzjqGGoWaOHTsmkczpVEZut3LlSnnqqadk3bp1smzZMrl6
9aq0bNlSr1ckK1GihLz66qvaERjdh+68805p166djmNI7sGqyCwCJTTcgU+aNMmXLYSOlL1797Y11IybocT2ySefaOmE/KHdAiU3BLzGjRtn9umEFQzwgEF3e/ToIW6oimzYynlV5JovWRVJGSQUw9dQZDGnX8KXOP0Jo1zMmzdPS7FOU9TDktdwvrgEsyKzgFAMX0ORA6V7TA+CeQgxJ2Gk27ZtmwYyDLOHVHOU8itXriyuYXA+NgY2IpdDW9v27dtTnNUikmAqK/SfQin2ww8/1H5TqKJ1S3DzOJwNO3Mn5UpfDGxZQCiGr6HI8PTTT8vnn3+u2aNInKA/R7gwOwRjpIsNGzbIm2++KW+//ba4gpEx/djCGdvYsoBQDF9D7oacMAQ1VLNhRgvMckGB4W8p0NQslHWxxJZFpNdQM26T2lRGkVz9OHfuXFm4cKH2ZcMoDoCsuZw5c0qkwtRWbdq00c8GZqzGNcKoF19++aW4hSeDhtQKZwxsWUR6DTXjNqlNZRTJHZHNGTGs3nnnHenWrZtEKvT7xBiGmOIKQR6dtRHUWrRoIa5hMHmE/diIiFzA7MfW6I5hkj27g35s1y7LqhUjXNGPjSU2IiI38f61ONnPJRjYiIhcxGMYujjZzy2YFUlERK7CEhsRkZsYTB5hYCMichODHbQZ2IiIXMTDfmwMbERErmKwxMbARkTkIh7vn4uT/dyCWZFElCYYySS1yV0xbBUmgj19+nSGnRdFLga2CIShuZ588kkdLy9Hjhw6Q0CrVq3ku+++k6zIbe8nFBBUzAWjU2BuNgyOnB4wMr51+DIM44X536waNmzoG8aKMqgq0nCwuASrIiNQhw4ddFbu9957T8qWLavT32CmgJMnT4b0dfGamKnALe8nq8E4ka1bt9aJa1988UW5++67dZ42XLO0CCZY4ffOKZYyiMF0f5bYIgyqglatWiWvvfaaDh5cqlQpnS0Ao57fe++9vu327dsn7dq10xmGMW7c3/72N7/54AJVP+Eu3TroLn7G1ClYjznlUIqCHTt26JcqjouR5xs1aiS7d+/27TdjxgypVKmSxMTESMWKFeWf//xnmt8Ptnvsscfkhhtu0Ne988475YcffvA71quvvqqDSuOcevToIYMGDdLBplMqieAaWAcVxvQnzz77rBQvXlxy584t9evX12o4E0o2+fLl04F38R5xfRFsUJqxmjlzplSpUkVLoEWLFtXraOe9BILXRXDBLNoYJPnSpUuybNkyfQ4TbeK6ma+H937t2jXfvpiQs2rVqjozwPXXXy/NmzfX2SUSfxbwM46FUpxZQvz9998DVkV+9NFHvvdYunRpGTdunN/5Yt0//vEPefTRR/V3ghL5tGnTUn2fkc7z18gjTha3YGCLMPgixbJgwYJk56DC/FQIaqdOndIvKXz5/fbbbzrDgF0oReFuHdWCU6dOlYMHD0rjxo31ywxVYZs2bdIvLvNLdM6cOTqDwSuvvCI///yzfrENGTJEj+P0/cCDDz6oI7t/8cUX+pq1atWSZs2a6XuE//znPzJ8+HB9PcwYgC/3lAJqchCA1q5dK/PmzZMff/xRXxeB69dff/Vtc/HiRRk7dqzMnj1bJwDFTQSCoQlBB9POPP7447Jt2zb59NNPfRNjBvNegmFOXYOSLn4nd911l9StW1cDJF7/X//6l4waNUq3QdDt3Lmz/p7wO0GQuv/++3XOt8QQ0DBHYM+ePXU/LCVLlkyyHc4bN0udOnXS94hrj99z4hkZEOwwVdOWLVukV69eWuX8yy+/BP0+I5LBqkh8OCnCfPjhh0b+/PmNmJgYo2HDhsbgwYONH374wff80qVLjaioKGPfvn2+dTt27NAKjvXr1+vjrl27Gu3atfM7bp8+fYwmTZr4HuPnmjVr+m2D1ypTpowRHx8f8NzKlStnzJ0712/dyJEjjQYNGjh+P6tWrTLi4uKMy5cvJ3mtt99+W3/G8Xv16uX3fP369Y3q1av7
vR+8RytcA1wL2Lt3r163gwcP+m3TrFkzPSd455139Dru2rXL9/zkyZONwoUL+x4XK1bMePHFFwO+12DeSyB4zU8++UR/vnDhgr5XnCuu0wsvvGBUqFDB8Hq9fucUGxtrJCQkGJs2bdL9f//994DHTvxZCHSdvvnmGz3GH3/8oY8feugho0WLFn7bPPfcc0blypV9j0uVKmU88sgjvsc4v0KFChlTpkxJ9n1GsjNnzug1blprsNGi7gjbC/bD/jhOVscSWwRCm9ShQ4e0JIDSBO7Acddv3i3jrhx32dY77cqVK2tVFp6zAzN/W2ESUFQ9XnfddUm2RdUWqiRRDWiWxLCg5GCtqrT7flAKwYSkqEKzHheTkprHxftCtaGV3dnJUfJISEiQm2++2e91UOq1nn+uXLmkXLlyvscoHaIEBvgX7wUlsECCeS/JQakL26JaD9WAKJVhPjK8d7xXVBWakFyC1zlw4IBUr15dzwdVkSgtTp8+Xf744w9JC7wmXsMKj1GyxTU04fxMOD9UpZrXiig5TB6JUGi/wuSKWFAFhDabYcOGBT0JZbZs2ZJURV29ejXJdmhnskpp9mZ8kQK+OBMHmaioKMfvB8dF8LC2dZkQrIOV2nvG6+A8Uc2W+HwRUEyJgzq+sM3jpja7dVreyxtvvKFtY0j2QPtcsPBeUB29Zs0aWbp0qUycOFGTT77//nspU6aMhFKga4Wqckqeh6P7s42N/lciM5MBkNSwf/9+XUw//fSTNvxjO8AXY+KEB5TGUoM7cCR7BAqCSNwoVqyYtuehTcm62P0Ctb4flN4w63j27NmTHBdJLeZ7xhe11bp16/weJ37PKFkgq9BUs2ZNXYcSReLXCTYjEKUpJE0gqzOQYN5LcnAO2C5xUMN7R7ugNWijTRTnUqJECV9AQYlqxIgR2t6FdtNPPvkk4OvgOWupKxC8ZuLuGHiM0m5qNzGUCsNpO5u4BgNbhEEKPLLo3n//fU1uQBXWBx98IGPGjNGEEcBdPaqdHn74Ydm8ebOsX79eunTpIk2aNNGGfMAxkGQxa9YsrT5C6cj6JZ9ScgVm+kXSAPbHvkiiMBMC8MU5evRoeeutt+S///2vVu8hTX38+PFpej+oakPmHkocyNJD6QOlDpwD9OnTRzMR8Vp4XbwfZG9a4XUWLVqky86dOzWRwZrlhy9lXDNcq48//ljPBdcO7wf7BAuJFEiawDXA9cHvAKWkYN+LXUjKwE1M79699X0tXLhQ33///v21lIqAbybVINEF7w19BxGcAkFgxj44N3QtCFTCGjBggAbvkSNH6vVGctCkSZP8kmjIIYPJIwxsEQZVYqjmQ7UUshOR+o2qO2Sx4YvFvDvHl1v+/Pl1G3yZoq/T/PnzfcdB6j72GzhwoGbTnTt3Tr/QU4O2IWRDokoNgRJtcKh6NKucUIWIdH8EGARXbIO2suRKbMG+n8WLF+vz3bt31wCEwLp3714tJQIyPs33g3PCcwhcVsgK7Nq1qy/I45qgi4EVzhvP44u7QoUKGoA2bNigqerBwmtMmDBBszKRDo+uEWZWZTDvxS50TcAxEYTRnvbEE09oO+dLL72kz6NLAbI3kTmJ18N6BN42bdoEPB6CE0pdKDWjdIhgGKjkiUxUZI/id4ZM2JdffjnoqnAKYgZtr4PFpsmTJ+uNDJoC8HeIz1BKcNOJLjzYHn/f+NxZ4fdvHUwAC9rN7fIgg8T2XkQRACUndCMIpoqVKLOhJgTtp81uGSjZo3LY3v9awhVZvn2MnDlzRm9mUoMbXdzEoRsPghpuxhC4UPtSqFChJNujZgE3ZKjBwM3a3Llztf8paiRwc2MGNvSXxQ2iCV2DcJNtB0tsRERkG5oHUDOCmgOUzhHgkPGLKv1A0McRpa/nnntOq7FRDY2Su1mzYjKHxTMXu0ENGNiIiNzESFsbG0p+1iXQwAfo2I/sXzRTmNAei8dIRAoE663bm00aibdHxi9KfKjKR3OAk6HxGNiI
UqiKZDUkRVpgK1mypFZpmguqDhNDUhAyXxO36+IxsnYDwfrUtkeJDglpSCxCNSX6gKItN7Us28TYj42IyE2MtE00igxZaxsbqgYzChKhTEguQfcgDGaAUlxygxYEwhIbEZGbeNOWFYmgZl0CBTb0mUTmq3VgdMDj5PpsYr2d7QGZx3itXbt22boEDGxERC7iyYDR/dEJH91irAMJoL8iHic3FB3WJx54ACPapDR0HYZ0QxsbRtuxg4GNiIhsQwd+9EFF53qM/YlED4z2gyxJQFcATB9lwiAIS5Ys0T6QGAgAbdjo9G9OyYS+rciYxIg/6NyPIIhBFjBajjnlVbDYxkZE5CZG2trYgoVBDTACDTrXIwEEcxcicJkJIuiYj0xJ6yzq6LuGDv4vvPCC3HTTTdpP1OzDhqpNjB6EQIkRfTC8XsuWLbVbgN12PnbQJiJyUQft5uX6Ou6g/dXuCUF30A5nLLEREbmJkTEltnDGwEZE5CqGwyDFwEZEROHIYImNWZFEROQqLLEREbmJV2cadbifOzCwERG5ieH9c3Gyn0swsBERuYnBNjYGNiIiN/GyKpKBjYjITQyW2JgVSURErsISGxGRmxgOS1/uKbAxsBERuYrBqkgGNiIiN/FaZg21vZ87MLAREbmJwRIbAxsRkZsYDGzMiiQiIldhiY2IyE287KDNwEZE5CKG4dXFyX5uwcBGROQmhuGs9OWiNjYGNiIiNzEcVkUysBERUVjyekU8kT1tDbMiiYjIVVhiIyJyE4NVkQxsREQuYni9YjioimRWJBERhSeDJTYGNiIiN/EaIh4GNiIicgsDAcob0YGNWZFEROQqLLEREbmI4TXEcFAVabDERkREYcnwOl9smjx5spQuXVpiYmKkfv36sn79+hS3/+CDD6RixYq6fdWqVWXx4sX+p24YMnToUClatKjkzJlTmjdvLr/++qvt82JgIyJyW4nN62yxY/78+dK/f38ZNmyYbN68WapXry6tWrWSY8eOBdx+zZo10rlzZ+nRo4ds2bJF2rdvr8v27dt924wZM0beeustmTp1qnz//feSO3duPebly5dtnZvHcFP5k4goQp09e1by5s0rd0g7ye65zvb+14yrskIWypkzZyQuLi7V7VFCq1u3rkyaNEkfe71eKVmypPTu3VsGDRqUZPuOHTvKhQsX5PPPP/etu/XWW6VGjRoayBCKihUrJgMGDJBnn31Wn8e5FC5cWN59913p1KlT0O+FJTYiIhe5Jlc1SNle5KovQFqXK1euJHmN+Ph42bRpk1YVmrJly6aP165dG/C8sN66PaA0Zm6/Z88eOXLkiN82CNQIoMkdMzlMHiEicoHo6GgpUqSIrD7i325lR2xsrJa6rFDVOHz4cL91J06ckISEBC1NWeHxzp07Ax4bQSvQ9lhvPm+uS26bYDGwERG5ABIyUOqJj493fAxUB3o8Hr91OXLkkKyGgY2IyEXBLSYmJuSvU7BgQYmKipKjR4/6rcdjlBoDwfqUtjf/xTpkRVq3QTucHWxjIyIi29WetWvXluXLl/vWIXkEjxs0aBBwH6y3bg/Lli3zbV+mTBkNbtZt0MaH7MjkjpkcltiIiMg2pPp37dpV6tSpI/Xq1ZMJEyZo1mP37t31+S5dukjx4sVl9OjR+rhPnz7SpEkTGTdunLRt21bmzZsnGzdulGnTpunzqALt27evjBo1Sm666SYNdEOGDNFMSXQLsIOBjYiIbEP6/vHjx7VDNZI7UF24ZMkSX/LHvn37NFPS1LBhQ5k7d6689NJL8sILL2jwWrBggdxyyy2+bQYOHKjB8fHHH5fTp0/L7bffrse0W73KfmxEROQqbGMjIiJXYWAjIiJXYWAjIiJXYWAjIiJXYWAjIiJXYWAjIiJXYWAjIiJXYWAjIiJXYWAjIiJXYWAjIiJXYWAjIiJxk/8H2oksCJdgugEAAAAASUVORK5CYII=",
      "text/plain": [
       "<Figure size 1000x400 with 2 Axes>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "source": [
    "# Smoke-test the BahdanauAttention module on a toy example\n",
    "def test_bahdanau_attention():\n",
    "    # Hyper-parameters for the toy example\n",
    "    batch_size = 2\n",
    "    hidden_size = 8\n",
    "    src_len = 4\n",
    "    \n",
    "    # Create random input tensors\n",
    "    query = torch.randn(batch_size, hidden_size)  # decoder hidden state\n",
    "    keys = torch.randn(batch_size, src_len, hidden_size)  # encoder outputs\n",
    "    values = torch.randn(batch_size, src_len, hidden_size)  # usually identical to keys\n",
    "    \n",
    "    # Attention mask simulating padded positions in the source sequence.\n",
    "    # NOTE(review): the original comment claimed this marks the LAST token of\n",
    "    # the first sample, but the code sets index [0, 0], i.e. the FIRST token —\n",
    "    # confirm which position the mask is meant to hide.\n",
    "    attn_mask = torch.zeros(batch_size, src_len)\n",
    "    attn_mask[0, 0] = 1  # marks position 0 of the first sample\n",
    "    # attn_mask[1, 2:] = 1  # would mark the last two tokens of the second sample\n",
    "    \n",
    "    # Instantiate the Bahdanau attention mechanism\n",
    "    attention = BahdanauAttention(hidden_size, hidden_size)\n",
    "    \n",
    "    # Forward pass\n",
    "    context, attn_weights = attention(query, keys, values, attn_mask)\n",
    "    \n",
    "    # Report tensor shapes\n",
    "    print(f\"Query shape: {query.shape}\")\n",
    "    print(f\"Keys shape: {keys.shape}\")\n",
    "    print(f\"Values shape: {values.shape}\")\n",
    "    print(f\"Context vector shape: {context.shape}\")\n",
    "    print(f\"Attention weights shape: {attn_weights.shape}\")\n",
    "    \n",
    "    # Inspect whether the attention weights are near zero at masked positions\n",
    "    print(\"\\nAttention weights:\")\n",
    "    print(attn_weights)\n",
    "    \n",
    "    # Visualize the attention weights\n",
    "    plt.figure(figsize=(10, 4))\n",
    "    plt.subplot(1, 2, 1)\n",
    "    plt.imshow(attn_weights.detach().numpy(), cmap='viridis')\n",
    "    plt.colorbar()\n",
    "    plt.title('Attention Weights')\n",
    "    plt.xlabel('Source Sequence Position')\n",
    "    plt.ylabel('Batch Sample')\n",
    "    \n",
    "    return context, attn_weights\n",
    "\n",
    "# Run the test\n",
    "context, attn_weights = test_bahdanau_attention()\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "bd474a29",
   "metadata": {},
   "source": [
    "# Decoder 解码器"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "id": "40c791bd",
   "metadata": {},
   "outputs": [],
   "source": [
    "class Decoder(nn.Module):\n",
    "    \"\"\"GRU decoder that attends over encoder outputs via Bahdanau attention.\"\"\"\n",
    "\n",
    "    def __init__(\n",
    "        self,\n",
    "        vocab_size,\n",
    "        embedding_dim=256,\n",
    "        hidden_dim=1024,\n",
    "        num_layers=1,\n",
    "        ):\n",
    "        super().__init__()\n",
    "        self.embedding = nn.Embedding(vocab_size, embedding_dim)\n",
    "        # GRU input = token embedding concatenated with the attention context vector\n",
    "        self.gru = nn.GRU(embedding_dim + hidden_dim, hidden_dim, num_layers=num_layers, batch_first=True)\n",
    "        self.fc = nn.Linear(hidden_dim, vocab_size) #final classifier: one logit per vocabulary entry\n",
    "        self.dropout = nn.Dropout(0.6) #0.6 is a tunable hyperparameter\n",
    "        self.attention = BahdanauAttention(hidden_dim) #produces the context_vector; NOTE(review): constructed with one arg here vs two in the earlier test cell — confirm the second parameter has a default\n",
    "\n",
    "    def forward(self, decoder_input, hidden, encoder_outputs, attn_mask=None):\n",
    "        \"\"\"\n",
    "        Run a single decoding step with attention.\n",
    "\n",
    "        Args:\n",
    "            decoder_input: input token ids, shape [batch_size, 1]\n",
    "            hidden: decoder hidden state, shape [num_layers, batch_size, hidden_dim];\n",
    "                the encoder's final hidden state is passed in for the first step\n",
    "            encoder_outputs: encoder outputs, shape [batch_size, sequence_length, hidden_dim]\n",
    "            attn_mask: attention mask, shape [batch_size, sequence_length]\n",
    "                (this is the encoder-inputs mask)\n",
    "\n",
    "        Returns:\n",
    "            logits: output scores, shape [batch_size, 1, vocab_size]\n",
    "            hidden: updated hidden state, shape [num_layers, batch_size, hidden_dim]\n",
    "            attention_score: attention weights, shape [batch_size, sequence_length]\n",
    "                (the shape test below prints torch.Size([2, 5]) for batch=2, seq=5)\n",
    "        \"\"\"\n",
    "        # Assertions to catch malformed inputs early\n",
    "        # decoder_input.shape = [batch size, 1]\n",
    "        assert len(decoder_input.shape) == 2 and decoder_input.shape[-1] == 1, f\"decoder_input.shape = {decoder_input.shape} is not valid\"\n",
    "        # hidden.shape = [num_layers, batch size, hidden_dim]; the encoder's hidden state on the first step\n",
    "        assert len(hidden.shape) == 3, f\"hidden.shape = {hidden.shape} is not valid\"\n",
    "        # encoder_outputs.shape = [batch size, sequence length, hidden_dim]\n",
    "        assert len(encoder_outputs.shape) == 3, f\"encoder_outputs.shape = {encoder_outputs.shape} is not valid\"\n",
    "        \n",
    "        # Attention over the encoder outputs; context_vector.shape = [batch_size, hidden_dim]\n",
    "        context_vector, attention_score = self.attention(\n",
    "            query=hidden[-1], keys=encoder_outputs, values=encoder_outputs, attn_mask=attn_mask)\n",
    "        # decoder_input.shape = [batch size, 1] --> embeds.shape = [batch size, 1, embedding_dim]\n",
    "        embeds = self.embedding(decoder_input)\n",
    "\n",
    "        # context_vector.shape = [batch size, hidden_dim] --> unsqueeze(-2) adds a dim: [batch size, 1, hidden_dim]\n",
    "        embeds = torch.cat([context_vector.unsqueeze(-2), embeds], dim=-1)\n",
    "        # new embeds.shape = [batch size, 1, embedding_dim + hidden_dim]\n",
    "        seq_output, hidden = self.gru(embeds, hidden) #experiment idea: drop the hidden argument and compare the final BLEU score\n",
    "        # seq_output.shape = [batch size, 1, hidden_dim]\n",
    "        logits = self.fc(self.dropout(seq_output))\n",
    "        # logits.shape = [batch size, 1, vocab size]; attention_score.shape = [batch size, sequence length]\n",
    "        return logits, hidden, attention_score\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "id": "16b56c8a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "decoder_input.shape: torch.Size([2, 1])\n",
      "decoder_hidden.shape: torch.Size([1, 2, 32])\n",
      "encoder_outputs.shape: torch.Size([2, 5, 32])\n",
      "logits.shape: torch.Size([2, 1, 1000])\n",
      "hidden.shape: torch.Size([1, 2, 32])\n",
      "attention_score.shape: torch.Size([2, 5])\n"
     ]
    }
   ],
   "source": [
    "# Forward pass to verify the Decoder is wired correctly\n",
    "batch_size = 2\n",
    "seq_len = 5\n",
    "hidden_dim = 32\n",
    "vocab_size = 1000\n",
    "embedding_dim = 64\n",
    "num_layers = 1\n",
    "\n",
    "# Create mock inputs\n",
    "decoder_input = torch.randint(0, vocab_size, (batch_size, 1))  # [batch_size, 1]\n",
    "decoder_hidden = torch.randn(num_layers, batch_size, hidden_dim)  # [num_layers, batch_size, hidden_dim]\n",
    "encoder_outputs = torch.randn(batch_size, seq_len, hidden_dim)  # [batch_size, seq_len, hidden_dim]\n",
    "attn_mask = torch.ones(batch_size, seq_len)  # [batch_size, seq_len]\n",
    "\n",
    "# Build the Decoder model\n",
    "decoder = Decoder(vocab_size, embedding_dim, hidden_dim)\n",
    "\n",
    "# Forward pass\n",
    "logits, hidden, attention_score = decoder(decoder_input, decoder_hidden, encoder_outputs, attn_mask)\n",
    "\n",
    "# Print the output shapes\n",
    "print(f\"decoder_input.shape: {decoder_input.shape}\")\n",
    "print(f\"decoder_hidden.shape: {decoder_hidden.shape}\")\n",
    "print(f\"encoder_outputs.shape: {encoder_outputs.shape}\")\n",
    "print(f\"logits.shape: {logits.shape}\")\n",
    "print(f\"hidden.shape: {hidden.shape}\")\n",
    "print(f\"attention_score.shape: {attention_score.shape}\")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "df2a637a",
   "metadata": {},
   "source": [
    "# Sequence2Sequence"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e7f01628",
   "metadata": {},
   "outputs": [],
   "source": [
    "class Seq2Seq(nn.Module):\n",
    "    \"\"\"Encoder-decoder sequence-to-sequence model with Bahdanau attention.\"\"\"\n",
    "\n",
    "    def __init__(\n",
    "        self,\n",
    "        src_vocab_size, #source vocabulary size\n",
    "        trg_vocab_size, #target vocabulary size\n",
    "        encoder_embedding_dim=256,\n",
    "        encoder_hidden_dim=1024, #encoder_hidden_dim and decoder_hidden_dim must be equal because of how BahdanauAttention is designed\n",
    "        encoder_num_layers=1,\n",
    "        decoder_embedding_dim=256,\n",
    "        decoder_hidden_dim=1024,\n",
    "        decoder_num_layers=1,\n",
    "        bos_idx=1,\n",
    "        eos_idx=3,\n",
    "        max_length=512,\n",
    "        device=None,\n",
    "        ):\n",
    "        super().__init__()\n",
    "        self.bos_idx = bos_idx\n",
    "        self.eos_idx = eos_idx\n",
    "        self.max_length = max_length\n",
    "        self.device = device\n",
    "        self.encoder = Encoder(\n",
    "            src_vocab_size,\n",
    "            embedding_dim=encoder_embedding_dim,\n",
    "            hidden_size=encoder_hidden_dim,\n",
    "            num_layers=encoder_num_layers,\n",
    "            )\n",
    "        self.decoder = Decoder(\n",
    "            trg_vocab_size,\n",
    "            embedding_dim=decoder_embedding_dim,\n",
    "            hidden_dim=decoder_hidden_dim,\n",
    "            num_layers=decoder_num_layers,\n",
    "            )\n",
    "        \n",
    "    def forward(self, src, decoder_input, src_mask=None, teacher_forcing_ratio=0.5):\n",
    "        \"\"\"\n",
    "        Forward pass used during training.\n",
    "        \n",
    "        Args:\n",
    "            src: source sequence [batch_size, src_len]\n",
    "            decoder_input: target-side input sequence [batch_size, tgt_len]\n",
    "            src_mask: mask for the source sequence [batch_size, src_len]\n",
    "            teacher_forcing_ratio: probability of using teacher forcing.\n",
    "                NOTE(review): currently unused — the loop always feeds\n",
    "                decoder_input[:, t:t+1], i.e. teacher forcing is always on.\n",
    "            \n",
    "        Returns:\n",
    "            logits: outputs for all time steps [batch_size, tgt_len, vocab_size]\n",
    "            scores: attention scores concatenated along the last dim,\n",
    "                shape [batch_size, src_len * tgt_len] (each step contributes\n",
    "                a [batch_size, src_len] slice; the shape test prints [2, 3000]\n",
    "                for src_len=50, tgt_len=60)\n",
    "        \"\"\"\n",
    "        batch_size = src.shape[0] \n",
    "        tgt_len = decoder_input.shape[1]\n",
    "        \n",
    "        # Encoder forward pass\n",
    "        encoder_outputs, hidden = self.encoder(src)\n",
    "\n",
    "        logits_list = []\n",
    "        scores_list = []\n",
    "        for t in range(tgt_len):\n",
    "            # One decoder step; logits.shape = [batch_size, 1, vocab_size],\n",
    "            # attention_scores.shape = [batch_size, src_len]\n",
    "            logits, hidden, attention_scores = self.decoder(decoder_input[:, t:t+1], hidden, encoder_outputs, src_mask)\n",
    "            \n",
    "            logits_list.append(logits)\n",
    "            scores_list.append(attention_scores)\n",
    "            \n",
    "        # cat along dim=-2 -> [batch_size, tgt_len, vocab_size];\n",
    "        # cat along dim=-1 -> [batch_size, src_len * tgt_len]\n",
    "        return torch.cat(logits_list, dim=-2), torch.cat(scores_list, dim=-1)\n",
    "    \n",
    "    @torch.no_grad()\n",
    "    def infer(self, src, src_mask=None):\n",
    "        \"\"\"\n",
    "        Greedy decoding at inference time.\n",
    "        \n",
    "        Args:\n",
    "            src: source sequence [batch_size, src_len]\n",
    "            src_mask: mask for the source sequence [batch_size, src_len];\n",
    "                irrelevant for a single sample, needed for batched inference\n",
    "            \n",
    "        Returns:\n",
    "            outputs: generated token ids [batch_size, max_length] (zero-filled\n",
    "                beyond the step at which decoding stopped)\n",
    "            attention_scores: per-step scores stacked along a new leading dim,\n",
    "                shape [num_steps, batch_size, src_len]\n",
    "        \"\"\"\n",
    "        batch_size = src.shape[0]\n",
    "        src_len = src.shape[1]\n",
    "        \n",
    "        # Encoder forward pass\n",
    "        encoder_outputs, hidden = self.encoder(src)\n",
    "        \n",
    "        # Buffer for the generated sequence, shape (bs, max_length)\n",
    "        outputs = torch.zeros(batch_size, self.max_length).long().to(self.device)\n",
    "        \n",
    "        # The first decoder input is the special begin-of-sequence token, shape (bs, 1)\n",
    "        decoder_input = torch.tensor([[self.bos_idx]] * batch_size).to(self.device)\n",
    "        \n",
    "        # Tracks whether each sequence has emitted the end token, shape (bs,);\n",
    "        # True means that sequence is finished\n",
    "        finished = torch.zeros(batch_size).bool().to(self.device)\n",
    "        score_list = [] #attention scores kept for plotting later\n",
    "        for t in range(self.max_length):\n",
    "            # One decoder step; output.shape = [batch_size, 1, vocab_size]\n",
    "            output, hidden, scores = self.decoder(decoder_input, hidden, encoder_outputs, src_mask)\n",
    "            \n",
    "            # Record the attention scores for this step\n",
    "            score_list.append(scores)\n",
    "            \n",
    "            # Pick the most likely token; pred shape (bs, 1)\n",
    "            pred = output.argmax(dim=-1)\n",
    "            \n",
    "            # Store the prediction\n",
    "            outputs[:, t] = pred.squeeze(1)\n",
    "            \n",
    "            # Stop early once every sequence has produced the end token\n",
    "            finished = finished | (pred.squeeze(1) == self.eos_idx)\n",
    "            if finished.all():#all() is True only when every sequence is finished\n",
    "                break\n",
    "                \n",
    "            # Feed the current prediction back as the next input\n",
    "            decoder_input = pred\n",
    "            \n",
    "        return outputs, torch.stack(score_list)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5c4bb349",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([2, 60, 12479])\n",
      "torch.Size([2, 3000])\n"
     ]
    }
   ],
   "source": [
    "model = Seq2Seq(src_vocab_size=len(src_word2idx), trg_vocab_size=len(trg_word2idx))\n",
    "# Run a forward pass through the model to inspect the output shapes\n",
    "encoder_inputs = torch.randint(0, 100, (2, 50)) #shape (bs, src_len) (2,50)\n",
    "decoder_inputs = torch.randint(0, 100, (2, 60)) #shape (bs, tgt_len) (2,60)\n",
    "attn_mask = torch.randint(0, 2, (2, 50)) #shape (bs, src_len) (2,50)\n",
    "logits, scores = model(src=encoder_inputs, decoder_input=decoder_inputs, src_mask=attn_mask)\n",
    "print(logits.shape)\n",
    "print(scores.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "id": "b31eacff",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "模型总参数量: 35,182,783\n",
      "编码器参数量: 10,012,928 (28.46%)\n",
      "解码器参数量: 25,169,855 (71.54%)\n"
     ]
    }
   ],
   "source": [
    "# Count the model's parameters\n",
    "def count_parameters(model):\n",
    "    \"\"\"Return the number of trainable parameters in ``model``.\"\"\"\n",
    "    return sum(p.numel() for p in model.parameters() if p.requires_grad)\n",
    "\n",
    "# Total parameter count\n",
    "total_params = count_parameters(model)\n",
    "print(f\"模型总参数量: {total_params:,}\")\n",
    "\n",
    "# Per-component parameter counts\n",
    "encoder_params = count_parameters(model.encoder)\n",
    "decoder_params = count_parameters(model.decoder)\n",
    "\n",
    "print(f\"编码器参数量: {encoder_params:,} ({encoder_params/total_params:.2%})\")\n",
    "print(f\"解码器参数量: {decoder_params:,} ({decoder_params/total_params:.2%})\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "id": "d355f8cc",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "4层模型总参数量: 72,968,383\n",
      "4层编码器参数量: 28,905,728 (39.61%)\n",
      "4层解码器参数量: 44,062,655 (60.39%)\n",
      "\n",
      "参数量增加: 37,785,600 (2.07倍)\n"
     ]
    }
   ],
   "source": [
    "# 创建具有4层的模型\n",
    "model_4layers = Seq2Seq(\n",
    "    src_vocab_size=len(src_word2idx), \n",
    "    trg_vocab_size=len(trg_word2idx),\n",
    "    encoder_num_layers=4,  # 编码器4层\n",
    "    decoder_num_layers=4   # 解码器4层\n",
    ")\n",
    "\n",
    "# 统计总参数量\n",
    "total_params_4layers = count_parameters(model_4layers)\n",
    "print(f\"4层模型总参数量: {total_params_4layers:,}\")\n",
    "\n",
    "# 统计每个组件的参数量\n",
    "encoder_params_4layers = count_parameters(model_4layers.encoder)\n",
    "decoder_params_4layers = count_parameters(model_4layers.decoder)\n",
    "\n",
    "print(f\"4层编码器参数量: {encoder_params_4layers:,} ({encoder_params_4layers/total_params_4layers:.2%})\")\n",
    "print(f\"4层解码器参数量: {decoder_params_4layers:,} ({decoder_params_4layers/total_params_4layers:.2%})\")\n",
    "\n",
    "# 与默认层数模型比较\n",
    "print(f\"\\n参数量增加: {total_params_4layers - total_params:,} ({total_params_4layers/total_params:.2f}倍)\")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "eda38413",
   "metadata": {},
   "source": [
    "# 损失函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 60,
   "id": "c49591f6",
   "metadata": {},
   "outputs": [],
   "source": [
    "def cross_entropy_with_padding(logits, labels, padding_mask=None):\n",
    "    # logits.shape = [batch size, sequence length, num of classes]\n",
    "    # labels.shape = [batch size, sequence length]\n",
    "    # padding_mask.shape = [batch size, sequence length] decoder_labels_mask\n",
    "    bs, seq_len, nc = logits.shape\n",
    "    loss = F.cross_entropy(logits.reshape(bs * seq_len, nc), labels.reshape(-1), reduce=False) #reduce=False表示不对batch求平均\n",
    "    if padding_mask is None:#如果没有padding_mask，就直接求平均\n",
    "        loss = loss.mean()\n",
    "    else:\n",
    "        # 如果提供了 padding_mask，则将padding填充部分的损失去除后计算有效损失的均值。首先，通过将 padding_mask reshape 成一维张量，并取 1 减去得到填充掩码。这样填充部分的掩码值变为 1，非填充部分变为 0。将损失张量与填充掩码相乘，这样填充部分的损失就会变为 0。然后，计算非填充部分的损失和（sum）以及非填充部分的掩码数量（sum）作为有效损失的均值计算。(因为上面我们设计的mask的token是0，所以这里是1-padding_mask)\n",
    "        padding_mask = 1 - padding_mask.reshape(-1) #将padding_mask reshape成一维张量，mask部分为0，非mask部分为1\n",
    "        loss = torch.mul(loss, padding_mask).sum() / padding_mask.sum()\n",
    "\n",
    "    return loss\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "id": "453c6ce3",
   "metadata": {},
   "outputs": [],
   "source": [
    "class SaveCheckpointsCallback:\n",
    "    def __init__(self, save_dir, save_step=5000, save_best_only=True):\n",
    "        \"\"\"\n",
    "        Save checkpoints each save_epoch epoch.\n",
    "        We save checkpoint by epoch in this implementation.\n",
    "        Usually, training scripts with pytorch evaluating model and save checkpoint by step.\n",
    "\n",
    "        Args:\n",
    "            save_dir (str): dir to save checkpoint\n",
    "            save_epoch (int, optional): the frequency to save checkpoint. Defaults to 1.\n",
    "            save_best_only (bool, optional): If True, only save the best model or save each model at every epoch.\n",
    "        \"\"\"\n",
    "        self.save_dir = save_dir\n",
    "        self.save_step = save_step\n",
    "        self.save_best_only = save_best_only\n",
    "        self.best_metrics = - np.inf\n",
    "\n",
    "        # mkdir\n",
    "        if not os.path.exists(self.save_dir):\n",
    "            os.mkdir(self.save_dir)\n",
    "\n",
    "    def __call__(self, step, state_dict, metric=None):\n",
    "        if step % self.save_step > 0:\n",
    "            return\n",
    "\n",
    "        if self.save_best_only:\n",
    "            assert metric is not None\n",
    "            if metric >= self.best_metrics:\n",
    "                # save checkpoints\n",
    "                torch.save(state_dict, os.path.join(self.save_dir, \"best.ckpt\"))\n",
    "                # update best metrics\n",
    "                self.best_metrics = metric\n",
    "        else:\n",
    "            torch.save(state_dict, os.path.join(self.save_dir, f\"{step}.ckpt\"))\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "id": "9848a5f3",
   "metadata": {},
   "outputs": [],
   "source": [
    "class EarlyStopCallback:\n",
    "    def __init__(self, patience=5, min_delta=0.01):\n",
    "        \"\"\"\n",
    "\n",
    "        Args:\n",
    "            patience (int, optional): Number of epochs with no improvement after which training will be stopped.. Defaults to 5.\n",
    "            min_delta (float, optional): Minimum change in the monitored quantity to qualify as an improvement, i.e. an absolute\n",
    "                change of less than min_delta, will count as no improvement. Defaults to 0.01.\n",
    "        \"\"\"\n",
    "        self.patience = patience\n",
    "        self.min_delta = min_delta\n",
    "        self.best_metric = - np.inf\n",
    "        self.counter = 0\n",
    "\n",
    "    def __call__(self, metric):\n",
    "        if metric >= self.best_metric + self.min_delta:\n",
    "            # update best metric\n",
    "            self.best_metric = metric\n",
    "            # reset counter\n",
    "            self.counter = 0\n",
    "        else:\n",
    "            self.counter += 1\n",
    "\n",
    "    @property #@property 装饰器将方法转换为属性，这样就可以通过实例.early_stop 来访问方法，而不是实例.early_stop()\n",
    "    def early_stop(self):\n",
    "        return self.counter >= self.patience\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4bf6c187",
   "metadata": {},
   "source": [
    "# 训练与评估"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b6d9e903",
   "metadata": {},
   "outputs": [],
   "source": [
    "def evaluate(model, dataloader, loss_fct):\n",
    "    \"\"\"\n",
    "    Evaluate the model on the validation/test set\n",
    "    \n",
    "    Args:\n",
    "        model: the seq2seq model with attention\n",
    "        dataloader: validation/test dataloader\n",
    "        loss_fct: loss function\n",
    "    \n",
    "    Returns:\n",
    "        float: average loss on the dataset\n",
    "    \"\"\"\n",
    "    model.eval()\n",
    "    total_loss = 0\n",
    "    \n",
    "    with torch.no_grad():\n",
    "        for batch in dataloader:\n",
    "            # Unpack the batch\n",
    "            encoder_inputs = batch[\"encoder_inputs\"]\n",
    "            encoder_inputs_mask = batch[\"encoder_inputs_mask\"]\n",
    "            decoder_inputs = batch[\"decoder_inputs\"]\n",
    "            decoder_labels = batch[\"decoder_labels\"]\n",
    "            decoder_labels_mask = batch[\"decoder_labels_mask\"]\n",
    "            \n",
    "            # Forward pass\n",
    "            outputs,_ = model(\n",
    "                src=encoder_inputs,\n",
    "                src_mask=encoder_inputs_mask,\n",
    "                decoder_input=decoder_inputs\n",
    "            )\n",
    "            \n",
    "            # Calculate loss\n",
    "            loss = loss_fct(\n",
    "                outputs,\n",
    "                decoder_labels,\n",
    "                decoder_labels_mask\n",
    "            )\n",
    "            \n",
    "            # Update loss\n",
    "            total_loss += loss.item() \n",
    "    \n",
    "    avg_loss = total_loss / len(dataloader)\n",
    "    \n",
    "    model.train()\n",
    "    return avg_loss\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "786a6a82",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 训练\n",
    "def training(\n",
    "    model,\n",
    "    train_loader,\n",
    "    val_loader,\n",
    "    epoch,\n",
    "    loss_fct,\n",
    "    optimizer,\n",
    "    save_ckpt_callback=None,\n",
    "    early_stop_callback=None,\n",
    "    eval_step=500,\n",
    "):\n",
    "    record_dict = {\"train\": [], \"val\": []}  # record_dict是字典，记录训练和验证的损失\n",
    "\n",
    "    global_step = 1\n",
    "    val_loss = 0\n",
    "    model.train()  # 切换到训练模式\n",
    "    with tqdm(total=epoch * len(train_loader)) as pbar:\n",
    "        for epoch_id in range(epoch):\n",
    "            # training\n",
    "            for batch in train_loader:\n",
    "                encoder_inputs = batch[\"encoder_inputs\"]\n",
    "                encoder_inputs_mask = batch[\"encoder_inputs_mask\"]\n",
    "                decoder_inputs = batch[\"decoder_inputs\"]\n",
    "                decoder_labels = batch[\"decoder_labels\"]\n",
    "                decoder_labels_mask = batch[\"decoder_labels_mask\"]\n",
    "\n",
    "                # 梯度清空\n",
    "                optimizer.zero_grad()\n",
    "\n",
    "                # 前向计算\n",
    "                logits, _ = model(\n",
    "                    src=encoder_inputs,\n",
    "                    src_mask=encoder_inputs_mask,\n",
    "                    decoder_input=decoder_inputs,\n",
    "                )\n",
    "                loss = loss_fct(\n",
    "                    logits, decoder_labels, decoder_labels_mask\n",
    "                )\n",
    "\n",
    "                # 梯度回传\n",
    "                loss.backward()\n",
    "\n",
    "                # 调整优化器，包括学习率的变动等\n",
    "                optimizer.step()\n",
    "\n",
    "                loss = loss.cpu().item()\n",
    "                # record\n",
    "                record_dict[\"train\"].append({\"loss\": loss, \"step\": global_step})\n",
    "\n",
    "                # evaluating\n",
    "                if global_step % eval_step == 0:\n",
    "                    val_loss = evaluate(model, val_loader, loss_fct) #evaluate是计算验证集的损失\n",
    "                    record_dict[\"val\"].append({\"loss\": val_loss, \"step\": global_step})\n",
    "\n",
    "\n",
    "                    # 2. 保存模型权重 save model checkpoint\n",
    "                    if save_ckpt_callback is not None:\n",
    "                        save_ckpt_callback(\n",
    "                            global_step, model.state_dict(), metric=-val_loss\n",
    "                        )\n",
    "\n",
    "                    # 3. 早停 Early Stop\n",
    "                    if early_stop_callback is not None:\n",
    "                        early_stop_callback(-val_loss)\n",
    "                        if early_stop_callback.early_stop:#early_stop_callback.early_stop是True，则早停\n",
    "                            print(\n",
    "                                f\"Early stop at epoch {epoch_id} / global_step {global_step}\"\n",
    "                            )\n",
    "                            return record_dict\n",
    "\n",
    "                # udate step\n",
    "                global_step += 1\n",
    "                pbar.update(1)\n",
    "                pbar.set_postfix(\n",
    "                    {\"epoch\": epoch_id,'global_step': global_step, \"loss\": loss, \"val_loss\": val_loss}\n",
    "                )  # 更新进度条\n",
    "\n",
    "    return record_dict\n",
    "\n",
    "\n",
    "epoch = 20\n",
    "batch_size = 64\n",
    "\n",
    "model = Seq2Seq(\n",
    "    src_vocab_size=len(src_word2idx),\n",
    "    trg_vocab_size=len(trg_word2idx),\n",
    "    encoder_num_layers=1,\n",
    "    decoder_num_layers=1,\n",
    ")\n",
    "\n",
    "# 为训练和验证数据创建DataLoader\n",
    "train_dl = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)\n",
    "test_dl = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, collate_fn=collate_fn)\n",
    "\n",
    "\n",
    "# 1. 定义损失函数 采用交叉熵损失\n",
    "loss_fct = cross_entropy_with_padding\n",
    "# 2. 定义优化器 采用 adam\n",
    "# Optimizers specified in the torch.optim package\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n",
    "\n",
    "\n",
    "# 3. save best\n",
    "if not os.path.exists(\"checkpoints\"):\n",
    "    os.makedirs(\"checkpoints\")\n",
    "save_ckpt_callback = SaveCheckpointsCallback(\n",
    "    f\"checkpoints\", save_step=200, save_best_only=True\n",
    ")\n",
    "# 4. early stop\n",
    "early_stop_callback = EarlyStopCallback(patience=5)\n",
    "\n",
    "model = model.to(device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 57,
   "id": "7f7cd9cb",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "347313d04fb24e0c8c9350ca919e027a",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/33500 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[57], line 1\u001b[0m\n\u001b[1;32m----> 1\u001b[0m record \u001b[38;5;241m=\u001b[39m \u001b[43mtraining\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m      2\u001b[0m \u001b[43m    \u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m      3\u001b[0m \u001b[43m    \u001b[49m\u001b[43mtrain_dl\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m      4\u001b[0m \u001b[43m    \u001b[49m\u001b[43mtest_dl\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m      5\u001b[0m \u001b[43m    \u001b[49m\u001b[43mepoch\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m      6\u001b[0m \u001b[43m    \u001b[49m\u001b[43mloss_fct\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m      7\u001b[0m \u001b[43m    \u001b[49m\u001b[43moptimizer\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m      8\u001b[0m \u001b[43m    \u001b[49m\u001b[43msave_ckpt_callback\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43msave_ckpt_callback\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m      9\u001b[0m \u001b[43m    \u001b[49m\u001b[43mearly_stop_callback\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mearly_stop_callback\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m     10\u001b[0m \u001b[43m    \u001b[49m\u001b[43meval_step\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m200\u001b[39;49m\n\u001b[0;32m     11\u001b[0m \u001b[43m    \u001b[49m\u001b[43m)\u001b[49m\n",
      "Cell \u001b[1;32mIn[56], line 42\u001b[0m, in \u001b[0;36mtraining\u001b[1;34m(model, train_loader, val_loader, epoch, loss_fct, optimizer, save_ckpt_callback, early_stop_callback, eval_step)\u001b[0m\n\u001b[0;32m     37\u001b[0m loss \u001b[38;5;241m=\u001b[39m loss_fct(\n\u001b[0;32m     38\u001b[0m     logits, decoder_labels, mask\u001b[38;5;241m=\u001b[39mdecoder_labels_mask\n\u001b[0;32m     39\u001b[0m )\n\u001b[0;32m     41\u001b[0m \u001b[38;5;66;03m# 梯度回传\u001b[39;00m\n\u001b[1;32m---> 42\u001b[0m \u001b[43mloss\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbackward\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m     44\u001b[0m \u001b[38;5;66;03m# 调整优化器，包括学习率的变动等\u001b[39;00m\n\u001b[0;32m     45\u001b[0m optimizer\u001b[38;5;241m.\u001b[39mstep()\n",
      "File \u001b[1;32m~\\AppData\\Roaming\\Python\\Python312\\site-packages\\torch\\_tensor.py:581\u001b[0m, in \u001b[0;36mTensor.backward\u001b[1;34m(self, gradient, retain_graph, create_graph, inputs)\u001b[0m\n\u001b[0;32m    571\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m has_torch_function_unary(\u001b[38;5;28mself\u001b[39m):\n\u001b[0;32m    572\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m handle_torch_function(\n\u001b[0;32m    573\u001b[0m         Tensor\u001b[38;5;241m.\u001b[39mbackward,\n\u001b[0;32m    574\u001b[0m         (\u001b[38;5;28mself\u001b[39m,),\n\u001b[1;32m   (...)\u001b[0m\n\u001b[0;32m    579\u001b[0m         inputs\u001b[38;5;241m=\u001b[39minputs,\n\u001b[0;32m    580\u001b[0m     )\n\u001b[1;32m--> 581\u001b[0m \u001b[43mtorch\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mautograd\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbackward\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m    582\u001b[0m \u001b[43m    \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mgradient\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mretain_graph\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mcreate_graph\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43minputs\u001b[49m\n\u001b[0;32m    583\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[1;32m~\\AppData\\Roaming\\Python\\Python312\\site-packages\\torch\\autograd\\__init__.py:347\u001b[0m, in \u001b[0;36mbackward\u001b[1;34m(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)\u001b[0m\n\u001b[0;32m    342\u001b[0m     retain_graph \u001b[38;5;241m=\u001b[39m create_graph\n\u001b[0;32m    344\u001b[0m \u001b[38;5;66;03m# The reason we repeat the same comment below is that\u001b[39;00m\n\u001b[0;32m    345\u001b[0m \u001b[38;5;66;03m# some Python versions print out the first line of a multi-line function\u001b[39;00m\n\u001b[0;32m    346\u001b[0m \u001b[38;5;66;03m# calls in the traceback and some print out the last line\u001b[39;00m\n\u001b[1;32m--> 347\u001b[0m \u001b[43m_engine_run_backward\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m    348\u001b[0m \u001b[43m    \u001b[49m\u001b[43mtensors\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    349\u001b[0m \u001b[43m    \u001b[49m\u001b[43mgrad_tensors_\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    350\u001b[0m \u001b[43m    \u001b[49m\u001b[43mretain_graph\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    351\u001b[0m \u001b[43m    \u001b[49m\u001b[43mcreate_graph\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    352\u001b[0m \u001b[43m    \u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m    353\u001b[0m \u001b[43m    \u001b[49m\u001b[43mallow_unreachable\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[0;32m    354\u001b[0m \u001b[43m    \u001b[49m\u001b[43maccumulate_grad\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\n\u001b[0;32m    355\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n",
      "File \u001b[1;32m~\\AppData\\Roaming\\Python\\Python312\\site-packages\\torch\\autograd\\graph.py:825\u001b[0m, in \u001b[0;36m_engine_run_backward\u001b[1;34m(t_outputs, *args, **kwargs)\u001b[0m\n\u001b[0;32m    823\u001b[0m     unregister_hooks \u001b[38;5;241m=\u001b[39m _register_logging_hooks_on_whole_graph(t_outputs)\n\u001b[0;32m    824\u001b[0m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[1;32m--> 825\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mVariable\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_execution_engine\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mrun_backward\u001b[49m\u001b[43m(\u001b[49m\u001b[43m  \u001b[49m\u001b[38;5;66;43;03m# Calls into the C++ engine to run the backward pass\u001b[39;49;00m\n\u001b[0;32m    826\u001b[0m \u001b[43m        \u001b[49m\u001b[43mt_outputs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\n\u001b[0;32m    827\u001b[0m \u001b[43m    \u001b[49m\u001b[43m)\u001b[49m  \u001b[38;5;66;03m# Calls into the C++ engine to run the backward pass\u001b[39;00m\n\u001b[0;32m    828\u001b[0m \u001b[38;5;28;01mfinally\u001b[39;00m:\n\u001b[0;32m    829\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m attach_logging_hooks:\n",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "record = training(\n",
    "    model,\n",
    "    train_dl,\n",
    "    test_dl,\n",
    "    epoch,\n",
    "    loss_fct,\n",
    "    optimizer,\n",
    "    save_ckpt_callback=save_ckpt_callback,\n",
    "    early_stop_callback=early_stop_callback,\n",
    "    eval_step=200\n",
    "    )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 59,
   "id": "d709ee04",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "33500"
      ]
     },
     "execution_count": 59,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "len(train_dl)*20"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "88daef25",
   "metadata": {},
   "outputs": [],
   "source": [
    "plt.plot([i[\"step\"] for i in record[\"train\"]], [i[\"loss\"] for i in record[\"train\"]], label=\"train\")\n",
    "plt.plot([i[\"step\"] for i in record[\"val\"]], [i[\"loss\"] for i in record[\"val\"]], label=\"val\")\n",
    "plt.grid()\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "80f986f8",
   "metadata": {},
   "source": [
    "# 推理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "47c9d96d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# load checkpoints,如何上线,本地使用\n",
    "model = Seq2Seq(len(src_word2idx), len(trg_word2idx))\n",
    "model.load_state_dict(torch.load(f\"best.ckpt\", weights_only=True,map_location=\"cpu\"))\n",
    "\n",
    "class Translator:\n",
    "    def __init__(self, model, src_tokenizer, trg_tokenizer):\n",
    "        self.model = model\n",
    "        self.model.eval() # 切换到验证模式\n",
    "        self.src_tokenizer = src_tokenizer #源语言tokenizer  spa\n",
    "        self.trg_tokenizer = trg_tokenizer #目标语言tokenizer en\n",
    "\n",
    "    def draw_attention_map(self, scores, src_words_list, trg_words_list):\n",
    "        \"\"\"绘制注意力热力图\n",
    "\n",
    "        Args:\n",
    "            - scores (numpy.ndarray): shape = [source sequence length, target sequence length]\n",
    "        \"\"\"\n",
    "        plt.matshow(scores, cmap='viridis') # 注意力矩阵,显示注意力分数值\n",
    "        # 获取当前的轴\n",
    "        ax = plt.gca()\n",
    "\n",
    "        # 设置热图中每个单元格的分数的文本\n",
    "        for i in range(scores.shape[0]): #shape[0]是src_len\n",
    "            for j in range(scores.shape[1]): #shape[1]是target_len\n",
    "                ax.text(i, j, f'{scores[i, j]:.2f}',  # 格式化数字显示\n",
    "                               ha='center', va='center', color='k')\n",
    "\n",
    "        # 设置x轴和y轴的刻度，src_words_list是源语言的单词列表，trg_words_list是目标语言的单词列表\n",
    "        plt.xticks(range(scores.shape[1]), trg_words_list)\n",
    "        plt.yticks(range(scores.shape[0]), src_words_list)\n",
    "        plt.show()\n",
    "\n",
    "    def __call__(self, sentence):\n",
    "        sentence = preprocess_sentence(sentence) # 预处理句子，标点符号处理等\n",
    "        encoder_input, attn_mask = self.src_tokenizer.encode(\n",
    "            [sentence.split()], #sentence.split() 将句子拆分成单词列表\n",
    "            padding_first=True,\n",
    "            add_bos=True,\n",
    "            add_eos=True,\n",
    "            return_mask=True,\n",
    "            ) # 对输入进行编码，并返回encode_piadding_mask\n",
    "        encoder_input = torch.Tensor(encoder_input).to(dtype=torch.int64) # 转换成tensor\n",
    "\n",
    "        preds, scores = model.infer(src=encoder_input, src_mask=attn_mask) #预测\n",
    "        #通过tokenizer转换成文字，split=True表示返回列表，remove_eos=False表示不移除[EOS],是为了画热力图\n",
    "        trg_sentence = self.trg_tokenizer.decode([preds], split=True, remove_eos=False)[0] \n",
    "\n",
    "        src_decoded = self.src_tokenizer.decode(\n",
    "            encoder_input.tolist(),\n",
    "            split=True,\n",
    "            remove_bos=False,\n",
    "            remove_eos=False\n",
    "            )[0] #对输入编码id进行解码，转换成文字,为了画图\n",
    "\n",
    "        self.draw_attention_map(\n",
    "            scores.squeeze(0).numpy(),\n",
    "            src_decoded, # 注意力图的源句子\n",
    "            trg_sentence # 注意力图的目标句子\n",
    "            )\n",
    "        return \" \".join(trg_sentence[:-1]) #去掉最后一个token,因为最后一个token是[EOS]\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f5dca60f",
   "metadata": {},
   "outputs": [],
   "source": [
    "translator = Translator(model.cpu(), src_tokenizer, trg_tokenizer)\n",
    "translator('hace mucho frio aqui .')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3ba93dbd",
   "metadata": {},
   "source": [
    "# bleu指标计算"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "55bda024",
   "metadata": {},
   "outputs": [],
   "source": [
    "model = Seq2Seq(len(src_word2idx), len(trg_word2idx))\n",
    "model.load_state_dict(torch.load(f\"./checkpoints/best.ckpt\", map_location=\"cpu\"))\n",
    "\n",
    "class Translator:\n",
    "    def __init__(self, model, src_tokenizer, trg_tokenizer):\n",
    "        self.model = model\n",
    "        self.model.eval() # 切换到验证模式\n",
    "        self.src_tokenizer = src_tokenizer\n",
    "        self.trg_tokenizer = trg_tokenizer\n",
    "\n",
    "    def __call__(self, sentence):\n",
    "        sentence = preprocess_sentence(sentence) # 预处理句子，标点符号处理等\n",
    "        encoder_input, attn_mask = self.src_tokenizer.encode(\n",
    "            [sentence.split()],\n",
    "            padding_first=True,\n",
    "            add_bos=True,\n",
    "            add_eos=True,\n",
    "            return_mask=True,\n",
    "            ) # 对输入进行编码，并返回encode_piadding_mask\n",
    "        encoder_input = torch.Tensor(encoder_input).to(dtype=torch.int64) # 转换成tensor\n",
    "\n",
    "        preds, scores = model.infer(src=encoder_input, src_mask=attn_mask) #预测\n",
    "\n",
    "        trg_sentence = self.trg_tokenizer.decode([preds], split=True, remove_eos=False)[0] #通过tokenizer转换成文字\n",
    "\n",
    "        return \" \".join(trg_sentence[:-1])\n",
    "\n",
    "from nltk.translate.bleu_score import sentence_bleu\n",
    "\n",
    "def evaluate_bleu_on_test_set(test_data, translator):\n",
    "    \"\"\"\n",
    "    在测试集上计算平均 BLEU 分数。\n",
    "    :param test_data: 测试集数据，格式为 [(src_sentence, [ref_translation1, ref_translation2, ...]), ...]\n",
    "    :param translator: 翻译器对象（Translator 类的实例）\n",
    "    :return: 平均 BLEU 分数\n",
    "    \"\"\"\n",
    "    total_bleu = 0.0\n",
    "    num_samples = len(test_data)\n",
    "    i=0\n",
    "    for src_sentence, ref_translations in test_data:\n",
    "        # 使用翻译器生成翻译结果\n",
    "        candidate_translation = translator(src_sentence)\n",
    "\n",
    "        # 计算 BLEU 分数\n",
    "        bleu_score = sentence_bleu([ref_translations.split()], candidate_translation.split(),weights=(1, 0, 0, 0))\n",
    "        total_bleu += bleu_score\n",
    "\n",
    "        # 打印当前句子的 BLEU 分数（可选）\n",
    "        # print(f\"Source: {src_sentence}\")\n",
    "        # print(f\"Reference: {ref_translations}\")\n",
    "        # print(f\"Candidate: {candidate_translation}\")\n",
    "        # print(f\"BLEU: {bleu_score:.4f}\")\n",
    "        # print(\"-\" * 50)\n",
    "        # i+=1\n",
    "        # if i>10:\n",
    "        #     break\n",
    "    # 计算平均 BLEU 分数\n",
    "    avg_bleu = total_bleu / num_samples\n",
    "    return avg_bleu\n",
    "translator = Translator(model.cpu(), src_tokenizer, trg_tokenizer)\n",
    "evaluate_bleu_on_test_set(test_dataset, translator)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
