{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Seq2Seq完成机器翻译任务\n",
    "\n",
    "本文使用Seq2Seq实现英译中的案例.\n",
    "\n",
    "## 1. 数据集准备"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Read the parallel corpus line-by-line; the two files must stay in lockstep.\n",
    "fen = open('train.tags.zh-en.en', encoding='utf8')\n",
    "fzh = open('train.tags.zh-en.zh', encoding='utf8')\n",
    "en_zh = []\n",
    "try:\n",
    "    while True:\n",
    "        lz = fzh.readline()                     # one line of the Chinese file\n",
    "        le = fen.readline()                     # one line of the English file\n",
    "        if not lz:                              # EOF reached\n",
    "            assert not le                       # both files must end together\n",
    "            break\n",
    "        lz, le = lz.strip(), le.strip()\n",
    "        if lz.startswith('<url>'):              #! a talk header: skip its 5 metadata lines\n",
    "            assert le.startswith('<url>')\n",
    "            for tag in ('<keywords>', '<speaker>', '<talkid>', '<title>', '<description>'):\n",
    "                lz = fzh.readline()\n",
    "                le = fen.readline()\n",
    "                assert lz.startswith(tag)\n",
    "                assert le.startswith(tag)\n",
    "        else:\n",
    "            if not lz:                          # blank line: treated as end of usable data\n",
    "                assert not le\n",
    "                break\n",
    "            # English side: strip '.' and ','; Chinese side: split per character.\n",
    "            lee = []\n",
    "            for w in le.split(' '):\n",
    "                w = w.replace('.', '').replace(',', '')\n",
    "                if w:\n",
    "                    lee.append(w)\n",
    "            en_zh.append([lee, list(lz)])\n",
    "finally:\n",
    "    fen.close()                                 # fix: handles were never closed\n",
    "    fzh.close()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 48239/48239 [00:00<00:00, 74149.32it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "48239 34734 4012\n",
      "[['This', 'is', 'Bill', 'Lange', \"I'm\", 'Dave', 'Gallo'], ['大', '卫', '.', '盖', '罗', '：', '这', '位', '是', '比', '尔', '.', '兰', '格', '，', ' ', '我', '是', '大', '卫', '.', '盖', '罗', '。']]\n",
      "[['Most', 'of', 'the', 'animals', 'are', 'in', 'the', 'oceans'], ['大', '部', '分', '的', '动', '物', '也', '都', '生', '活', '在', '海', '洋', '里', '。']]\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "from tqdm import tqdm\n",
    "\n",
    "# Build the vocabulary of each language from the parallel corpus.\n",
    "en_words = set()\n",
    "zh_words = set()\n",
    "for en_sent, zh_sent in tqdm(en_zh):\n",
    "    for token in en_sent:\n",
    "        token = token.replace('.', '').replace(',', '').lower()\n",
    "        if token:\n",
    "            en_words.add(token)\n",
    "    zh_words.update(ch for ch in zh_sent if ch)\n",
    "print(len(en_zh), len(en_words), len(zh_words))          # sentence count: 48239; EN / ZH vocab sizes: 34734 4012\n",
    "print(en_zh[0])\n",
    "print(en_zh[10])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Reserve ids 0/1/2 for the start, end and padding tokens of both languages.\n",
    "en_wl  = ['<sos>', '<eos>', '<pad>'] + list(en_words)\n",
    "zh_wl  = ['<sos>', '<eos>', '<pad>'] + list(zh_words)\n",
    "pad_id = 2                                                 # index of '<pad>' in both lists\n",
    "\n",
    "# word -> id lookup tables\n",
    "en2id = {w: i for i, w in enumerate(en_wl)}\n",
    "zh2id = {w: i for i, w in enumerate(zh_wl)}"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 2. 构建训练集和测试集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "import random\n",
    "\n",
    "# NOTE(review): no random seed is set, so this train/dev split is not reproducible.\n",
    "random.shuffle(en_zh)                                     # shuffle before splitting\n",
    "dl        = len(en_zh)\n",
    "split_at  = int(dl * 0.8)\n",
    "train_set = en_zh[:split_at]                              # first 80% for training\n",
    "dev_set   = en_zh[split_at:]                              # remaining 20% for evaluation"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Data loaders\n",
    "import torch\n",
    "batch_size   = 16\n",
    "data_workers = 0\n",
    "\n",
    "class DataSet(torch.utils.data.Dataset):\n",
    "    \"\"\"Wraps the list of (english_tokens, chinese_chars) sentence pairs.\"\"\"\n",
    "    def __init__(self, examples):\n",
    "        self.examples = examples\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.examples)\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        example = self.examples[index]\n",
    "        s1 = example[0]                # English token list\n",
    "        s2 = example[1]                # Chinese character list\n",
    "        return s1, len(s1), s2, len(s2), index\n",
    "\n",
    "def collate_fn(batch):\n",
    "    \"\"\"Pad a batch into two LongTensors of shape [seq_len, len(batch)].\n",
    "\n",
    "    Row 0 is <sos> (id 0), the last row is <eos> (id 1), and short\n",
    "    sentences are padded with <pad> (id 2).  The width is len(batch)\n",
    "    rather than the global batch_size, so the smaller final batch of an\n",
    "    epoch no longer produces ragged rows (which made torch.LongTensor fail).\n",
    "    \"\"\"\n",
    "    n   = len(batch)\n",
    "    src = [[0] * n]                    # <sos> row\n",
    "    tar = [[0] * n]\n",
    "    src_max_l = max(b[1] for b in batch)\n",
    "    tar_max_l = max(b[3] for b in batch)\n",
    "    for i in range(src_max_l):\n",
    "        row = []\n",
    "        for x in batch:\n",
    "            if i < x[1]:\n",
    "                row.append(en2id[x[0][i].lower()])\n",
    "            else:\n",
    "                row.append(pad_id)\n",
    "        src.append(row)\n",
    "    for i in range(tar_max_l):\n",
    "        row = []\n",
    "        for x in batch:\n",
    "            if i < x[3]:\n",
    "                row.append(zh2id[x[2][i]])\n",
    "            else:\n",
    "                row.append(pad_id)\n",
    "        tar.append(row)\n",
    "    indexs = [b[4] for b in batch]\n",
    "    src.append([1] * n)                # <eos> row\n",
    "    tar.append([1] * n)\n",
    "    return torch.LongTensor(src), torch.LongTensor(tar), indexs\n",
    "\n",
    "train_dataset = DataSet(train_set)\n",
    "train_data_loader = torch.utils.data.DataLoader(\n",
    "    train_dataset,\n",
    "    batch_size=batch_size,\n",
    "    shuffle=True,\n",
    "    num_workers=data_workers,\n",
    "    collate_fn=collate_fn,\n",
    ")\n",
    "\n",
    "dev_dataset = DataSet(dev_set)\n",
    "dev_data_loader = torch.utils.data.DataLoader(\n",
    "    dev_dataset,\n",
    "    batch_size=batch_size,\n",
    "    shuffle=True,\n",
    "    num_workers=data_workers,\n",
    "    collate_fn=collate_fn,\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 3. 定义模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch.nn as nn\n",
    "\n",
    "class Encoder(nn.Module):\n",
    "    def __init__(self, input_dim, emb_dim, hid_dim, n_layers, dropout):\n",
    "        \"\"\"\n",
    "        @brief Encoder part of the seq2seq model.\n",
    "        @param input_dim  source-language vocabulary size (English here, as we translate EN->ZH)\n",
    "        @param emb_dim    source-language embedding size\n",
    "        @param hid_dim    LSTM hidden-state dimension\n",
    "        @param n_layers   number of stacked LSTM layers\n",
    "        @param dropout    dropout probability (between LSTM layers and on embeddings)\n",
    "        \"\"\"\n",
    "        super().__init__()\n",
    "        self.hid_dim   = hid_dim\n",
    "        self.n_layers  = n_layers\n",
    "        self.embedding = nn.Embedding(input_dim, emb_dim)\n",
    "        self.rnn       = nn.LSTM(emb_dim, hid_dim, n_layers, dropout = dropout)\n",
    "        self.dropout   = nn.Dropout(dropout)\n",
    "        \n",
    "    def forward(self, src):   # src shape = [src seq_len, batch_size]\n",
    "        embedded = self.dropout(self.embedding(src))             \n",
    "        outputs, (hidden, cell) = self.rnn(embedded)\n",
    "        #! outputs shape = [src_seq_len, batch_size, hid_dim]   every time step's output, concatenated\n",
    "        #! hidden shape  = [n_layers, batch_size, hid_dim]      final time step only, hence no src_seq_len\n",
    "        #! cell shape    = [n_layers, batch_size, hid_dim]\n",
    "        # outputs come from the topmost hidden layer\n",
    "        # print(\" encoder >>> \", outputs.shape, hidden.shape, cell.shape)\n",
    "        return hidden, cell\n",
    "    \n",
    "class Decoder(nn.Module):\n",
    "    def __init__(self, output_dim, emb_dim, hid_dim, n_layers, dropout):\n",
    "        \"\"\"\n",
    "        @brief Decoder part of the seq2seq model.\n",
    "        @param output_dim  target-language vocabulary size (Chinese here, as we translate EN->ZH)\n",
    "        @param emb_dim     target-language embedding size\n",
    "        @param hid_dim     LSTM hidden-state dimension\n",
    "        @param n_layers    number of stacked LSTM layers\n",
    "        @param dropout     dropout probability (between LSTM layers and on embeddings)\n",
    "        \"\"\"\n",
    "        super().__init__()\n",
    "        self.output_dim = output_dim   #! target-language vocabulary size\n",
    "        self.hid_dim    = hid_dim\n",
    "        self.n_layers   = n_layers\n",
    "        self.embedding  = nn.Embedding(output_dim, emb_dim)\n",
    "        self.rnn        = nn.LSTM(emb_dim, hid_dim, n_layers, dropout = dropout)\n",
    "        self.fc_out     = nn.Linear(hid_dim, output_dim)\n",
    "        self.dropout    = nn.Dropout(dropout)\n",
    "        \n",
    "    def forward(self, input, hidden, cell):        \n",
    "        input    = input.unsqueeze(0)\n",
    "        embedded = self.dropout(self.embedding(input))   \n",
    "        output, (hidden, cell) = self.rnn(embedded, (hidden, cell))\n",
    "        prediction             = self.fc_out(output.squeeze(0))\n",
    "        #! prediction shape = [batch_size, output_dim]\n",
    "        #! output shape = [1, batch_size, hid_dim]          a single decoding step\n",
    "        #! hidden shape = [n_layers, batch_size, hid_dim]\n",
    "        #! cell shape   = [n_layers, batch_size, hid_dim]\n",
    "        # print(\" decoder >>> \", output.shape, prediction.shape, hidden.shape, cell.shape)\n",
    "        return prediction, hidden, cell\n",
    "\n",
    "class Seq2Seq(nn.Module):\n",
    "    def __init__(self, input_word_count, output_word_count, encode_dim, decode_dim, \n",
    "                hidden_dim, n_layers, encode_dropout, decode_dropout, device):\n",
    "        super().__init__()\n",
    "        self.encoder = Encoder(input_word_count,  encode_dim, hidden_dim, n_layers, encode_dropout)\n",
    "        self.decoder = Decoder(output_word_count, decode_dim, hidden_dim, n_layers, decode_dropout)\n",
    "        self.device  = device\n",
    "        \n",
    "    def forward(self, src, trg, teacher_forcing_ratio = 0.5):\n",
    "        \"\"\" \n",
    "        @brief Encode src once, then decode the target token by token.\n",
    "        @param src a batch of source sentences;  shape = [src len, batch size] \n",
    "        @param trg a batch of target sentences;  shape = [trg len, batch size] \n",
    "        @param teacher_forcing_ratio probability of teacher forcing: feeding the ground-truth\n",
    "               target token (instead of the model's own prediction) into the decoder.\n",
    "        \"\"\"\n",
    "        batch_size     = trg.shape[1]\n",
    "        trg_len        = trg.shape[0]\n",
    "        trg_vocab_size = self.decoder.output_dim         # target-language (Chinese) vocabulary size\n",
    "        #tensor to store decoder outputs\n",
    "        \n",
    "        #! buffer for the decoder scores of every target position\n",
    "        outputs = torch.zeros(trg_len, batch_size, trg_vocab_size).to(self.device)\n",
    "\n",
    "        #last hidden state of the encoder is used as the initial hidden state of the decoder\n",
    "        #! the encoder runs exactly once\n",
    "        #! hidden shape = [n_layers, batch_size, hid_dim]   final time step only, hence no src_seq_len\n",
    "        #! cell shape   = [n_layers, batch_size, hid_dim]\n",
    "        hidden, cell = self.encoder(src)\n",
    "\n",
    "        # first input: the <sos> token of every target sentence\n",
    "        input = trg[0,:]     #! rows index token positions, columns index the batch\n",
    "\n",
    "        #! the decoder must run token by token\n",
    "        for t in range(1, trg_len):\n",
    "            #! output shape = [batch_size, output_dim]       one step at a time, so no seq_len\n",
    "            #! hidden shape = [n_layers, batch_size, hid_dim]     \n",
    "            #! cell shape   = [n_layers, batch_size, hid_dim]\n",
    "            #! hidden/cell come from the encoder on the first step, from the decoder itself afterwards\n",
    "            output, hidden, cell = self.decoder(input, hidden, cell)\n",
    "            outputs[t] = output                               # keep this step's scores\n",
    "\n",
    "            teacher_force = random.random() < teacher_forcing_ratio\n",
    "            top1 = output.argmax(1) \n",
    "\n",
    "            #! next input: the ground-truth token, or the model's top-1 prediction\n",
    "            input = trg[t] if teacher_force else top1\n",
    "        return outputs"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 4. 初始化模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "source_word_count = len(en_wl)     # English vocabulary size (incl. the 3 special tokens)\n",
    "target_word_count = len(zh_wl)     # Chinese vocabulary size (incl. the 3 special tokens)\n",
    "encode_dim        = 256            # encoder embedding size\n",
    "decode_dim        = 256            # decoder embedding size\n",
    "hidden_dim        = 512            # LSTM hidden-state size (shared by encoder and decoder)\n",
    "n_layers          = 2              # stacked LSTM layers\n",
    "encode_dropout    = 0.5\n",
    "decode_dropout    = 0.5\n",
    "device = torch.device('cpu')\n",
    "model  = Seq2Seq(source_word_count, target_word_count, encode_dim, decode_dim, \n",
    "            hidden_dim, n_layers, encode_dropout, decode_dropout, device).to(device)\n",
    "\n",
    "def init_weights(m):\n",
    "    # Initialize every parameter uniformly in [-0.08, 0.08].\n",
    "    for name, param in m.named_parameters():\n",
    "        nn.init.uniform_(param.data, -0.08, 0.08)\n",
    "model.apply(init_weights)\n",
    "\n",
    "def count_parameters(model):\n",
    "    # Total number of trainable parameters.\n",
    "    return sum(p.numel() for p in model.parameters() if p.requires_grad)\n",
    "\n",
    "print(f'The model has {count_parameters(model):,} trainable parameters')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 5. 优化器与损失函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch.optim as optim\n",
    "optimizer = optim.Adam(model.parameters())\n",
    "criterion = nn.CrossEntropyLoss(ignore_index = pad_id)   # <pad> positions contribute nothing to the loss\n",
    ""
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 6. 训练与评估函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train(model, iterator, optimizer, criterion, clip):\n",
    "    \"\"\"Run one training epoch and return the mean per-batch loss.\"\"\"\n",
    "    model.train()\n",
    "    epoch_loss = 0\n",
    "    \n",
    "    for i, batch in enumerate(iterator):\n",
    "        src = batch[0]         #! LongTensor, shape [src_len, batch_size], e.g. torch.Size([54, 16])\n",
    "        trg = batch[1]         #! LongTensor, shape [trg_len, batch_size]\n",
    "        # print(src.shape, trg.shape)\n",
    "        optimizer.zero_grad()\n",
    "        \n",
    "        #! output shape = (trg_len, batch_size, trg_vocab_size)\n",
    "        output     = model(src, trg)        # decoder scores for every target position\n",
    "        output_dim = output.shape[-1]       # target-language vocabulary size\n",
    "        \n",
    "        output     = output[1:].view(-1, output_dim)  # drop position 0 (the <sos> begin token) and flatten\n",
    "        trg        = trg[1:].view(-1)                 # align targets the same way\n",
    "        loss       = criterion(output, trg)           # CrossEntropyLoss expects scores [N, C] vs targets [N]\n",
    "        # print(output.shape, trg.shape)\n",
    "        loss.backward()\n",
    "        \n",
    "        #! clip_grad_norm_ computes the norm of all parameter gradients and, if it exceeds the\n",
    "        #! given threshold, rescales them so the total norm equals the threshold.\n",
    "        # torch.nn.utils.clip_grad_norm_(parameters, max_norm, norm_type=2)\n",
    "        torch.nn.utils.clip_grad_norm_(model.parameters(), clip)\n",
    "        \n",
    "        optimizer.step()\n",
    "        epoch_loss += loss.item()\n",
    "    return epoch_loss / len(iterator)\n",
    "\n",
    "def evaluate(model, iterator, criterion):\n",
    "    \"\"\"Evaluate on an iterator without gradient updates; returns the mean per-batch loss.\"\"\"\n",
    "    model.eval()\n",
    "    epoch_loss = 0\n",
    "    with torch.no_grad():\n",
    "        for i, batch in enumerate(iterator):\n",
    "            src    = batch[0]\n",
    "            trg    = batch[1]\n",
    "            output = model(src, trg, 0) #turn off teacher forcing\n",
    "            output_dim = output.shape[-1]\n",
    "            output     = output[1:].view(-1, output_dim)\n",
    "            trg        = trg[1:].view(-1)\n",
    "\n",
    "            loss = criterion(output, trg)\n",
    "            epoch_loss += loss.item()\n",
    "    return epoch_loss / len(iterator)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 7. 训练模型与评估"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import math\n",
    "import time\n",
    "\n",
    "N_EPOCHS = 1\n",
    "CLIP     = 1                        # max gradient norm for clipping\n",
    "best_valid_loss = float('inf')\n",
    "\n",
    "def epoch_time(start_time, end_time):\n",
    "    \"\"\"Split an elapsed time in seconds into (minutes, seconds).\"\"\"\n",
    "    elapsed_time = end_time - start_time\n",
    "    elapsed_mins = int(elapsed_time / 60)\n",
    "    elapsed_secs = int(elapsed_time - (elapsed_mins * 60))\n",
    "    return elapsed_mins, elapsed_secs\n",
    "\n",
    "for epoch in tqdm(list(range(N_EPOCHS))):\n",
    "    start_time = time.time()\n",
    "    train_loss = train(model, train_data_loader, optimizer, criterion, CLIP)\n",
    "    # valid_loss = evaluate(model, dev_data_loader, criterion)   # validation left disabled\n",
    "    end_time   = time.time()\n",
    "    \n",
    "    epoch_mins, epoch_secs = epoch_time(start_time, end_time)\n",
    "    \n",
    "    # if valid_loss < best_valid_loss:\n",
    "    #     best_valid_loss = valid_loss\n",
    "    #     torch.save(model.state_dict(), 'tut1-model.pt')\n",
    "    \n",
    "    print(f'Epoch: {epoch+1:02} | Time: {epoch_mins}m {epoch_secs}s')\n",
    "    print(f'\\tTrain Loss: {train_loss:.3f} | Train PPL: {math.exp(train_loss):7.3f}')\n",
    "    # print(f'\\t Val. Loss: {valid_loss:.3f} |  Val. PPL: {math.exp(valid_loss):7.3f}')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 8. 评估测试"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def translate(en_sentence, max_len=100):\n",
    "    \"\"\"Greedy-decode one English sentence into Chinese.\n",
    "\n",
    "    @param en_sentence  raw English sentence (a string)\n",
    "    @param max_len      hard cap on generated tokens so decoding cannot loop forever\n",
    "    @return list of raw decoder output tensors, one per generated token\n",
    "    \"\"\"\n",
    "    words = []\n",
    "    for word in en_sentence.strip().split(\" \"):   # fix: was iterating en_words (the vocab set)\n",
    "        word = word.replace(\".\", \"\").replace(\",\", \"\").lower()\n",
    "        if word:                                  # drop tokens emptied by punctuation stripping\n",
    "            words.append(word)\n",
    "    ids = [[0]]                                   # <sos>\n",
    "    for word in words:\n",
    "        if word in en2id:                         # skip out-of-vocabulary words\n",
    "            ids.append([en2id[word]])             # fix: en2id is a dict, not callable\n",
    "    ids.append([1])                               # <eos>\n",
    "    src = torch.tensor(ids).to(device)            # shape [seq_len, 1]\n",
    "    model.decoder.eval()\n",
    "    model.encoder.eval()\n",
    "    l = []\n",
    "    with torch.no_grad():\n",
    "        hidden, cell = model.encoder(src)\n",
    "        input        = src[0,:]                   # start decoding from <sos>\n",
    "        for _ in range(max_len):\n",
    "            output, hidden, cell = model.decoder(input, hidden, cell)\n",
    "            l.append(output)\n",
    "            top1 = output.argmax(1)\n",
    "            if top1 == 1:                         # decoder produced <eos>: stop\n",
    "                break\n",
    "            input = top1\n",
    "    return l"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
