{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "eafa2358-4376-4538-8016-9208392dec71",
   "metadata": {},
   "source": [
    "### 1.数据预处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "46547fe3-44bc-49c5-bc85-008d7be1a222",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Open the parallel IWSLT zh-en corpus files (line-aligned; content lines\n",
    "# interleaved with <url>/<keywords>/... header tag lines).\n",
    "# NOTE(review): handles are never closed explicitly; acceptable in a\n",
    "# notebook, but consider `with` blocks if this moves to a script.\n",
    "fen = open(\"train.tags.zh-en.en\", encoding=\"utf-8\")\n",
    "fzh = open(\"train.tags.zh-en.zh\", encoding=\"utf-8\")\n",
    "en_zh = []  # parallel pairs: [[english tokens], [chinese characters]]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "18b73f71-d57c-4e69-9bf1-aa6065679c6a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Walk both files in lock-step, consuming talk-header tag lines and\n",
    "# collecting content lines as (english tokens, chinese characters) pairs.\n",
    "while True:\n",
    "    lz = fzh.readline()\n",
    "    le = fen.readline()\n",
    "    if not lz:  # EOF on the Chinese file\n",
    "        assert not le  # the English file must end at the same point\n",
    "        break\n",
    "    lz, le = lz.strip(), le.strip()\n",
    "\n",
    "    if lz.startswith(\"<url>\"):\n",
    "        assert le.startswith(\"<url>\")\n",
    "        # Each talk header carries these tag lines in this fixed order in\n",
    "        # both files; consume and sanity-check them pairwise. (Replaces five\n",
    "        # copy-pasted readline/assert stanzas with one loop.)\n",
    "        for tag in (\"<keywords>\", \"<speaker>\", \"<talkid>\", \"<title>\", \"<description>\"):\n",
    "            lz = fzh.readline()\n",
    "            le = fen.readline()\n",
    "            assert lz.startswith(tag)\n",
    "            assert le.startswith(tag)\n",
    "    else:\n",
    "        if not lz:\n",
    "            # NOTE(review): a blank content line aborts the whole scan;\n",
    "            # `continue` may have been intended -- confirm against the data.\n",
    "            assert not le\n",
    "            break\n",
    "        # English: split on spaces, strip periods/commas, lowercase, drop\n",
    "        # empties. Chinese: split into individual characters.\n",
    "        lee = []\n",
    "        for w in le.split(\" \"):\n",
    "            w = w.replace(\".\", \"\").replace(\",\", \"\").lower()\n",
    "            if w:\n",
    "                lee.append(w)\n",
    "        en_zh.append([lee, list(lz)])  # [[en tokens], [zh chars]]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "59a72d9e-2059-4825-ab59-0cdb90ce4080",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[['this', 'is', 'bill', 'lange', \"i'm\", 'dave', 'gallo'], ['大', '卫', '.', '盖', '罗', '：', '这', '位', '是', '比', '尔', '.', '兰', '格', '，', ' ', '我', '是', '大', '卫', '.', '盖', '罗', '。']], [['and', \"we're\", 'going', 'to', 'tell', 'you', 'some', 'stories', 'from', 'the', 'sea', 'here', 'in', 'video'], ['我', '们', '将', '用', '一', '些', '影', '片', '来', '讲', '述', '一', '些', '深', '海', '里', '的', '故', '事', '。']], [[\"we've\", 'got', 'some', 'of', 'the', 'most', 'incredible', 'video', 'of', 'titanic', \"that's\", 'ever', 'been', 'seen', 'and', \"we're\", 'not', 'going', 'to', 'show', 'you', 'any', 'of', 'it'], ['我', '们', '这', '有', '不', '少', '精', '彩', '的', '泰', '坦', '尼', '克', '的', '影', '片', '，', ' ', '可', '惜', '您', '今', '天', '看', '不', '到', '。']]]\n"
     ]
    }
   ],
   "source": [
    "# Peek at the first three (english tokens, chinese characters) pairs.\n",
    "print(en_zh[:3])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "d34ed74e-1791-4c6c-93c5-c2334b0d2921",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|████████████████████████████████████████████████████████████████████████| 48239/48239 [00:00<00:00, 128266.36it/s]\n"
     ]
    }
   ],
   "source": [
    "# Collect vocabularies: unique English words and unique Chinese characters.\n",
    "from tqdm import tqdm\n",
    "en_words = set()\n",
    "zh_words = set()\n",
    "\n",
    "for pair in tqdm(en_zh):\n",
    "    en_words.update(pair[0])\n",
    "    # Skip falsy entries (empty strings) on the Chinese side.\n",
    "    zh_words.update(ch for ch in pair[1] if ch)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "0833f376-c318-431c-ad96-68619b47fe9f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Prepend the special tokens to each vocabulary and build word -> id maps.\n",
    "en_wl = [\"<sos>\", \"<eos>\", \"<pad>\"] + list(en_words)\n",
    "zh_wl = [\"<sos>\", \"<eos>\", \"<pad>\"] + list(zh_words)\n",
    "\n",
    "pad_id = 2  # index of <pad> in both vocabularies\n",
    "en2id = {w: i for i, w in enumerate(en_wl)}\n",
    "zh2id = {w: i for i, w in enumerate(zh_wl)}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "08a8aac4-0ae9-4049-ae11-e2c5e5dde688",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "9763"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Spot-check a lookup in the English vocabulary.\n",
    "en2id[\"this\"]"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c67b509a-9136-4165-b3b1-150bdb25322e",
   "metadata": {},
   "source": [
    "### 2.构建训练集和测试集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "3f142ac0-25f5-4aab-b677-22460da9a49f",
   "metadata": {},
   "outputs": [],
   "source": [
    "import random\n",
    "\n",
    "# Seed the shuffle so the 80/20 train/test split is reproducible across\n",
    "# kernel restarts (previously unseeded -> a different split every run).\n",
    "random.seed(42)\n",
    "random.shuffle(en_zh)\n",
    "data_len = len(en_zh)\n",
    "train_data = en_zh[: int(data_len * 0.8)]\n",
    "test_data = en_zh[int(data_len * 0.8):]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "b729d867-4258-4b57-beba-0237f6e4670d",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "batch_size = 16  # sentences per mini-batch\n",
    "#data_work = 8"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "2fa1e383-d150-4a0a-82a1-01ac690e5331",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Dataset subclass over the parallel pairs\n",
    "class MyDataset(torch.utils.data.Dataset):\n",
    "    \"\"\"Serves (en_tokens, en_len, zh_chars, zh_len, index) tuples.\"\"\"\n",
    "\n",
    "    def __init__(self, examples):\n",
    "        self.examples = examples\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.examples)\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        en_sent, zh_sent = self.examples[index]  # [english tokens], [chinese chars]\n",
    "        return en_sent, len(en_sent), zh_sent, len(zh_sent), index"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "id": "83fe05af-f77f-46ab-8299-fbd7f3413588",
   "metadata": {},
   "outputs": [],
   "source": [
    "def the_collate_fn(batch):\n",
    "    \"\"\"Pad a batch into time-major LongTensors.\n",
    "\n",
    "    Returns (src, tar, index): src/tar have shape (max_len + 2, batch) --\n",
    "    an <sos> row (id 0), the padded id rows, then an <eos> row (id 1).\n",
    "\n",
    "    Bug fix: the original appended whole sentences (length max_len) as rows\n",
    "    next to <sos>/<eos> rows of length batch_size, so torch.LongTensor\n",
    "    raised on the ragged list whenever max_len != batch_size.\n",
    "    \"\"\"\n",
    "    n = len(batch)\n",
    "    src_max = max((b[1] for b in batch), default=0)  # longest English sentence\n",
    "    tar_max = max((b[3] for b in batch), default=0)  # longest Chinese sentence\n",
    "\n",
    "    def encode(tokens, vocab, width):\n",
    "        # Map tokens to ids and right-pad to the batch-wide width.\n",
    "        ids = [vocab[t] for t in tokens]\n",
    "        ids.extend([pad_id] * (width - len(ids)))\n",
    "        return ids\n",
    "\n",
    "    src_rows = [encode(b[0], en2id, src_max) for b in batch]  # (batch, src_max)\n",
    "    tar_rows = [encode(b[2], zh2id, tar_max) for b in batch]  # (batch, tar_max)\n",
    "\n",
    "    # Transpose to time-major (one row per time step) and frame with the\n",
    "    # <sos> / <eos> rows, matching nn.LSTM's default (seq, batch) layout.\n",
    "    src = [[0] * n] + [list(col) for col in zip(*src_rows)] + [[1] * n]\n",
    "    tar = [[0] * n] + [list(col) for col in zip(*tar_rows)] + [[1] * n]\n",
    "\n",
    "    index = [b[4] for b in batch]\n",
    "    return torch.LongTensor(src), torch.LongTensor(tar), index"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "id": "9fb8f7d5-3d67-4013-83cc-24ea1e8b74d6",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Wrap each split in a Dataset and a DataLoader that pads via the_collate_fn.\n",
    "train_dataset = MyDataset(train_data)\n",
    "test_dataset = MyDataset(test_data)\n",
    "\n",
    "train_loader = torch.utils.data.DataLoader(\n",
    "    train_dataset,\n",
    "    batch_size,\n",
    "    shuffle=True,\n",
    "    collate_fn=the_collate_fn,\n",
    ")\n",
    "\n",
    "# Keep the test split in a fixed order for comparable evaluation runs.\n",
    "test_loader = torch.utils.data.DataLoader(\n",
    "    test_dataset,\n",
    "    batch_size,\n",
    "    shuffle=False,\n",
    "    collate_fn=the_collate_fn\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "18b995a7-24a1-443f-a870-109644fab49e",
   "metadata": {},
   "source": [
    "### 3.定义模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "id": "1fddf305-4e85-4bdd-8b48-5de0be9f66ef",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Both the encoder and the decoder use an LSTM."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "id": "3209d6f9-2dbf-401a-96bb-f74eeadea8e7",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Encoder\n",
    "import torch.nn as nn\n",
    "\n",
    "class Encoder(nn.Module):\n",
    "    \"\"\"Embed source token ids, run them through an LSTM, and return the\n",
    "    final (hidden, cell) states for the decoder to start from.\"\"\"\n",
    "\n",
    "    def __init__(self, word_count, embed_dim, hidden_dim, n_layers, dropout):\n",
    "        super().__init__()\n",
    "        self.hidden_dim = hidden_dim\n",
    "        self.n_layers = n_layers\n",
    "        self.embedding = nn.Embedding(word_count, embed_dim)\n",
    "        self.lstm = nn.LSTM(embed_dim, hidden_dim,n_layers, dropout=dropout)\n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "        \n",
    "    def forward(self,src):\n",
    "        # src: presumably a (seq_len, batch) LongTensor -- nn.LSTM's default\n",
    "        # time-major layout; confirm against the collate function.\n",
    "        embedded = self.dropout(self.embedding(src))\n",
    "        outputs, (hidden, cell) = self.lstm(embedded)\n",
    "        return hidden, cell"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5e5c60a7-b387-42e0-a509-35c995228457",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Decoder: generates one time step per call, conditioned on the LSTM state.\n",
    "class Decoder(nn.Module):\n",
    "    \"\"\"LSTM decoder; forward returns (prediction, hidden, cell) where\n",
    "    prediction has shape (batch, output_dim).\"\"\"\n",
    "\n",
    "    def __init__(self, word_count, output_dim, embed_dim, hidden_dim, n_layers, dropout):\n",
    "        super().__init__()\n",
    "        self.output_dim = output_dim\n",
    "        self.hidden_dim = hidden_dim\n",
    "        self.n_layers = n_layers\n",
    "        self.embedding = nn.Embedding(word_count, embed_dim)\n",
    "        self.lstm = nn.LSTM(embed_dim, hidden_dim, n_layers, dropout=dropout)\n",
    "        self.cls = nn.Linear(hidden_dim, output_dim)\n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "\n",
    "    def forward(self, input, hidden, cell):\n",
    "        # input: (batch,) token ids for the current step; add the seq_len=1 axis.\n",
    "        input = input.unsqueeze(0)\n",
    "        embedded = self.dropout(self.embedding(input))\n",
    "        output, (hidden, cell) = self.lstm(embedded, (hidden, cell))\n",
    "        # Bug fix: `sequeeze` was a typo for `squeeze` and raised\n",
    "        # AttributeError on the first forward pass.\n",
    "        prediction = self.cls(output.squeeze(0))\n",
    "        return prediction, hidden, cell"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.19"
  },
  "widgets": {
   "application/vnd.jupyter.widget-state+json": {
    "state": {},
    "version_major": 2,
    "version_minor": 0
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
