{
 "cells": [
  {
   "cell_type": "code",
   "id": "initial_id",
   "metadata": {
    "collapsed": true,
    "ExecuteTime": {
     "end_time": "2025-05-26T06:27:14.852787Z",
     "start_time": "2025-05-26T06:27:14.844299Z"
    }
   },
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "from torch.nn import Transformer\n",
    "import math\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "from torch.nn.utils.rnn import pad_sequence\n",
    "\n",
    "# Configuration / hyperparameters\n",
    "DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "BATCH_SIZE = 2\n",
    "NUM_EPOCHS = 100\n",
    "LEARNING_RATE = 0.001\n",
    "D_MODEL = 128  # embedding size / model width\n",
    "NUM_HEAD = 4\n",
    "NUM_ENCODER_LAYERS = 3\n",
    "NUM_DECODER_LAYERS = 3\n",
    "DIM_FEEDFORWARD = 512\n",
    "DROPOUT = 0.1\n",
    "MAX_SEQ_LENGTH = 20  # maximum number of tokens generated at inference time"
   ],
   "outputs": [],
   "execution_count": 79
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T06:27:14.871161Z",
     "start_time": "2025-05-26T06:27:14.868623Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Toy parallel corpus: (Chinese source, English target) sentence pairs\n",
    "raw_data = [\n",
    "    (\"你好，今天天气真好！\", \"Hello, the weather is nice today!\"),\n",
    "    (\"你吃饭了吗？\", \"Have you eaten yet?\"),\n",
    "    (\"深度学习很有趣。\", \"Deep learning is interesting.\"),\n",
    "    (\"我们一起学习吧。\", \"Let's study together.\"),\n",
    "    (\"这是一个测试例子。\", \"This is a test example.\")\n",
    "]"
   ],
   "id": "f1c6fdad671f2635",
   "outputs": [],
   "execution_count": 80
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T06:27:14.889299Z",
     "start_time": "2025-05-26T06:27:14.883390Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Data preprocessing\n",
    "def preprocess_data(data):\n",
    "    \"\"\"Tokenize the corpus: Chinese per character, English per whitespace word.\n",
    "\n",
    "    English is lower-cased.  Returns (src_sentences, tgt_sentences), two\n",
    "    parallel lists of token lists.\n",
    "    \"\"\"\n",
    "    src_sentences = [list(zh) for zh, _ in data]\n",
    "    tgt_sentences = [en.lower().split() for _, en in data]\n",
    "    return src_sentences, tgt_sentences\n",
    "\n",
    "src_sentences, tgt_sentences = preprocess_data(raw_data)\n",
    "src_sentences, tgt_sentences"
   ],
   "id": "7aade81080628def",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "([['你', '好', '，', '今', '天', '天', '气', '真', '好', '！'],\n",
       "  ['你', '吃', '饭', '了', '吗', '？'],\n",
       "  ['深', '度', '学', '习', '很', '有', '趣', '。'],\n",
       "  ['我', '们', '一', '起', '学', '习', '吧', '。'],\n",
       "  ['这', '是', '一', '个', '测', '试', '例', '子', '。']],\n",
       " [['hello,', 'the', 'weather', 'is', 'nice', 'today!'],\n",
       "  ['have', 'you', 'eaten', 'yet?'],\n",
       "  ['deep', 'learning', 'is', 'interesting.'],\n",
       "  [\"let's\", 'study', 'together.'],\n",
       "  ['this', 'is', 'a', 'test', 'example.']])"
      ]
     },
     "execution_count": 81,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 81
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T06:27:14.915172Z",
     "start_time": "2025-05-26T06:27:14.910827Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Vocabulary construction\n",
    "def build_vocab(sentences):\n",
    "    \"\"\"Assign a unique, insertion-ordered index to every token.\n",
    "\n",
    "    Special symbols occupy ids 0-3; real tokens start at 4.\n",
    "    \"\"\"\n",
    "    vocab = {'<pad>': 0, '<sos>': 1, '<eos>': 2, '<unk>': 3}\n",
    "    for token in (tok for sentence in sentences for tok in sentence):\n",
    "        # setdefault only inserts unseen tokens; len(vocab) is the next free id.\n",
    "        vocab.setdefault(token, len(vocab))\n",
    "    return vocab\n",
    "\n",
    "src_vocab = build_vocab(src_sentences)\n",
    "tgt_vocab = build_vocab(tgt_sentences)\n",
    "\n",
    "src_vocab, tgt_vocab"
   ],
   "id": "cb470ec639e0c102",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "({'<pad>': 0,\n",
       "  '<sos>': 1,\n",
       "  '<eos>': 2,\n",
       "  '<unk>': 3,\n",
       "  '你': 4,\n",
       "  '好': 5,\n",
       "  '，': 6,\n",
       "  '今': 7,\n",
       "  '天': 8,\n",
       "  '气': 9,\n",
       "  '真': 10,\n",
       "  '！': 11,\n",
       "  '吃': 12,\n",
       "  '饭': 13,\n",
       "  '了': 14,\n",
       "  '吗': 15,\n",
       "  '？': 16,\n",
       "  '深': 17,\n",
       "  '度': 18,\n",
       "  '学': 19,\n",
       "  '习': 20,\n",
       "  '很': 21,\n",
       "  '有': 22,\n",
       "  '趣': 23,\n",
       "  '。': 24,\n",
       "  '我': 25,\n",
       "  '们': 26,\n",
       "  '一': 27,\n",
       "  '起': 28,\n",
       "  '吧': 29,\n",
       "  '这': 30,\n",
       "  '是': 31,\n",
       "  '个': 32,\n",
       "  '测': 33,\n",
       "  '试': 34,\n",
       "  '例': 35,\n",
       "  '子': 36},\n",
       " {'<pad>': 0,\n",
       "  '<sos>': 1,\n",
       "  '<eos>': 2,\n",
       "  '<unk>': 3,\n",
       "  'hello,': 4,\n",
       "  'the': 5,\n",
       "  'weather': 6,\n",
       "  'is': 7,\n",
       "  'nice': 8,\n",
       "  'today!': 9,\n",
       "  'have': 10,\n",
       "  'you': 11,\n",
       "  'eaten': 12,\n",
       "  'yet?': 13,\n",
       "  'deep': 14,\n",
       "  'learning': 15,\n",
       "  'interesting.': 16,\n",
       "  \"let's\": 17,\n",
       "  'study': 18,\n",
       "  'together.': 19,\n",
       "  'this': 20,\n",
       "  'a': 21,\n",
       "  'test': 22,\n",
       "  'example.': 23})"
      ]
     },
     "execution_count": 82,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 82
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T06:27:14.925933Z",
     "start_time": "2025-05-26T06:27:14.922757Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Inverse target vocabulary (index -> token), used to decode model output\n",
    "idx_to_tgt = dict((index, token) for token, index in tgt_vocab.items())\n",
    "idx_to_tgt"
   ],
   "id": "87fa6708baf80ca4",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{0: '<pad>',\n",
       " 1: '<sos>',\n",
       " 2: '<eos>',\n",
       " 3: '<unk>',\n",
       " 4: 'hello,',\n",
       " 5: 'the',\n",
       " 6: 'weather',\n",
       " 7: 'is',\n",
       " 8: 'nice',\n",
       " 9: 'today!',\n",
       " 10: 'have',\n",
       " 11: 'you',\n",
       " 12: 'eaten',\n",
       " 13: 'yet?',\n",
       " 14: 'deep',\n",
       " 15: 'learning',\n",
       " 16: 'interesting.',\n",
       " 17: \"let's\",\n",
       " 18: 'study',\n",
       " 19: 'together.',\n",
       " 20: 'this',\n",
       " 21: 'a',\n",
       " 22: 'test',\n",
       " 23: 'example.'}"
      ]
     },
     "execution_count": 83,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 83
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T06:27:14.939457Z",
     "start_time": "2025-05-26T06:27:14.933784Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Custom Dataset\n",
    "class TranslationDataset(Dataset):\n",
    "    \"\"\"Pre-encodes each sentence pair into (src, tgt_input, tgt_output) LongTensors.\n",
    "\n",
    "    src        : <sos> + source token ids + <eos>\n",
    "    tgt_input  : <sos> + target token ids          (decoder input, teacher forcing)\n",
    "    tgt_output : target token ids + <eos>          (decoder prediction target)\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, src_sentences, tgt_sentences, src_vocab, tgt_vocab):\n",
    "        def encode(tokens, vocab):\n",
    "            # Unknown tokens fall back to <unk>.\n",
    "            return [vocab.get(tok, vocab['<unk>']) for tok in tokens]\n",
    "\n",
    "        self.data = []\n",
    "        for src, tgt in zip(src_sentences, tgt_sentences):\n",
    "            src_ids = encode(src, src_vocab)\n",
    "            tgt_ids = encode(tgt, tgt_vocab)\n",
    "            self.data.append((\n",
    "                torch.tensor([src_vocab['<sos>']] + src_ids + [src_vocab['<eos>']], dtype=torch.long),\n",
    "                torch.tensor([tgt_vocab['<sos>']] + tgt_ids, dtype=torch.long),\n",
    "                torch.tensor(tgt_ids + [tgt_vocab['<eos>']], dtype=torch.long)\n",
    "            ))\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.data)\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        return self.data[idx]\n",
    "\n",
    "dataset = TranslationDataset(src_sentences, tgt_sentences, src_vocab, tgt_vocab)\n",
    "dataset[:2]"
   ],
   "id": "9d1c27155a783a6b",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[(tensor([ 1,  4,  5,  6,  7,  8,  8,  9, 10,  5, 11,  2]),\n",
       "  tensor([1, 4, 5, 6, 7, 8, 9]),\n",
       "  tensor([4, 5, 6, 7, 8, 9, 2])),\n",
       " (tensor([ 1,  4, 12, 13, 14, 15, 16,  2]),\n",
       "  tensor([ 1, 10, 11, 12, 13]),\n",
       "  tensor([10, 11, 12, 13,  2]))]"
      ]
     },
     "execution_count": 84,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 84
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T06:27:14.954522Z",
     "start_time": "2025-05-26T06:27:14.951756Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# DataLoader with padding collation\n",
    "def collate_fn(batch):\n",
    "    \"\"\"Pad each field of the batch to a common length.\n",
    "\n",
    "    pad_sequence defaults to seq-first layout, so every returned tensor is\n",
    "    shaped (seq_len, batch) -- the layout nn.Transformer expects by default.\n",
    "    \"\"\"\n",
    "    srcs, tgt_ins, tgt_outs = zip(*batch)\n",
    "    src_pad = src_vocab['<pad>']\n",
    "    tgt_pad = tgt_vocab['<pad>']\n",
    "    return (\n",
    "        pad_sequence(srcs, padding_value=src_pad),\n",
    "        pad_sequence(tgt_ins, padding_value=tgt_pad),\n",
    "        pad_sequence(tgt_outs, padding_value=tgt_pad)\n",
    "    )\n",
    "\n",
    "data_loader = DataLoader(dataset, batch_size=BATCH_SIZE, collate_fn=collate_fn)"
   ],
   "id": "26f6e536375b476b",
   "outputs": [],
   "execution_count": 85
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T06:27:14.965606Z",
     "start_time": "2025-05-26T06:27:14.962868Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Sinusoidal positional encoding (precomputed for max_len positions;\n",
    "# forward slices out what the current input needs).\n",
    "class PositionalEncoding(nn.Module):\n",
    "    def __init__(self, d_model, max_len=5000):\n",
    "        super().__init__()\n",
    "        pe = torch.zeros(max_len, d_model)\n",
    "        pos = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n",
    "        pe[:, 0::2] = torch.sin(pos / (10000 ** (2 * torch.arange(0, d_model, 2) / d_model)))\n",
    "        pe[:, 1::2] = torch.cos(pos / (10000 ** (2 * torch.arange(1, d_model, 2) / d_model)))\n",
    "        # Shape (max_len, 1, d_model): broadcasts over the batch dimension of a\n",
    "        # seq-first (seq_len, batch, d_model) input, so each *position* gets its\n",
    "        # own encoding.  (The previous code sliced by x.size(1) -- the BATCH\n",
    "        # dimension -- applying batch-indexed encodings instead of positional ones.)\n",
    "        # register_buffer makes pe follow the module across .to(DEVICE) and\n",
    "        # appear in state_dict without being a trainable parameter.\n",
    "        self.register_buffer('pe', pe.unsqueeze(1))\n",
    "\n",
    "    def forward(self, x):\n",
    "        # x: (seq_len, batch, d_model) -- pad_sequence's default layout\n",
    "        return x + self.pe[:x.size(0)]"
   ],
   "id": "b4c6148adffb1b54",
   "outputs": [],
   "execution_count": 86
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T06:27:14.976868Z",
     "start_time": "2025-05-26T06:27:14.973823Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Transformer model\n",
    "class TransformerModel(nn.Module):\n",
    "    \"\"\"Seq2seq Transformer for the toy zh->en task.\n",
    "\n",
    "    Expects seq-first index tensors: src (src_len, batch), tgt (tgt_len, batch).\n",
    "    Returns logits of shape (tgt_len, batch, len(tgt_vocab)).\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "        self.src_embedding = nn.Embedding(len(src_vocab), D_MODEL)\n",
    "        self.tgt_embedding = nn.Embedding(len(tgt_vocab), D_MODEL)\n",
    "        self.pos_encoder = PositionalEncoding(D_MODEL)\n",
    "        self.transformer = Transformer(\n",
    "            d_model=D_MODEL,\n",
    "            nhead=NUM_HEAD,\n",
    "            num_encoder_layers=NUM_ENCODER_LAYERS,\n",
    "            num_decoder_layers=NUM_DECODER_LAYERS,\n",
    "            dim_feedforward=DIM_FEEDFORWARD,\n",
    "            dropout=DROPOUT\n",
    "        )\n",
    "        self.fc_out = nn.Linear(D_MODEL, len(tgt_vocab))\n",
    "\n",
    "    def forward(self, src, tgt):\n",
    "        # Causal mask: during training tgt's length is known up front, so the\n",
    "        # full (tgt_len, tgt_len) subsequent mask is built once per batch.\n",
    "        tgt_mask = self.transformer.generate_square_subsequent_mask(tgt.size(0)).to(DEVICE)\n",
    "\n",
    "        # Padding masks, shape (batch, seq): True where a position is <pad>,\n",
    "        # so attention ignores padding instead of attending to it.\n",
    "        src_key_padding_mask = (src == src_vocab['<pad>']).transpose(0, 1)\n",
    "        tgt_key_padding_mask = (tgt == tgt_vocab['<pad>']).transpose(0, 1)\n",
    "\n",
    "        # Embeddings scaled by sqrt(d_model), then positional encoding.\n",
    "        src_emb = self.pos_encoder(self.src_embedding(src) * math.sqrt(D_MODEL))\n",
    "        tgt_emb = self.pos_encoder(self.tgt_embedding(tgt) * math.sqrt(D_MODEL))\n",
    "\n",
    "        output = self.transformer(\n",
    "            src_emb, tgt_emb,\n",
    "            tgt_mask=tgt_mask,\n",
    "            src_key_padding_mask=src_key_padding_mask,\n",
    "            tgt_key_padding_mask=tgt_key_padding_mask,\n",
    "            memory_key_padding_mask=src_key_padding_mask\n",
    "        )\n",
    "\n",
    "        return self.fc_out(output)"
   ],
   "id": "8e52097d64495e52",
   "outputs": [],
   "execution_count": 87
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T06:27:17.856301Z",
     "start_time": "2025-05-26T06:27:14.983828Z"
    }
   },
   "cell_type": "code",
   "source": [
    "model = TransformerModel().to(DEVICE)\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)\n",
    "# <pad> positions contribute nothing to the loss.\n",
    "criterion = nn.CrossEntropyLoss(ignore_index=tgt_vocab['<pad>'])\n",
    "\n",
    "# Training loop (teacher forcing: decoder reads tgt_input, predicts tgt_output)\n",
    "for epoch in range(NUM_EPOCHS):\n",
    "    model.train()\n",
    "    epoch_loss = 0.0\n",
    "\n",
    "    for batch in data_loader:\n",
    "        src, tgt_input, tgt_output = (t.to(DEVICE) for t in batch)\n",
    "\n",
    "        optimizer.zero_grad()\n",
    "        logits = model(src, tgt_input)\n",
    "\n",
    "        # Flatten (seq_len, batch, vocab) -> (seq_len*batch, vocab) for CE.\n",
    "        loss = criterion(logits.view(-1, logits.size(-1)), tgt_output.view(-1))\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "\n",
    "        epoch_loss += loss.item()\n",
    "\n",
    "    avg_loss = epoch_loss / len(data_loader)\n",
    "    print(f'Epoch [{epoch+1}/{NUM_EPOCHS}], Loss: {avg_loss:.4f}')"
   ],
   "id": "2eb65b25b022f95f",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch [1/100], Loss: 3.6089\n",
      "Epoch [2/100], Loss: 2.7485\n",
      "Epoch [3/100], Loss: 2.5500\n",
      "Epoch [4/100], Loss: 2.0145\n",
      "Epoch [5/100], Loss: 1.6653\n",
      "Epoch [6/100], Loss: 1.2310\n",
      "Epoch [7/100], Loss: 0.9032\n",
      "Epoch [8/100], Loss: 0.7228\n",
      "Epoch [9/100], Loss: 0.5670\n",
      "Epoch [10/100], Loss: 0.4589\n",
      "Epoch [11/100], Loss: 0.3643\n",
      "Epoch [12/100], Loss: 0.2827\n",
      "Epoch [13/100], Loss: 0.2007\n",
      "Epoch [14/100], Loss: 0.1878\n",
      "Epoch [15/100], Loss: 0.1415\n",
      "Epoch [16/100], Loss: 0.1073\n",
      "Epoch [17/100], Loss: 0.1058\n",
      "Epoch [18/100], Loss: 0.0883\n",
      "Epoch [19/100], Loss: 0.0716\n",
      "Epoch [20/100], Loss: 0.0709\n",
      "Epoch [21/100], Loss: 0.0587\n",
      "Epoch [22/100], Loss: 0.0553\n",
      "Epoch [23/100], Loss: 0.0515\n",
      "Epoch [24/100], Loss: 0.0431\n",
      "Epoch [25/100], Loss: 0.0523\n",
      "Epoch [26/100], Loss: 0.0381\n",
      "Epoch [27/100], Loss: 0.0398\n",
      "Epoch [28/100], Loss: 0.0376\n",
      "Epoch [29/100], Loss: 0.0307\n",
      "Epoch [30/100], Loss: 0.0314\n",
      "Epoch [31/100], Loss: 0.0278\n",
      "Epoch [32/100], Loss: 0.0296\n",
      "Epoch [33/100], Loss: 0.0277\n",
      "Epoch [34/100], Loss: 0.0269\n",
      "Epoch [35/100], Loss: 0.0250\n",
      "Epoch [36/100], Loss: 0.0242\n",
      "Epoch [37/100], Loss: 0.0236\n",
      "Epoch [38/100], Loss: 0.0221\n",
      "Epoch [39/100], Loss: 0.0212\n",
      "Epoch [40/100], Loss: 0.0201\n",
      "Epoch [41/100], Loss: 0.0216\n",
      "Epoch [42/100], Loss: 0.0210\n",
      "Epoch [43/100], Loss: 0.0189\n",
      "Epoch [44/100], Loss: 0.0180\n",
      "Epoch [45/100], Loss: 0.0180\n",
      "Epoch [46/100], Loss: 0.0191\n",
      "Epoch [47/100], Loss: 0.0193\n",
      "Epoch [48/100], Loss: 0.0168\n",
      "Epoch [49/100], Loss: 0.0166\n",
      "Epoch [50/100], Loss: 0.0153\n",
      "Epoch [51/100], Loss: 0.0150\n",
      "Epoch [52/100], Loss: 0.0149\n",
      "Epoch [53/100], Loss: 0.0152\n",
      "Epoch [54/100], Loss: 0.0139\n",
      "Epoch [55/100], Loss: 0.0138\n",
      "Epoch [56/100], Loss: 0.0129\n",
      "Epoch [57/100], Loss: 0.0150\n",
      "Epoch [58/100], Loss: 0.0128\n",
      "Epoch [59/100], Loss: 0.0121\n",
      "Epoch [60/100], Loss: 0.0135\n",
      "Epoch [61/100], Loss: 0.0110\n",
      "Epoch [62/100], Loss: 0.0112\n",
      "Epoch [63/100], Loss: 0.0122\n",
      "Epoch [64/100], Loss: 0.0120\n",
      "Epoch [65/100], Loss: 0.0110\n",
      "Epoch [66/100], Loss: 0.0106\n",
      "Epoch [67/100], Loss: 0.0103\n",
      "Epoch [68/100], Loss: 0.0109\n",
      "Epoch [69/100], Loss: 0.0101\n",
      "Epoch [70/100], Loss: 0.0105\n",
      "Epoch [71/100], Loss: 0.0092\n",
      "Epoch [72/100], Loss: 0.0099\n",
      "Epoch [73/100], Loss: 0.0097\n",
      "Epoch [74/100], Loss: 0.0093\n",
      "Epoch [75/100], Loss: 0.0092\n",
      "Epoch [76/100], Loss: 0.0088\n",
      "Epoch [77/100], Loss: 0.0091\n",
      "Epoch [78/100], Loss: 0.0086\n",
      "Epoch [79/100], Loss: 0.0089\n",
      "Epoch [80/100], Loss: 0.0081\n",
      "Epoch [81/100], Loss: 0.0086\n",
      "Epoch [82/100], Loss: 0.0083\n",
      "Epoch [83/100], Loss: 0.0072\n",
      "Epoch [84/100], Loss: 0.0082\n",
      "Epoch [85/100], Loss: 0.0077\n",
      "Epoch [86/100], Loss: 0.0078\n",
      "Epoch [87/100], Loss: 0.0073\n",
      "Epoch [88/100], Loss: 0.0077\n",
      "Epoch [89/100], Loss: 0.0076\n",
      "Epoch [90/100], Loss: 0.0078\n",
      "Epoch [91/100], Loss: 0.0069\n",
      "Epoch [92/100], Loss: 0.0069\n",
      "Epoch [93/100], Loss: 0.0070\n",
      "Epoch [94/100], Loss: 0.0064\n",
      "Epoch [95/100], Loss: 0.0063\n",
      "Epoch [96/100], Loss: 0.0070\n",
      "Epoch [97/100], Loss: 0.0064\n",
      "Epoch [98/100], Loss: 0.0062\n",
      "Epoch [99/100], Loss: 0.0058\n",
      "Epoch [100/100], Loss: 0.0062\n"
     ]
    }
   ],
   "execution_count": 88
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T07:32:00.682819Z",
     "start_time": "2025-05-26T07:32:00.672563Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Inference: greedy autoregressive decoding\n",
    "def translate(model, sentence, src_vocab, tgt_vocab, device):\n",
    "    \"\"\"Translate one Chinese sentence to English by greedy decoding.\n",
    "\n",
    "    Stops at <eos> or after MAX_SEQ_LENGTH generated tokens.\n",
    "    \"\"\"\n",
    "    model.eval()\n",
    "    with torch.no_grad():  # inference: no autograd graph needed\n",
    "        tokens = list(sentence)\n",
    "        src_indices = [src_vocab.get(token, src_vocab['<unk>']) for token in tokens]\n",
    "        # (src_len, 1): seq-first with batch size 1, matching the training layout\n",
    "        src = torch.tensor([src_vocab['<sos>']] + src_indices + [src_vocab['<eos>']]).unsqueeze(1).to(device)\n",
    "\n",
    "        # Scale embeddings by sqrt(d_model) exactly as in training; omitting it\n",
    "        # (as the old code did) fed the encoder differently-scaled inputs.\n",
    "        memory = model.transformer.encoder(\n",
    "            model.pos_encoder(model.src_embedding(src) * math.sqrt(D_MODEL))\n",
    "        )\n",
    "\n",
    "        tgt = torch.tensor([[tgt_vocab['<sos>']]]).to(device)\n",
    "        for _ in range(MAX_SEQ_LENGTH):\n",
    "            # tgt grows every step, so the causal mask is rebuilt each iteration.\n",
    "            tgt_mask = model.transformer.generate_square_subsequent_mask(tgt.size(0)).to(device)\n",
    "\n",
    "            output = model.transformer.decoder(\n",
    "                model.pos_encoder(model.tgt_embedding(tgt) * math.sqrt(D_MODEL)),\n",
    "                memory,\n",
    "                tgt_mask=tgt_mask\n",
    "            )\n",
    "            next_token = model.fc_out(output[-1, :, :]).argmax().item()\n",
    "            tgt = torch.cat([tgt, torch.tensor([[next_token]]).to(device)], dim=0)\n",
    "\n",
    "            if next_token == tgt_vocab['<eos>']:\n",
    "                break\n",
    "\n",
    "    # Drop the leading <sos>; drop a trailing <eos> only if one was produced.\n",
    "    # (The old [1:-1] slice silently discarded a real last token whenever the\n",
    "    # loop hit MAX_SEQ_LENGTH without emitting <eos>.)\n",
    "    generated = tgt.squeeze(1).tolist()[1:]\n",
    "    if generated and generated[-1] == tgt_vocab['<eos>']:\n",
    "        generated = generated[:-1]\n",
    "    translated = [idx_to_tgt.get(idx, '<unk>') for idx in generated]\n",
    "    return ' '.join(translated)\n",
    "\n",
    "# Test translation\n",
    "test_sentence = \"我们一起学习吧。\"\n",
    "print(f'Source: {test_sentence}')\n",
    "print(f'Translation: {translate(model, test_sentence, src_vocab, tgt_vocab, DEVICE)}')"
   ],
   "id": "fed9769a193d5a54",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Source: 我们一起学习吧。\n",
      "Translation: let's study together.\n"
     ]
    }
   ],
   "execution_count": 103
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
