{
 "cells": [
  {
   "cell_type": "code",
   "id": "initial_id",
   "metadata": {
    "collapsed": true,
    "ExecuteTime": {
     "end_time": "2025-05-26T06:53:36.381190Z",
     "start_time": "2025-05-26T06:53:36.376799Z"
    }
   },
   "source": [
    "# 模拟seq2seq中的两个seq\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "\n",
    "batch_size = 2\n",
    "seq_len = 5\n",
    "embedding_size = 100\n",
    "vocab_size = 1000\n",
    "\n",
    "inputs = torch.randint(0, vocab_size, (batch_size, seq_len))\n",
    "outputs = torch.randint(0, vocab_size, (batch_size, seq_len))"
   ],
   "outputs": [],
   "execution_count": 184
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T06:53:36.418316Z",
     "start_time": "2025-05-26T06:53:36.412421Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 位置编码\n",
    "# 位置编码只跟位置有关，和具体位置上输入的数据无关\n",
    "\n",
    "def positional_encoding(seq_len, embedding_size):\n",
    "    pe = torch.zeros(seq_len, embedding_size)\n",
    "    pos = torch.arange(0, seq_len, dtype=torch.float).unsqueeze(1)\n",
    "    pe[:, 0::2] = torch.sin(pos / (10000 ** (2 * torch.arange(0, embedding_size, 2) / embedding_size)))\n",
    "    pe[:, 1::2] = torch.cos(pos / (10000 ** (2 * torch.arange(1, embedding_size, 2) / embedding_size)))\n",
    "    return pe\n",
    "\n",
    "\n",
    "pe = positional_encoding(seq_len, embedding_size)\n",
    "pe"
   ],
   "id": "567f416a79f7297a",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ 0.0000e+00,  1.0000e+00,  0.0000e+00,  1.0000e+00,  0.0000e+00,\n",
       "          1.0000e+00,  0.0000e+00,  1.0000e+00,  0.0000e+00,  1.0000e+00,\n",
       "          0.0000e+00,  1.0000e+00,  0.0000e+00,  1.0000e+00,  0.0000e+00,\n",
       "          1.0000e+00,  0.0000e+00,  1.0000e+00,  0.0000e+00,  1.0000e+00,\n",
       "          0.0000e+00,  1.0000e+00,  0.0000e+00,  1.0000e+00,  0.0000e+00,\n",
       "          1.0000e+00,  0.0000e+00,  1.0000e+00,  0.0000e+00,  1.0000e+00,\n",
       "          0.0000e+00,  1.0000e+00,  0.0000e+00,  1.0000e+00,  0.0000e+00,\n",
       "          1.0000e+00,  0.0000e+00,  1.0000e+00,  0.0000e+00,  1.0000e+00,\n",
       "          0.0000e+00,  1.0000e+00,  0.0000e+00,  1.0000e+00,  0.0000e+00,\n",
       "          1.0000e+00,  0.0000e+00,  1.0000e+00,  0.0000e+00,  1.0000e+00,\n",
       "          0.0000e+00,  1.0000e+00,  0.0000e+00,  1.0000e+00,  0.0000e+00,\n",
       "          1.0000e+00,  0.0000e+00,  1.0000e+00,  0.0000e+00,  1.0000e+00,\n",
       "          0.0000e+00,  1.0000e+00,  0.0000e+00,  1.0000e+00,  0.0000e+00,\n",
       "          1.0000e+00,  0.0000e+00,  1.0000e+00,  0.0000e+00,  1.0000e+00,\n",
       "          0.0000e+00,  1.0000e+00,  0.0000e+00,  1.0000e+00,  0.0000e+00,\n",
       "          1.0000e+00,  0.0000e+00,  1.0000e+00,  0.0000e+00,  1.0000e+00,\n",
       "          0.0000e+00,  1.0000e+00,  0.0000e+00,  1.0000e+00,  0.0000e+00,\n",
       "          1.0000e+00,  0.0000e+00,  1.0000e+00,  0.0000e+00,  1.0000e+00,\n",
       "          0.0000e+00,  1.0000e+00,  0.0000e+00,  1.0000e+00,  0.0000e+00,\n",
       "          1.0000e+00,  0.0000e+00,  1.0000e+00,  0.0000e+00,  1.0000e+00],\n",
       "        [ 8.4147e-01,  6.7357e-01,  6.3795e-01,  8.3895e-01,  4.6056e-01,\n",
       "          9.2180e-01,  3.2511e-01,  9.6231e-01,  2.2709e-01,  9.8190e-01,\n",
       "          1.5783e-01,  9.9132e-01,  1.0943e-01,  9.9584e-01,  7.5785e-02,\n",
       "          9.9801e-01,  5.2457e-02,  9.9905e-01,  3.6300e-02,  9.9954e-01,\n",
       "          2.5116e-02,  9.9978e-01,  1.7377e-02,  9.9990e-01,  1.2022e-02,\n",
       "          9.9995e-01,  8.3175e-03,  9.9998e-01,  5.7544e-03,  9.9999e-01,\n",
       "          3.9811e-03,  9.9999e-01,  2.7542e-03,  1.0000e+00,  1.9055e-03,\n",
       "          1.0000e+00,  1.3183e-03,  1.0000e+00,  9.1201e-04,  1.0000e+00,\n",
       "          6.3096e-04,  1.0000e+00,  4.3652e-04,  1.0000e+00,  3.0200e-04,\n",
       "          1.0000e+00,  2.0893e-04,  1.0000e+00,  1.4454e-04,  1.0000e+00,\n",
       "          1.0000e-04,  1.0000e+00,  6.9183e-05,  1.0000e+00,  4.7863e-05,\n",
       "          1.0000e+00,  3.3113e-05,  1.0000e+00,  2.2909e-05,  1.0000e+00,\n",
       "          1.5849e-05,  1.0000e+00,  1.0965e-05,  1.0000e+00,  7.5858e-06,\n",
       "          1.0000e+00,  5.2481e-06,  1.0000e+00,  3.6308e-06,  1.0000e+00,\n",
       "          2.5119e-06,  1.0000e+00,  1.7378e-06,  1.0000e+00,  1.2023e-06,\n",
       "          1.0000e+00,  8.3176e-07,  1.0000e+00,  5.7544e-07,  1.0000e+00,\n",
       "          3.9811e-07,  1.0000e+00,  2.7542e-07,  1.0000e+00,  1.9055e-07,\n",
       "          1.0000e+00,  1.3183e-07,  1.0000e+00,  9.1201e-08,  1.0000e+00,\n",
       "          6.3096e-08,  1.0000e+00,  4.3652e-08,  1.0000e+00,  3.0200e-08,\n",
       "          1.0000e+00,  2.0893e-08,  1.0000e+00,  1.4454e-08,  1.0000e+00],\n",
       "        [ 9.0930e-01, -9.2598e-02,  9.8254e-01,  4.0768e-01,  8.1762e-01,\n",
       "          6.9942e-01,  6.1490e-01,  8.5208e-01,  4.4231e-01,  9.2826e-01,\n",
       "          3.1170e-01,  9.6544e-01,  2.1754e-01,  9.8341e-01,  1.5113e-01,\n",
       "          9.9205e-01,  1.0477e-01,  9.9619e-01,  7.2552e-02,  9.9818e-01,\n",
       "          5.0217e-02,  9.9913e-01,  3.4749e-02,  9.9958e-01,  2.4043e-02,\n",
       "          9.9980e-01,  1.6635e-02,  9.9990e-01,  1.1509e-02,  9.9995e-01,\n",
       "          7.9621e-03,  9.9998e-01,  5.5084e-03,  9.9999e-01,  3.8109e-03,\n",
       "          9.9999e-01,  2.6365e-03,  1.0000e+00,  1.8240e-03,  1.0000e+00,\n",
       "          1.2619e-03,  1.0000e+00,  8.7303e-04,  1.0000e+00,  6.0399e-04,\n",
       "          1.0000e+00,  4.1786e-04,  1.0000e+00,  2.8909e-04,  1.0000e+00,\n",
       "          2.0000e-04,  1.0000e+00,  1.3837e-04,  1.0000e+00,  9.5726e-05,\n",
       "          1.0000e+00,  6.6226e-05,  1.0000e+00,  4.5817e-05,  1.0000e+00,\n",
       "          3.1698e-05,  1.0000e+00,  2.1930e-05,  1.0000e+00,  1.5172e-05,\n",
       "          1.0000e+00,  1.0496e-05,  1.0000e+00,  7.2616e-06,  1.0000e+00,\n",
       "          5.0238e-06,  1.0000e+00,  3.4756e-06,  1.0000e+00,  2.4045e-06,\n",
       "          1.0000e+00,  1.6635e-06,  1.0000e+00,  1.1509e-06,  1.0000e+00,\n",
       "          7.9621e-07,  1.0000e+00,  5.5085e-07,  1.0000e+00,  3.8109e-07,\n",
       "          1.0000e+00,  2.6365e-07,  1.0000e+00,  1.8240e-07,  1.0000e+00,\n",
       "          1.2619e-07,  1.0000e+00,  8.7303e-08,  1.0000e+00,  6.0399e-08,\n",
       "          1.0000e+00,  4.1786e-08,  1.0000e+00,  2.8909e-08,  1.0000e+00],\n",
       "        [ 1.4112e-01, -7.9832e-01,  8.7532e-01, -1.5490e-01,  9.9091e-01,\n",
       "          3.6764e-01,  8.3788e-01,  6.7762e-01,  6.3442e-01,  8.4102e-01,\n",
       "          4.5775e-01,  9.2281e-01,  3.2304e-01,  9.6280e-01,  2.2561e-01,\n",
       "          9.8214e-01,  1.5679e-01,  9.9144e-01,  1.0871e-01,  9.9590e-01,\n",
       "          7.5285e-02,  9.9804e-01,  5.2110e-02,  9.9906e-01,  3.6060e-02,\n",
       "          9.9955e-01,  2.4950e-02,  9.9978e-01,  1.7262e-02,  9.9990e-01,\n",
       "          1.1943e-02,  9.9995e-01,  8.2626e-03,  9.9998e-01,  5.7164e-03,\n",
       "          9.9999e-01,  3.9548e-03,  9.9999e-01,  2.7360e-03,  1.0000e+00,\n",
       "          1.8929e-03,  1.0000e+00,  1.3095e-03,  1.0000e+00,  9.0599e-04,\n",
       "          1.0000e+00,  6.2679e-04,  1.0000e+00,  4.3363e-04,  1.0000e+00,\n",
       "          3.0000e-04,  1.0000e+00,  2.0755e-04,  1.0000e+00,  1.4359e-04,\n",
       "          1.0000e+00,  9.9339e-05,  1.0000e+00,  6.8726e-05,  1.0000e+00,\n",
       "          4.7547e-05,  1.0000e+00,  3.2894e-05,  1.0000e+00,  2.2757e-05,\n",
       "          1.0000e+00,  1.5744e-05,  1.0000e+00,  1.0892e-05,  1.0000e+00,\n",
       "          7.5357e-06,  1.0000e+00,  5.2134e-06,  1.0000e+00,  3.6068e-06,\n",
       "          1.0000e+00,  2.4953e-06,  1.0000e+00,  1.7263e-06,  1.0000e+00,\n",
       "          1.1943e-06,  1.0000e+00,  8.2627e-07,  1.0000e+00,  5.7164e-07,\n",
       "          1.0000e+00,  3.9548e-07,  1.0000e+00,  2.7360e-07,  1.0000e+00,\n",
       "          1.8929e-07,  1.0000e+00,  1.3095e-07,  1.0000e+00,  9.0599e-08,\n",
       "          1.0000e+00,  6.2679e-08,  1.0000e+00,  4.3363e-08,  1.0000e+00],\n",
       "        [-7.5680e-01, -9.8285e-01,  3.6559e-01, -6.6759e-01,  9.4151e-01,\n",
       "         -2.1631e-02,  9.6983e-01,  4.5209e-01,  7.9338e-01,  7.2333e-01,\n",
       "          5.9234e-01,  8.6417e-01,  4.2466e-01,  9.3419e-01,  2.9880e-01,\n",
       "          9.6832e-01,  2.0838e-01,  9.8480e-01,  1.4472e-01,  9.9271e-01,\n",
       "          1.0031e-01,  9.9651e-01,  6.9456e-02,  9.9833e-01,  4.8072e-02,\n",
       "          9.9920e-01,  3.3264e-02,  9.9962e-01,  2.3016e-02,  9.9982e-01,\n",
       "          1.5924e-02,  9.9991e-01,  1.1017e-02,  9.9996e-01,  7.6218e-03,\n",
       "          9.9998e-01,  5.2730e-03,  9.9999e-01,  3.6480e-03,  1.0000e+00,\n",
       "          2.5238e-03,  1.0000e+00,  1.7461e-03,  1.0000e+00,  1.2080e-03,\n",
       "          1.0000e+00,  8.3572e-04,  1.0000e+00,  5.7818e-04,  1.0000e+00,\n",
       "          4.0000e-04,  1.0000e+00,  2.7673e-04,  1.0000e+00,  1.9145e-04,\n",
       "          1.0000e+00,  1.3245e-04,  1.0000e+00,  9.1635e-05,  1.0000e+00,\n",
       "          6.3396e-05,  1.0000e+00,  4.3859e-05,  1.0000e+00,  3.0343e-05,\n",
       "          1.0000e+00,  2.0992e-05,  1.0000e+00,  1.4523e-05,  1.0000e+00,\n",
       "          1.0048e-05,  1.0000e+00,  6.9512e-06,  1.0000e+00,  4.8091e-06,\n",
       "          1.0000e+00,  3.3271e-06,  1.0000e+00,  2.3018e-06,  1.0000e+00,\n",
       "          1.5924e-06,  1.0000e+00,  1.1017e-06,  1.0000e+00,  7.6218e-07,\n",
       "          1.0000e+00,  5.2730e-07,  1.0000e+00,  3.6480e-07,  1.0000e+00,\n",
       "          2.5238e-07,  1.0000e+00,  1.7461e-07,  1.0000e+00,  1.2080e-07,\n",
       "          1.0000e+00,  8.3572e-08,  1.0000e+00,  5.7818e-08,  1.0000e+00]])"
      ]
     },
     "execution_count": 185,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 185
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T06:53:36.474401Z",
     "start_time": "2025-05-26T06:53:36.470060Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 词嵌入\n",
    "input_embedding = nn.Embedding(vocab_size, embedding_size)\n",
    "output_embedding = nn.Embedding(vocab_size, embedding_size)\n",
    "\n",
    "input_embeddings = input_embedding(inputs)\n",
    "output_embeddings = output_embedding(inputs)\n",
    "\n",
    "# 加上位置编码\n",
    "input_embeddings_pe = input_embeddings + pe\n",
    "output_embeddings_pe = output_embeddings + pe"
   ],
   "id": "c03febaae089ddd3",
   "outputs": [],
   "execution_count": 186
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T06:53:36.534718Z",
     "start_time": "2025-05-26T06:53:36.529266Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Multi-Head Attention多头注意力\n",
    "class MultiHeadAttention(nn.Module):\n",
    "    def __init__(self, embed_dim, head_dim, num_heads):\n",
    "        super().__init__()\n",
    "\n",
    "        self.num_heads = num_heads\n",
    "        self.head_dim = head_dim\n",
    "\n",
    "        # 合并所有头的线性变换矩阵\n",
    "        self.q_proj = nn.Linear(embed_dim, head_dim * num_heads)  # 维度(3, 4)\n",
    "        self.k_proj = nn.Linear(embed_dim, head_dim * num_heads)\n",
    "        self.v_proj = nn.Linear(embed_dim, head_dim * num_heads)\n",
    "        self.out_proj = nn.Linear(head_dim * num_heads, embed_dim)\n",
    "\n",
    "    def forward(self, query, key, value, mask=None):\n",
    "        batch_size, query_seq_len, _ = query.shape\n",
    "        _, key_seq_len, _ = key.shape\n",
    "        _, value_seq_len, _ = value.shape\n",
    "\n",
    "        # 计算所有头的qkv\n",
    "        q = self.q_proj(query)  # x是(4, 3), 输出是(4, 4)\n",
    "        k = self.k_proj(key)  # x是(4, 3), 输出是(4, 4)\n",
    "        v = self.v_proj(value)  # x是(4, 3), 输出是(4, 4)\n",
    "\n",
    "        # 分割为多个头\n",
    "        # view()的作用是将张量reshape为指定形状，也就是(4, 2, 2)\n",
    "        # transpose(0, 1)的作用是将维度1和维度2进行转置，也就是(2, 4, 2)把num_heads放到前面去，这样就方便去每个head的qkv\n",
    "        q = q.view(batch_size, query_seq_len, self.num_heads, self.head_dim).transpose(1, 2)\n",
    "        k = k.view(batch_size, key_seq_len, self.num_heads, self.head_dim).transpose(1, 2)\n",
    "        v = v.view(batch_size, value_seq_len, self.num_heads, self.head_dim).transpose(1, 2)\n",
    "\n",
    "        # 计算注意力得分 [num_heads, seq_len, seq_len]，最终形状为(2, 4, 4)\n",
    "        scores = torch.matmul(q, k.transpose(-2, -1)) / (self.head_dim ** 0.5)  #添加缩放因子，防止乘积过大\n",
    "\n",
    "        if mask is not None:\n",
    "            scores = scores.masked_fill(mask == 0, -1e9)\n",
    "\n",
    "        # 计算注意力权重 [num_heads, seq_len, seq_len]，最终形状为(2, 4, 4)\n",
    "        attn_weights = torch.softmax(scores, dim=-1)\n",
    "\n",
    "        # 应用注意力权重 [num_heads, seq_len, head_dim]，最终形状为(2, 4, 2)\n",
    "        output = torch.matmul(attn_weights, v)\n",
    "\n",
    "        # 合并所有头\n",
    "        # output.transpose(0, 1) [batch_size, num_heads, seq_len, head_dim] -> [seq_len, num_heads, head_dim]\n",
    "        # 在合并多头输出时，维度转置错误导致输出形状不正确\n",
    "        # transpose(0,1) 错误交换了 batch_size 和 num_heads 维度，导致后续形状不匹配。\n",
    "        # output = output.transpose(0, 1).contiguous()\n",
    "        output = output.transpose(1, 2).contiguous().view(batch_size, query_seq_len, -1)\n",
    "\n",
    "        # [seq_len, num_heads, head_dim] -> [seq_len, num_heads * head_dim] 也就是(4, 2*2)\n",
    "        output = output.view(batch_size, query_seq_len, -1)\n",
    "\n",
    "        # 最终输出投影\n",
    "        return self.out_proj(output)"
   ],
   "id": "b4fa983cda524904",
   "outputs": [],
   "execution_count": 187
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T06:53:36.577720Z",
     "start_time": "2025-05-26T06:53:36.575345Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Feed Forward，两个线程层，最后输出维度不变\n",
    "class FeedForward(nn.Module):\n",
    "    def __init__(self, embed_dim, fc_dim):\n",
    "        super().__init__()\n",
    "        self.fc1 = nn.Linear(embed_dim, fc_dim)\n",
    "        self.fc2 = nn.Linear(fc_dim, embed_dim)\n",
    "\n",
    "    def forward(self, x):\n",
    "        return self.fc2(torch.relu(self.fc1(x)))"
   ],
   "id": "6f46ed8d0d3ae1c6",
   "outputs": [],
   "execution_count": 188
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T06:53:36.603171Z",
     "start_time": "2025-05-26T06:53:36.599479Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 定义Encoder\n",
    "class Encoder(nn.Module):\n",
    "    def __init__(self, embed_dim, fc_dim, num_heads, num_layers):\n",
    "        super().__init__()\n",
    "        self.num_layers = num_layers\n",
    "        self.layers = nn.ModuleList([\n",
    "            EncoderLayer(embed_dim, fc_dim, num_heads)\n",
    "            for _ in range(num_layers)\n",
    "        ])\n",
    "\n",
    "    def forward(self, encoder_inputs):\n",
    "        for layer in self.layers:\n",
    "            encoder_inputs = layer(encoder_inputs)\n",
    "        return encoder_inputs\n",
    "\n",
    "\n",
    "# 定义EncoderLayer\n",
    "class EncoderLayer(nn.Module):\n",
    "    def __init__(self, embed_dim, fc_dim, num_heads):\n",
    "        super().__init__()\n",
    "        self.mha = MultiHeadAttention(embed_dim, embed_dim // num_heads, num_heads)\n",
    "        self.feed_forward = FeedForward(embed_dim, fc_dim)\n",
    "        self.layer_norm1 = nn.LayerNorm(embed_dim)\n",
    "        self.layer_norm2 = nn.LayerNorm(embed_dim)\n",
    "\n",
    "    def forward(self, encoder_inputs):\n",
    "        # 1.多头注意力\n",
    "        # 2.残差连接和层归一化\n",
    "        encoder_inputs = self.layer_norm1(\n",
    "            encoder_inputs + self.mha(query=encoder_inputs, key=encoder_inputs, value=encoder_inputs))\n",
    "\n",
    "        # 3.Feed Forward\n",
    "        # 4.残差连接和层归一化\n",
    "        return self.layer_norm2(encoder_inputs + self.feed_forward(encoder_inputs))"
   ],
   "id": "ef86552d1b4e6476",
   "outputs": [],
   "execution_count": 189
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T06:53:36.628145Z",
     "start_time": "2025-05-26T06:53:36.624140Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 定义Decoder\n",
    "class Decoder(nn.Module):\n",
    "    def __init__(self, embed_dim, fc_dim, num_heads, num_layers):\n",
    "        super().__init__()\n",
    "        self.num_layers = num_layers\n",
    "        self.num_heads = num_heads\n",
    "        self.layers = nn.ModuleList([\n",
    "            DecoderLayer(embed_dim, fc_dim, num_heads)\n",
    "            for _ in range(num_layers)\n",
    "        ])\n",
    "        self.linear = nn.Linear(embed_dim, vocab_size)\n",
    "\n",
    "    def forward(self, inputs_decoder, encoder_outputs, mask=None):\n",
    "        for layer in self.layers:\n",
    "            inputs_decoder = layer(inputs_decoder, encoder_outputs, mask=mask)\n",
    "\n",
    "        # 由交叉熵损失进行softmax即可\n",
    "        # return torch.softmax(self.linear(inputs_decoder), dim=-1)\n",
    "        return self.linear(inputs_decoder)\n",
    "\n",
    "\n",
    "# 定义DecoderLayer\n",
    "class DecoderLayer(nn.Module):\n",
    "    def __init__(self, embed_dim, fc_dim, num_heads):\n",
    "        super().__init__()\n",
    "        self.mha = MultiHeadAttention(embed_dim, embed_dim // num_heads, num_heads)\n",
    "        self.cross_mha = MultiHeadAttention(embed_dim, embed_dim // num_heads, num_heads)\n",
    "        self.feed_forward = FeedForward(embed_dim, fc_dim)\n",
    "        self.layer_norm1 = nn.LayerNorm(embed_dim)\n",
    "        self.layer_norm2 = nn.LayerNorm(embed_dim)\n",
    "        self.layer_norm3 = nn.LayerNorm(embed_dim)\n",
    "\n",
    "    def forward(self, inputs_decoder, encoder_outputs, mask=None):\n",
    "        outputs_attention = self.layer_norm1(\n",
    "            inputs_decoder + self.mha(query=inputs_decoder, key=inputs_decoder, value=inputs_decoder, mask=mask))\n",
    "\n",
    "        outputs_cross_attention = self.layer_norm2(\n",
    "            outputs_attention + self.cross_mha(query=outputs_attention, key=encoder_outputs, value=encoder_outputs))\n",
    "\n",
    "        return self.layer_norm3(outputs_cross_attention + self.feed_forward(outputs_cross_attention))"
   ],
   "id": "ebeb1a6e484fd292",
   "outputs": [],
   "execution_count": 190
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T06:53:36.657511Z",
     "start_time": "2025-05-26T06:53:36.649040Z"
    }
   },
   "cell_type": "code",
   "source": [
    "encoder = Encoder(embed_dim=embedding_size, fc_dim=embedding_size * 4, num_heads=2, num_layers=2)\n",
    "decoder = Decoder(embed_dim=embedding_size, fc_dim=embedding_size * 4, num_heads=2, num_layers=2)\n",
    "\n",
    "encoder_outputs = encoder(encoder_inputs=input_embeddings_pe)\n",
    "decoder_outputs = decoder(inputs_decoder=output_embeddings_pe, encoder_outputs=encoder_outputs)\n",
    "decoder_outputs.shape"
   ],
   "id": "49d9afe72574c1a3",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([2, 5, 1000])"
      ]
     },
     "execution_count": 191,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 191
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T06:53:36.684799Z",
     "start_time": "2025-05-26T06:53:36.681034Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class Transformer(nn.Module):\n",
    "    def __init__(self, embed_dim, num_heads, encoder_num_layers, decoder_num_layers):\n",
    "        super().__init__()\n",
    "        self.embed_dim = embed_dim\n",
    "        self.num_heads = num_heads\n",
    "        self.encoder_num_layers = encoder_num_layers\n",
    "        self.decoder_num_layers = decoder_num_layers\n",
    "        self.input_embedding = nn.Embedding(vocab_size, embed_dim)\n",
    "        self.output_embedding = nn.Embedding(vocab_size, embed_dim)\n",
    "        self.encoder = Encoder(embed_dim=embed_dim, fc_dim=embed_dim * 4, num_heads=num_heads,\n",
    "                               num_layers=encoder_num_layers)\n",
    "        self.decoder = Decoder(embed_dim=embed_dim, fc_dim=embed_dim * 4, num_heads=num_heads,\n",
    "                               num_layers=decoder_num_layers)\n",
    "\n",
    "    def forward(self, inputs, outputs, mask=None):\n",
    "        input_seq_len = inputs.size(1)\n",
    "        output_seq_len = outputs.size(1)\n",
    "\n",
    "        encoder_inputs = self.input_embedding(inputs) + positional_encoding(input_seq_len, self.embed_dim)\n",
    "        encoder_outputs = self.encoder(encoder_inputs)\n",
    "\n",
    "        decoder_inputs = self.output_embedding(outputs) + positional_encoding(output_seq_len, self.embed_dim)\n",
    "        decoder_outputs = self.decoder(decoder_inputs, encoder_outputs, mask)\n",
    "\n",
    "        return decoder_outputs"
   ],
   "id": "8c10136b62e4b8fc",
   "outputs": [],
   "execution_count": 192
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T06:53:36.782356Z",
     "start_time": "2025-05-26T06:53:36.706466Z"
    }
   },
   "cell_type": "code",
   "source": [
    "from torch import optim\n",
    "\n",
    "EMBEDDING_SIZE = 512\n",
    "NUM_HEADS = 2\n",
    "ENCODER_NUM_LAYERS = 2\n",
    "DECODER_NUM_LAYERS = 2\n",
    "SEQ_LEN = 10\n",
    "BATCH_SIZE = 1\n",
    "\n",
    "transformer = Transformer(embed_dim=EMBEDDING_SIZE, num_heads=NUM_HEADS, encoder_num_layers=ENCODER_NUM_LAYERS,\n",
    "                          decoder_num_layers=DECODER_NUM_LAYERS)\n",
    "criterion = nn.CrossEntropyLoss()\n",
    "optimizer = optim.Adam(transformer.parameters(), lr=0.0001)"
   ],
   "id": "43b563dde5336488",
   "outputs": [],
   "execution_count": 193
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T06:53:36.809838Z",
     "start_time": "2025-05-26T06:53:36.805141Z"
    }
   },
   "cell_type": "code",
   "source": [
    "from collections import Counter\n",
    "\n",
    "import jieba\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "\n",
    "# 自定义简单数据集\n",
    "raw_data = [\n",
    "    (\"你好，今天天气真好！\", \"Hello, the weather is nice today!\"),\n",
    "    (\"你吃饭了吗？\", \"Have you eaten yet?\"),\n",
    "    (\"深度学习很有趣。\", \"Deep learning is interesting.\"),\n",
    "    (\"我们一起学习吧。\", \"Let's study together.\"),\n",
    "    (\"这是一个测试例子。\", \"This is a test example.\")\n",
    "]\n",
    "\n",
    "\n",
    "# 中文分词函数\n",
    "def tokenize_chinese(text):\n",
    "    return list(jieba.cut(text))  # 使用结巴分词\n",
    "\n",
    "\n",
    "# 英文分词函数\n",
    "def tokenize_english(text):\n",
    "    return text.lower().split()\n",
    "\n",
    "\n",
    "# 处理原始数据\n",
    "chinese_sentences = [tokenize_chinese(pair[0]) for pair in raw_data]\n",
    "english_sentences = [tokenize_english(pair[1]) for pair in raw_data]\n",
    "\n",
    "chinese_sentences, english_sentences"
   ],
   "id": "c024e4230858bce",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "([['你好', '，', '今天天气', '真', '好', '！'],\n",
       "  ['你', '吃饭', '了', '吗', '？'],\n",
       "  ['深度', '学习', '很', '有趣', '。'],\n",
       "  ['我们', '一起', '学习', '吧', '。'],\n",
       "  ['这是', '一个', '测试', '例子', '。']],\n",
       " [['hello,', 'the', 'weather', 'is', 'nice', 'today!'],\n",
       "  ['have', 'you', 'eaten', 'yet?'],\n",
       "  ['deep', 'learning', 'is', 'interesting.'],\n",
       "  [\"let's\", 'study', 'together.'],\n",
       "  ['this', 'is', 'a', 'test', 'example.']])"
      ]
     },
     "execution_count": 194,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 194
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T06:53:36.841998Z",
     "start_time": "2025-05-26T06:53:36.838096Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 处理特殊符号\n",
    "special_tokens = ['<pad>', '<sos>', '<eos>', '<unk>']\n",
    "\n",
    "\n",
    "# 构建词汇表\n",
    "def build_vocab(sentences, min_freq=1):\n",
    "    counter = Counter()\n",
    "\n",
    "    for sentence in sentences:\n",
    "        for word in sentence:\n",
    "            counter[word] += 1\n",
    "\n",
    "    vocab = special_tokens.copy()\n",
    "\n",
    "    # 遍历没个词以及出现的次数，至少要出现min_freq次才放到词汇表中\n",
    "    for word, count in counter.items():\n",
    "        if count >= min_freq and word not in special_tokens:\n",
    "            vocab.append(word)\n",
    "\n",
    "    word2idx = {word: idx for idx, word in enumerate(vocab)}\n",
    "    return vocab, word2idx\n",
    "\n",
    "\n",
    "# 构建中英文词汇表\n",
    "zh_vocab, zh_word2idx = build_vocab([sentence for sentence in chinese_sentences])\n",
    "\n",
    "en_vocab, en_word2idx = build_vocab([sentence for sentence in english_sentences])\n",
    "\n",
    "print(zh_vocab)\n",
    "print(en_vocab)\n",
    "print(zh_word2idx)\n",
    "print(en_word2idx)"
   ],
   "id": "33521b23cb82eb17",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['<pad>', '<sos>', '<eos>', '<unk>', '你好', '，', '今天天气', '真', '好', '！', '你', '吃饭', '了', '吗', '？', '深度', '学习', '很', '有趣', '。', '我们', '一起', '吧', '这是', '一个', '测试', '例子']\n",
      "['<pad>', '<sos>', '<eos>', '<unk>', 'hello,', 'the', 'weather', 'is', 'nice', 'today!', 'have', 'you', 'eaten', 'yet?', 'deep', 'learning', 'interesting.', \"let's\", 'study', 'together.', 'this', 'a', 'test', 'example.']\n",
      "{'<pad>': 0, '<sos>': 1, '<eos>': 2, '<unk>': 3, '你好': 4, '，': 5, '今天天气': 6, '真': 7, '好': 8, '！': 9, '你': 10, '吃饭': 11, '了': 12, '吗': 13, '？': 14, '深度': 15, '学习': 16, '很': 17, '有趣': 18, '。': 19, '我们': 20, '一起': 21, '吧': 22, '这是': 23, '一个': 24, '测试': 25, '例子': 26}\n",
      "{'<pad>': 0, '<sos>': 1, '<eos>': 2, '<unk>': 3, 'hello,': 4, 'the': 5, 'weather': 6, 'is': 7, 'nice': 8, 'today!': 9, 'have': 10, 'you': 11, 'eaten': 12, 'yet?': 13, 'deep': 14, 'learning': 15, 'interesting.': 16, \"let's\": 17, 'study': 18, 'together.': 19, 'this': 20, 'a': 21, 'test': 22, 'example.': 23}\n"
     ]
    }
   ],
   "execution_count": 195
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T06:53:36.873700Z",
     "start_time": "2025-05-26T06:53:36.869752Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 数据数值化处理\n",
    "def numericalize(sentence, word2idx):\n",
    "    # 如果某个词在字典中找不到，则用'<unk>'的索引代替\n",
    "    return [word2idx.get(word, word2idx['<unk>']) for word in sentence]\n",
    "\n",
    "\n",
    "processed_data_ch = []\n",
    "processed_data_en = []\n",
    "for ch, en in zip(chinese_sentences, english_sentences):\n",
    "    # ch，en分别是一个中文句子和对应的英文句子\n",
    "    # 在每个句子的前面加上'<sos>'，在每个句子的后面加上'<eos>'，这样大模型才能知道什么时候停止生成句子\n",
    "    ch_numerical = [zh_word2idx['<sos>']] + numericalize(ch, zh_word2idx) + [zh_word2idx['<eos>']]\n",
    "    en_numerical = [en_word2idx['<sos>']] + numericalize(en, en_word2idx) + [en_word2idx['<eos>']]\n",
    "    processed_data_ch.append(torch.LongTensor(ch_numerical))\n",
    "    processed_data_en.append(torch.LongTensor(en_numerical))\n",
    "\n",
    "print(processed_data_ch)\n",
    "print(processed_data_en)"
   ],
   "id": "b6113025119aacd5",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[tensor([1, 4, 5, 6, 7, 8, 9, 2]), tensor([ 1, 10, 11, 12, 13, 14,  2]), tensor([ 1, 15, 16, 17, 18, 19,  2]), tensor([ 1, 20, 21, 16, 22, 19,  2]), tensor([ 1, 23, 24, 25, 26, 19,  2])]\n",
      "[tensor([1, 4, 5, 6, 7, 8, 9, 2]), tensor([ 1, 10, 11, 12, 13,  2]), tensor([ 1, 14, 15,  7, 16,  2]), tensor([ 1, 17, 18, 19,  2]), tensor([ 1, 20,  7, 21, 22, 23,  2])]\n"
     ]
    }
   ],
   "execution_count": 196
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T06:53:36.918253Z",
     "start_time": "2025-05-26T06:53:36.908018Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 对processed_data进行数据填充，对齐长度\n",
    "processed_data_ch_pad = nn.utils.rnn.pad_sequence(processed_data_ch, batch_first=True,\n",
    "                                                  padding_value=zh_word2idx['<pad>'])\n",
    "processed_data_ch_pad"
   ],
   "id": "ab33215825ccd38a",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ 1,  4,  5,  6,  7,  8,  9,  2],\n",
       "        [ 1, 10, 11, 12, 13, 14,  2,  0],\n",
       "        [ 1, 15, 16, 17, 18, 19,  2,  0],\n",
       "        [ 1, 20, 21, 16, 22, 19,  2,  0],\n",
       "        [ 1, 23, 24, 25, 26, 19,  2,  0]])"
      ]
     },
     "execution_count": 197,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 197
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T06:53:37.013056Z",
     "start_time": "2025-05-26T06:53:37.009728Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 对processed_data进行数据填充，对齐长度\n",
    "processed_data_en_pad = nn.utils.rnn.pad_sequence(processed_data_en, batch_first=True,\n",
    "                                                  padding_value=en_word2idx['<pad>'])\n",
    "processed_data_en_pad"
   ],
   "id": "752e2fbe6133358d",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ 1,  4,  5,  6,  7,  8,  9,  2],\n",
       "        [ 1, 10, 11, 12, 13,  2,  0,  0],\n",
       "        [ 1, 14, 15,  7, 16,  2,  0,  0],\n",
       "        [ 1, 17, 18, 19,  2,  0,  0,  0],\n",
       "        [ 1, 20,  7, 21, 22, 23,  2,  0]])"
      ]
     },
     "execution_count": 198,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 198
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T06:53:37.083099Z",
     "start_time": "2025-05-26T06:53:37.079630Z"
    }
   },
   "cell_type": "code",
   "source": [
    "from torch.utils.data import DataLoader, TensorDataset\n",
    "\n",
    "dataset = TensorDataset(processed_data_ch_pad, processed_data_en_pad)\n",
    "dataloader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True)\n",
    "\n",
    "dataset[:2]"
   ],
   "id": "cb37eed51a63711e",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(tensor([[ 1,  4,  5,  6,  7,  8,  9,  2],\n",
       "         [ 1, 10, 11, 12, 13, 14,  2,  0]]),\n",
       " tensor([[ 1,  4,  5,  6,  7,  8,  9,  2],\n",
       "         [ 1, 10, 11, 12, 13,  2,  0,  0]]))"
      ]
     },
     "execution_count": 199,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 199
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T06:53:40.226311Z",
     "start_time": "2025-05-26T06:53:37.146958Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Training loop: teacher-forced training of the seq2seq Transformer.\n",
    "def train(n_epochs=10):\n",
    "    \"\"\"Train `transformer` on `dataloader` for `n_epochs` epochs.\n",
    "\n",
    "    Teacher forcing: the decoder input is the target without its last token,\n",
    "    and the loss target is the target without its first token. Prints the\n",
    "    loss once per batch.\n",
    "    \"\"\"\n",
    "    for epoch in range(n_epochs):\n",
    "        # `src` instead of `input` -- avoids shadowing the builtin input().\n",
    "        for src, target in dataloader:\n",
    "            # Prepare decoder input/target; this shift could also be\n",
    "            # precomputed once during data preprocessing.\n",
    "            decoder_input = target[:, :-1]  # drop the last token\n",
    "            decoder_target = target[:, 1:]  # drop the first token\n",
    "\n",
    "            # Causal mask so decoder self-attention cannot see future\n",
    "            # positions (shape: [batch_size, num_heads, tgt_len, tgt_len]).\n",
    "            batch_size, tgt_len = src.size(0), decoder_target.size(1)\n",
    "            mask = torch.tril(torch.ones(tgt_len, tgt_len)).unsqueeze(0).unsqueeze(0)\n",
    "            mask = mask.expand(batch_size, NUM_HEADS, tgt_len, tgt_len)\n",
    "\n",
    "            transformer_output = transformer(src, decoder_input, mask)\n",
    "\n",
    "            # Flatten (batch, seq, vocab) -> (batch*seq, vocab) for the loss.\n",
    "            loss = criterion(\n",
    "                transformer_output.reshape(-1, transformer_output.size(-1)),\n",
    "                decoder_target.reshape(-1)\n",
    "            )\n",
    "\n",
    "            optimizer.zero_grad()\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "\n",
    "            # .item() extracts a Python float so the print does not hold a\n",
    "            # reference to the autograd graph via the live tensor.\n",
    "            print(f'Epoch {epoch + 1}, Loss: {loss.item():.4f}')\n",
    "\n",
    "\n",
    "# Start training\n",
    "train(n_epochs=20)"
   ],
   "id": "afd5b6caaa1076f6",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1, Loss: 7.3767\n",
      "Epoch 1, Loss: 6.0057\n",
      "Epoch 1, Loss: 5.6472\n",
      "Epoch 1, Loss: 6.9843\n",
      "Epoch 1, Loss: 6.0352\n",
      "Epoch 2, Loss: 3.5336\n",
      "Epoch 2, Loss: 3.7028\n",
      "Epoch 2, Loss: 4.9445\n",
      "Epoch 2, Loss: 2.8750\n",
      "Epoch 2, Loss: 5.0727\n",
      "Epoch 3, Loss: 2.7094\n",
      "Epoch 3, Loss: 3.9551\n",
      "Epoch 3, Loss: 2.6382\n",
      "Epoch 3, Loss: 2.6631\n",
      "Epoch 3, Loss: 4.1207\n",
      "Epoch 4, Loss: 2.3525\n",
      "Epoch 4, Loss: 2.0536\n",
      "Epoch 4, Loss: 1.8740\n",
      "Epoch 4, Loss: 3.3915\n",
      "Epoch 4, Loss: 2.7350\n",
      "Epoch 5, Loss: 1.4814\n",
      "Epoch 5, Loss: 2.7112\n",
      "Epoch 5, Loss: 1.3431\n",
      "Epoch 5, Loss: 2.1765\n",
      "Epoch 5, Loss: 1.1321\n",
      "Epoch 6, Loss: 1.8140\n",
      "Epoch 6, Loss: 0.7861\n",
      "Epoch 6, Loss: 0.8876\n",
      "Epoch 6, Loss: 0.7674\n",
      "Epoch 6, Loss: 1.3569\n",
      "Epoch 7, Loss: 1.1890\n",
      "Epoch 7, Loss: 0.5247\n",
      "Epoch 7, Loss: 0.5458\n",
      "Epoch 7, Loss: 0.4430\n",
      "Epoch 7, Loss: 0.8230\n",
      "Epoch 8, Loss: 0.7334\n",
      "Epoch 8, Loss: 0.3542\n",
      "Epoch 8, Loss: 0.4484\n",
      "Epoch 8, Loss: 0.3005\n",
      "Epoch 8, Loss: 0.2399\n",
      "Epoch 9, Loss: 0.3407\n",
      "Epoch 9, Loss: 0.2109\n",
      "Epoch 9, Loss: 0.2662\n",
      "Epoch 9, Loss: 0.1919\n",
      "Epoch 9, Loss: 0.1644\n",
      "Epoch 10, Loss: 0.1580\n",
      "Epoch 10, Loss: 0.1681\n",
      "Epoch 10, Loss: 0.1262\n",
      "Epoch 10, Loss: 0.1059\n",
      "Epoch 10, Loss: 0.1408\n",
      "Epoch 11, Loss: 0.0903\n",
      "Epoch 11, Loss: 0.0982\n",
      "Epoch 11, Loss: 0.0829\n",
      "Epoch 11, Loss: 0.0725\n",
      "Epoch 11, Loss: 0.1039\n",
      "Epoch 12, Loss: 0.0632\n",
      "Epoch 12, Loss: 0.0919\n",
      "Epoch 12, Loss: 0.0551\n",
      "Epoch 12, Loss: 0.0618\n",
      "Epoch 12, Loss: 0.0562\n",
      "Epoch 13, Loss: 0.0560\n",
      "Epoch 13, Loss: 0.0512\n",
      "Epoch 13, Loss: 0.0658\n",
      "Epoch 13, Loss: 0.0413\n",
      "Epoch 13, Loss: 0.0405\n",
      "Epoch 14, Loss: 0.0380\n",
      "Epoch 14, Loss: 0.0553\n",
      "Epoch 14, Loss: 0.0397\n",
      "Epoch 14, Loss: 0.0351\n",
      "Epoch 14, Loss: 0.0400\n",
      "Epoch 15, Loss: 0.0388\n",
      "Epoch 15, Loss: 0.0461\n",
      "Epoch 15, Loss: 0.0307\n",
      "Epoch 15, Loss: 0.0291\n",
      "Epoch 15, Loss: 0.0322\n",
      "Epoch 16, Loss: 0.0278\n",
      "Epoch 16, Loss: 0.0398\n",
      "Epoch 16, Loss: 0.0263\n",
      "Epoch 16, Loss: 0.0313\n",
      "Epoch 16, Loss: 0.0286\n",
      "Epoch 17, Loss: 0.0279\n",
      "Epoch 17, Loss: 0.0353\n",
      "Epoch 17, Loss: 0.0288\n",
      "Epoch 17, Loss: 0.0230\n",
      "Epoch 17, Loss: 0.0223\n",
      "Epoch 18, Loss: 0.0270\n",
      "Epoch 18, Loss: 0.0216\n",
      "Epoch 18, Loss: 0.0314\n",
      "Epoch 18, Loss: 0.0234\n",
      "Epoch 18, Loss: 0.0205\n",
      "Epoch 19, Loss: 0.0200\n",
      "Epoch 19, Loss: 0.0222\n",
      "Epoch 19, Loss: 0.0239\n",
      "Epoch 19, Loss: 0.0286\n",
      "Epoch 19, Loss: 0.0189\n",
      "Epoch 20, Loss: 0.0206\n",
      "Epoch 20, Loss: 0.0180\n",
      "Epoch 20, Loss: 0.0270\n",
      "Epoch 20, Loss: 0.0220\n",
      "Epoch 20, Loss: 0.0175\n"
     ]
    }
   ],
   "execution_count": 200
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T07:31:47.378285Z",
     "start_time": "2025-05-26T07:31:47.349163Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Inference: greedy-decoding translation, Chinese sentence -> English string.\n",
    "def translate(sentence):\n",
    "    \"\"\"Translate a Chinese sentence with greedy decoding (max 50 steps).\n",
    "\n",
    "    Unknown source words map to <unk>. Decoding stops at <eos> or after\n",
    "    50 generated tokens; returns the space-joined English words.\n",
    "    \"\"\"\n",
    "    tokens = tokenize_chinese(sentence)\n",
    "    numerical = [zh_word2idx.get(word, zh_word2idx['<unk>']) for word in tokens]\n",
    "    numerical = [zh_word2idx['<sos>']] + numerical + [zh_word2idx['<eos>']]\n",
    "    encoder_input = torch.tensor([numerical])\n",
    "    decoder_input = torch.LongTensor([[en_word2idx['<sos>']]])\n",
    "\n",
    "    # Disable gradients for the whole forward pass -- previously the encoder\n",
    "    # call ran outside no_grad and built an unused autograd graph.\n",
    "    with torch.no_grad():\n",
    "        memory = transformer.encoder(\n",
    "            transformer.input_embedding(encoder_input) + positional_encoding(encoder_input.size(1), EMBEDDING_SIZE))\n",
    "\n",
    "        for _ in range(50):\n",
    "            cur_len = decoder_input.size(1)\n",
    "            decoder_inputs = transformer.output_embedding(decoder_input) + positional_encoding(cur_len, EMBEDDING_SIZE)\n",
    "            # Causal mask in the shape the decoder expects:\n",
    "            # [batch=1, NUM_HEADS, cur_len, cur_len].\n",
    "            mask = torch.tril(torch.ones(cur_len, cur_len)).unsqueeze(0).unsqueeze(0)\n",
    "            mask = mask.expand(1, NUM_HEADS, cur_len, cur_len)\n",
    "            decoder_outputs = transformer.decoder(inputs_decoder=decoder_inputs, encoder_outputs=memory, mask=mask)\n",
    "            # Greedy choice: highest-scoring token at the last position.\n",
    "            pred_token = decoder_outputs[:, -1].argmax().item()\n",
    "            decoder_input = torch.cat([decoder_input, torch.tensor([[pred_token]])], dim=1)\n",
    "\n",
    "            if pred_token == en_word2idx['<eos>']:\n",
    "                break\n",
    "\n",
    "    # Drop the leading <sos>; drop the trailing <eos> only if it was actually\n",
    "    # generated. The original's [1:-1] slice lost a real word whenever the\n",
    "    # 50-step cap was reached without producing <eos>.\n",
    "    token_ids = decoder_input[0, 1:].tolist()\n",
    "    if token_ids and token_ids[-1] == en_word2idx['<eos>']:\n",
    "        token_ids = token_ids[:-1]\n",
    "    return ' '.join(en_vocab[idx] for idx in token_ids)\n",
    "\n",
    "\n",
    "# Quick check on a sample sentence (\"Have you eaten yet?\").\n",
    "test_sentence = \"你吃饭了吗\"\n",
    "print(translate(test_sentence))"
   ],
   "id": "98694ad820e0e592",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "have you eaten yet?\n"
     ]
    }
   ],
   "execution_count": 259
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
