{
 "cells": [
  {
   "cell_type": "code",
   "id": "initial_id",
   "metadata": {
    "collapsed": true,
    "jupyter": {
     "outputs_hidden": true
    },
    "ExecuteTime": {
     "end_time": "2025-07-25T09:44:05.323509Z",
     "start_time": "2025-07-25T09:44:04.557073Z"
    }
   },
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "\n",
    "batch_size = 2\n",
    "zh_seq_len = 5\n",
    "en_seq_len = 6\n",
    "d_model = 512  # 词向量的长度\n",
    "vocab_size = 100\n",
    "\n",
    "# 生成两个句子，每个句子有5个词\n",
    "zh_sentences = torch.randint(0, vocab_size, (batch_size, zh_seq_len))\n",
    "en_sentences = torch.randint(0, vocab_size, (batch_size, en_seq_len))\n",
    "zh_sentences, en_sentences"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(tensor([[60, 34, 55, 99, 21],\n",
       "         [58, 64, 42, 86, 41]]),\n",
       " tensor([[22, 81, 96, 42, 79, 14],\n",
       "         [83, 50,  6, 16, 14, 29]]))"
      ]
     },
     "execution_count": 1,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 1
  },
  {
   "cell_type": "code",
   "id": "d75a5f086f4eea9c",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-25T09:44:05.427457Z",
     "start_time": "2025-07-25T09:44:05.423814Z"
    }
   },
   "source": [
    "# 词嵌入层\n",
    "encoder_embedding = nn.Embedding(vocab_size, d_model)\n",
    "decoder_embedding = nn.Embedding(vocab_size, d_model)\n",
    "\n",
    "encoder_embeddings = encoder_embedding(zh_sentences)\n",
    "decoder_embeddings = decoder_embedding(en_sentences)\n",
    "encoder_embeddings.shape, decoder_embeddings.shape"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(torch.Size([2, 5, 512]), torch.Size([2, 6, 512]))"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 2
  },
  {
   "cell_type": "code",
   "id": "b78fcbd2c1eeab9e",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-25T09:44:05.439615Z",
     "start_time": "2025-07-25T09:44:05.435801Z"
    }
   },
   "source": [
    "import math\n",
    "\n",
    "# 位置编码\n",
    "# 位置编码只跟位置有关，和具体位置上输入的数据无关\n",
    "max_seq_len = 128\n",
    "pe = torch.zeros(max_seq_len, d_model)\n",
    "position = torch.arange(0, max_seq_len, dtype=torch.float).unsqueeze(1)\n",
    "div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))\n",
    "pe[:, 0::2] = torch.sin(position * div_term)\n",
    "pe[:, 1::2] = torch.cos(position * div_term)\n",
    "pe"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ 0.0000e+00,  1.0000e+00,  0.0000e+00,  ...,  1.0000e+00,\n",
       "          0.0000e+00,  1.0000e+00],\n",
       "        [ 8.4147e-01,  5.4030e-01,  8.2186e-01,  ...,  1.0000e+00,\n",
       "          1.0366e-04,  1.0000e+00],\n",
       "        [ 9.0930e-01, -4.1615e-01,  9.3641e-01,  ...,  1.0000e+00,\n",
       "          2.0733e-04,  1.0000e+00],\n",
       "        ...,\n",
       "        [-6.1604e-01,  7.8771e-01,  9.3283e-01,  ...,  9.9991e-01,\n",
       "          1.2958e-02,  9.9992e-01],\n",
       "        [ 3.2999e-01,  9.4398e-01,  8.2756e-01,  ...,  9.9991e-01,\n",
       "          1.3061e-02,  9.9991e-01],\n",
       "        [ 9.7263e-01,  2.3236e-01,  1.0089e-02,  ...,  9.9991e-01,\n",
       "          1.3165e-02,  9.9991e-01]])"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 3
  },
  {
   "cell_type": "code",
   "id": "220f121c2cc2e2ef",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-25T09:44:05.716135Z",
     "start_time": "2025-07-25T09:44:05.713314Z"
    }
   },
   "source": [
    "# 位置编码+词向量\n",
    "encoder_inputs = encoder_embeddings + pe[:encoder_embeddings.size(1)]\n",
    "encoder_inputs.shape"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([2, 5, 512])"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 4
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-25T09:44:05.793737Z",
     "start_time": "2025-07-25T09:44:05.791180Z"
    }
   },
   "cell_type": "code",
   "source": [
    "decoder_inputs = decoder_embeddings + pe[:decoder_embeddings.size(1)]\n",
    "decoder_inputs.shape"
   ],
   "id": "c30819ec94252c40",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([2, 6, 512])"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 5
  },
  {
   "cell_type": "code",
   "id": "b87ee8e199a33508",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-25T09:44:05.809610Z",
     "start_time": "2025-07-25T09:44:05.804579Z"
    }
   },
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "\n",
    "\n",
    "class MultiHeadAttention(nn.Module):\n",
    "\n",
    "    def __init__(self, embed_dim: int, attn_dim: int, output_dim: int, num_heads: int):\n",
    "        super().__init__()\n",
    "\n",
    "        self.embed_dim = embed_dim\n",
    "        self.attn_dim = attn_dim\n",
    "        self.output_dim = output_dim\n",
    "        self.num_heads = num_heads\n",
    "        self.head_dim = attn_dim // num_heads  # //表示向下取整，attn_dim是head_dim的整数倍\n",
    "\n",
    "        # QKV投影层：从输入维度映射到内部维度\n",
    "        # projection\n",
    "        self.q_proj = nn.Linear(embed_dim, self.attn_dim)\n",
    "        self.k_proj = nn.Linear(embed_dim, self.attn_dim)\n",
    "        self.v_proj = nn.Linear(embed_dim, self.attn_dim)\n",
    "\n",
    "        # 输出投影层：从内部维度映射到输出维度\n",
    "        self.out_proj = nn.Linear(self.attn_dim, self.output_dim)\n",
    "\n",
    "    def forward(self, q_x, k_x, v_x, mask=None):\n",
    "        \"\"\"\n",
    "        输入: [batch_size, seq_len, embed_dim]\n",
    "        返回: [batch_size, seq_len, output_dim]\n",
    "        \"\"\"\n",
    "        batch_size, q_seq_len, embed_dim = q_x.shape\n",
    "        batch_size, k_seq_len, embed_dim = k_x.shape\n",
    "\n",
    "        # 投影到QKV空间\n",
    "        q = self.q_proj(q_x)  # [batch_size, seq_len, attn_dim]\n",
    "        k = self.k_proj(k_x)  # [batch_size, seq_len, attn_dim]\n",
    "        v = self.v_proj(v_x)  # [batch_size, seq_len, attn_dim]\n",
    "\n",
    "        # [batch_size, seq_len, num_heads, head_dim]\n",
    "        # 分割多头 [batch_size, num_heads, seq_len, head_dim]\n",
    "        q = q.view(batch_size, q_seq_len, self.num_heads, self.head_dim).transpose(1, 2)\n",
    "        k = k.view(batch_size, k_seq_len, self.num_heads, self.head_dim).transpose(1, 2)\n",
    "        v = v.view(batch_size, k_seq_len, self.num_heads, self.head_dim).transpose(1, 2)\n",
    "\n",
    "        # 计算注意力得分\n",
    "        # q   [batch_size, num_heads, seq_len, head_dim]\n",
    "        # k.T [batch_size, num_heads, head_dim, seq_len]\n",
    "        # q @ k.T 形状: [batch_size, num_heads, seq_len, seq_len]\n",
    "        attn_scores = torch.matmul(q, k.transpose(-2, -1))\n",
    "\n",
    "        # 缩放因子：防止乘积过大\n",
    "        d_k = k.size(-1)\n",
    "        attn_scores = attn_scores / torch.sqrt(torch.tensor(d_k))\n",
    "\n",
    "        if mask is not None:\n",
    "            attn_scores = attn_scores.masked_fill(mask == 0, float('-inf'))\n",
    "\n",
    "        # 计算注意力权重\n",
    "        attn_weights = torch.softmax(attn_scores, dim=-1)\n",
    "\n",
    "        # 计算注意力输出\n",
    "        # attn_weights [batch_size, num_heads, seq_len, seq_len]\n",
    "        # v            [batch_size, num_heads, seq_len, head_dim]\n",
    "        # [batch_size, num_heads, seq_len, head_dim]\n",
    "        attn_out = torch.matmul(attn_weights, v)\n",
    "\n",
    "        # 合并多头 [batch_size, seq_len, attn_dim]\n",
    "\n",
    "        # [batch_size, seq_len, num_heads, head_dim]\n",
    "        attn_out = attn_out.transpose(1, 2).reshape(batch_size, q_seq_len, self.attn_dim)\n",
    "\n",
    "        # 投影到输出空间\n",
    "        return self.out_proj(attn_out)"
   ],
   "outputs": [],
   "execution_count": 6
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-25T09:44:06.094507Z",
     "start_time": "2025-07-25T09:44:06.092081Z"
    }
   },
   "cell_type": "code",
   "source": [
    "## Feed Forward，两个线性层，最后输出维度不变\n",
    "class FeedForward(nn.Module):\n",
    "    def __init__(self, d_model, d_ff):\n",
    "        super().__init__()\n",
    "        self.fc1 = nn.Linear(d_model, d_ff)\n",
    "        self.fc2 = nn.Linear(d_ff, d_model)\n",
    "\n",
    "    def forward(self, x):\n",
    "        return self.fc2(torch.relu(self.fc1(x)))"
   ],
   "id": "3f0a620b3d7631d5",
   "outputs": [],
   "execution_count": 7
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-25T09:44:06.147119Z",
     "start_time": "2025-07-25T09:44:06.143705Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 定义Encoder\n",
    "class Encoder(nn.Module):\n",
    "    def __init__(self, d_model, d_ff, num_heads, num_encoder_layers):\n",
    "        super().__init__()\n",
    "        self.layers = nn.ModuleList([EncoderLayer(d_model, d_ff, num_heads) for _ in range(num_encoder_layers)])\n",
    "\n",
    "    def forward(self, x):\n",
    "        for layer in self.layers:\n",
    "            x = layer(x)\n",
    "        return x\n",
    "\n",
    "\n",
    "# 定义EncoderLayer\n",
    "class EncoderLayer(nn.Module):\n",
    "    def __init__(self, d_model, d_ff, num_heads):\n",
    "        super().__init__()\n",
    "        self.mha = MultiHeadAttention(embed_dim=d_model, attn_dim=d_model, output_dim=d_model, num_heads=num_heads)\n",
    "        self.ff = FeedForward(d_model, d_ff)\n",
    "        self.layer_norm1 = nn.LayerNorm(d_model)\n",
    "        self.layer_norm2 = nn.LayerNorm(d_model)\n",
    "\n",
    "    def forward(self, x):\n",
    "        # 1.多头注意力\n",
    "        # 2.残差连接和层归一化\n",
    "        x = self.layer_norm1(x + self.mha(q_x=x, k_x=x, v_x=x, mask=None))\n",
    "\n",
    "        # 3.Feed Forward\n",
    "        # 4.残差连接和层归一化\n",
    "        return self.layer_norm2(x + self.ff(x))"
   ],
   "id": "f013d4d174580b73",
   "outputs": [],
   "execution_count": 8
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-25T09:44:06.184548Z",
     "start_time": "2025-07-25T09:44:06.180620Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 定义Decoder\n",
    "class Decoder(nn.Module):\n",
    "    def __init__(self, d_model, d_ff, num_heads, num_decoder_layers):\n",
    "        super().__init__()\n",
    "        self.layers = nn.ModuleList([DecoderLayer(d_model, d_ff, num_heads) for _ in range(num_decoder_layers)])\n",
    "\n",
    "    def forward(self, decoder_inputs, encoder_outputs, mask=None):\n",
    "        for layer in self.layers:\n",
    "            decoder_inputs = layer(decoder_inputs, encoder_outputs, mask=mask)\n",
    "        return decoder_inputs\n",
    "\n",
    "\n",
    "# 定义DecoderLayer\n",
    "class DecoderLayer(nn.Module):\n",
    "    def __init__(self, d_model, d_ff, num_heads):\n",
    "        super().__init__()\n",
    "        self.masked_mha = MultiHeadAttention(embed_dim=d_model, attn_dim=d_model, output_dim=d_model,\n",
    "                                             num_heads=num_heads)\n",
    "        self.cross_mha = MultiHeadAttention(embed_dim=d_model, attn_dim=d_model, output_dim=d_model,\n",
    "                                            num_heads=num_heads)\n",
    "        self.ff = FeedForward(d_model, d_ff)\n",
    "        self.layer_norm1 = nn.LayerNorm(d_model)\n",
    "        self.layer_norm2 = nn.LayerNorm(d_model)\n",
    "        self.layer_norm3 = nn.LayerNorm(d_model)\n",
    "\n",
    "    def forward(self, decoder_inputs, encoder_outputs, mask=None):\n",
    "        # 1.masked多头注意力\n",
    "        # 2.残差连接和层归一化\n",
    "        x = self.layer_norm1(\n",
    "            decoder_inputs + self.masked_mha(q_x=decoder_inputs, k_x=decoder_inputs, v_x=decoder_inputs, mask=mask))\n",
    "\n",
    "        # 3.cross多头注意力\n",
    "        # 4.残差连接和层归一化\n",
    "        x = self.layer_norm2(x + self.cross_mha(q_x=x, k_x=encoder_outputs, v_x=encoder_outputs))\n",
    "\n",
    "        # 5.Feed Forward\n",
    "        # 6.残差连接和层归一化\n",
    "        return self.layer_norm3(x + self.ff(x))"
   ],
   "id": "ec280a278481442d",
   "outputs": [],
   "execution_count": 9
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-25T09:44:06.282407Z",
     "start_time": "2025-07-25T09:44:06.213652Z"
    }
   },
   "cell_type": "code",
   "source": [
    "encoder = Encoder(d_model=d_model, d_ff=2048, num_heads=8, num_encoder_layers=6)\n",
    "\n",
    "encoder_outputs = encoder(encoder_inputs)\n",
    "encoder_outputs.shape"
   ],
   "id": "eee8fb2ee0dd3cb2",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([2, 5, 512])"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 10
  },
  {
   "cell_type": "code",
   "id": "7cadc119cd37aaa3",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-25T09:44:06.395081Z",
     "start_time": "2025-07-25T09:44:06.303931Z"
    }
   },
   "source": [
    "decoder = Decoder(d_model=d_model, d_ff=2048, num_heads=8, num_decoder_layers=6)\n",
    "\n",
    "mask = torch.tril(torch.ones(en_seq_len, en_seq_len))\n",
    "decoder_outputs = decoder(decoder_inputs=decoder_inputs, encoder_outputs=encoder_outputs, mask=mask)\n",
    "decoder_outputs.shape"
   ],
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([2, 6, 512])"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 11
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-25T09:44:06.420216Z",
     "start_time": "2025-07-25T09:44:06.417428Z"
    }
   },
   "cell_type": "code",
   "source": [
    "fc = nn.Linear(d_model, vocab_size)\n",
    "decoder_outputs_vocab = fc(decoder_outputs)\n",
    "decoder_outputs_vocab.shape"
   ],
   "id": "1f9f3d935328ae77",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([2, 6, 100])"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 12
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-25T09:44:06.436105Z",
     "start_time": "2025-07-25T09:44:06.433616Z"
    }
   },
   "cell_type": "code",
   "source": [
    "outputs = torch.softmax(decoder_outputs_vocab, dim=-1)\n",
    "outputs.shape"
   ],
   "id": "19264645e4ea0b0d",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([2, 6, 100])"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 13
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-25T09:44:06.463365Z",
     "start_time": "2025-07-25T09:44:06.460844Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class Transformer(nn.Module):\n",
    "    def __init__(self, d_model, d_ff, num_heads, num_encoder_layers, num_decoder_layers):\n",
    "        super().__init__()\n",
    "        self.encoder = Encoder(d_model, d_ff, num_heads, num_encoder_layers)\n",
    "        self.decoder = Decoder(d_model, d_ff, num_heads, num_decoder_layers)\n",
    "        self.fc = nn.Linear(d_model, vocab_size)\n",
    "\n",
    "    def forward(self, encoder_inputs, decoder_inputs, mask=None):\n",
    "\n",
    "        # 课上代码如下，有问题，用的是外面定义的，应该用self的\n",
    "        # encoder_outputs = encoder(encoder_inputs)\n",
    "        # decoder_outputs = decoder(decoder_inputs=decoder_inputs, encoder_outputs=encoder_outputs, mask=mask)\n",
    "        encoder_outputs = self.encoder(encoder_inputs)\n",
    "        decoder_outputs = self.decoder(decoder_inputs=decoder_inputs, encoder_outputs=encoder_outputs, mask=mask)\n",
    "\n",
    "        return self.fc(decoder_outputs)"
   ],
   "id": "a4e167693da7d3cf",
   "outputs": [],
   "execution_count": 14
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-25T09:44:06.625882Z",
     "start_time": "2025-07-25T09:44:06.478091Z"
    }
   },
   "cell_type": "code",
   "source": [
    "transformer = Transformer(d_model=d_model, d_ff=2048, num_heads=8, num_encoder_layers=6, num_decoder_layers=6)\n",
    "mask = torch.tril(torch.ones(en_seq_len, en_seq_len))\n",
    "outputs = transformer(encoder_inputs, decoder_inputs, mask)\n",
    "outputs.shape"
   ],
   "id": "f34a2a8b7531e34a",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([2, 6, 100])"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 15
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-25T09:44:06.911034Z",
     "start_time": "2025-07-25T09:44:06.629362Z"
    }
   },
   "cell_type": "code",
   "source": [
    "from collections import Counter\n",
    "\n",
    "import jieba\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "\n",
    "# 自定义简单数据集\n",
    "data = [\n",
    "    (\"你好，今天天气真好！\", \"Hello, the weather is nice today!\"),\n",
    "    (\"你吃饭了吗？\", \"Have you eaten yet?\"),\n",
    "    (\"深度学习很有趣。\", \"Deep learning is interesting.\"),\n",
    "    (\"我们一起学习吧。\", \"Let's study together.\"),\n",
    "    (\"这是一个测试例子。\", \"This is a test example.\")\n",
    "]\n",
    "\n",
    "\n",
    "# 中文分词函数\n",
    "def chinese_split(text):\n",
    "    return list(jieba.cut(text))  # 使用结巴分词\n",
    "\n",
    "\n",
    "# 英文分词函数\n",
    "def english_split(text):\n",
    "    return text.lower().split()\n",
    "\n",
    "\n",
    "# 处理原始数据\n",
    "chinese_sentences = [chinese_split(pair[0]) for pair in data]\n",
    "english_sentences = [english_split(pair[1]) for pair in data]\n",
    "\n",
    "chinese_sentences, english_sentences"
   ],
   "id": "ce053a7f01e08ecc",
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Building prefix dict from the default dictionary ...\n",
      "Loading model from cache /var/folders/vl/mkwcfmqd5kb3rykv5bb3w3n40000gn/T/jieba.cache\n",
      "Loading model cost 0.265 seconds.\n",
      "Prefix dict has been built successfully.\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "([['你好', '，', '今天天气', '真', '好', '！'],\n",
       "  ['你', '吃饭', '了', '吗', '？'],\n",
       "  ['深度', '学习', '很', '有趣', '。'],\n",
       "  ['我们', '一起', '学习', '吧', '。'],\n",
       "  ['这是', '一个', '测试', '例子', '。']],\n",
       " [['hello,', 'the', 'weather', 'is', 'nice', 'today!'],\n",
       "  ['have', 'you', 'eaten', 'yet?'],\n",
       "  ['deep', 'learning', 'is', 'interesting.'],\n",
       "  [\"let's\", 'study', 'together.'],\n",
       "  ['this', 'is', 'a', 'test', 'example.']])"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 16
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-25T09:44:06.926924Z",
     "start_time": "2025-07-25T09:44:06.923799Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 处理特殊符号\n",
    "special_tokens = ['<pad>', '<bos>', '<eos>', '<unk>']\n",
    "\n",
    "\n",
    "# 构建词汇表\n",
    "def build_vocab(sentences):\n",
    "    counter = Counter()\n",
    "\n",
    "    for sentence in sentences:\n",
    "        for word in sentence:\n",
    "            counter[word] += 1\n",
    "\n",
    "    vocab = special_tokens.copy()\n",
    "\n",
    "    for word, count in counter.items():\n",
    "        if word not in special_tokens:\n",
    "            vocab.append(word)\n",
    "\n",
    "    word2idx = {word: idx for idx, word in enumerate(vocab)}\n",
    "    return vocab, word2idx\n",
    "\n",
    "\n",
    "# 构建中英文词汇表\n",
    "zh_vocab, zh_word2idx = build_vocab([sentence for sentence in chinese_sentences])\n",
    "\n",
    "en_vocab, en_word2idx = build_vocab([sentence for sentence in english_sentences])\n",
    "\n",
    "# print(zh_vocab)\n",
    "# print(en_vocab)\n",
    "print(zh_word2idx)\n",
    "print(en_word2idx)"
   ],
   "id": "ba851494e9bff76e",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'<pad>': 0, '<bos>': 1, '<eos>': 2, '<unk>': 3, '你好': 4, '，': 5, '今天天气': 6, '真': 7, '好': 8, '！': 9, '你': 10, '吃饭': 11, '了': 12, '吗': 13, '？': 14, '深度': 15, '学习': 16, '很': 17, '有趣': 18, '。': 19, '我们': 20, '一起': 21, '吧': 22, '这是': 23, '一个': 24, '测试': 25, '例子': 26}\n",
      "{'<pad>': 0, '<bos>': 1, '<eos>': 2, '<unk>': 3, 'hello,': 4, 'the': 5, 'weather': 6, 'is': 7, 'nice': 8, 'today!': 9, 'have': 10, 'you': 11, 'eaten': 12, 'yet?': 13, 'deep': 14, 'learning': 15, 'interesting.': 16, \"let's\": 17, 'study': 18, 'together.': 19, 'this': 20, 'a': 21, 'test': 22, 'example.': 23}\n"
     ]
    }
   ],
   "execution_count": 17
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-25T09:44:06.952911Z",
     "start_time": "2025-07-25T09:44:06.948398Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 参数设置\n",
    "ZH_VOCAB_SIZE = len(zh_vocab)\n",
    "EN_VOCAB_SIZE = len(en_vocab)\n",
    "HIDDEN_SIZE = 256\n",
    "BATCH_SIZE = 2\n",
    "LEARNING_RATE = 0.005\n",
    "\n",
    "\n",
    "def tokenize(words, word2idx):\n",
    "    # 如果某个词在字典中找不到，则用'<unk>'的索引代替\n",
    "    return [word2idx.get(word, word2idx['<unk>']) for word in words]\n",
    "\n",
    "\n",
    "processed_data_ch = []\n",
    "processed_data_en = []\n",
    "for ch, en in zip(chinese_sentences, english_sentences):\n",
    "    # ch，en分别是一个中文句子和对应的英文句子\n",
    "    # 在每个句子的前面加上'<bos>'，在每个句子的后面加上'<eos>'，这样大模型才能知道什么时候停止生成句子\n",
    "    ch_numerical = tokenize(ch, zh_word2idx)\n",
    "    en_numerical = [en_word2idx['<bos>']] + tokenize(en, en_word2idx) + [en_word2idx['<eos>']]\n",
    "    processed_data_ch.append(torch.LongTensor(ch_numerical))\n",
    "    processed_data_en.append(torch.LongTensor(en_numerical))\n",
    "\n",
    "# print(processed_data_ch)\n",
    "processed_data_en"
   ],
   "id": "bfc7c186f1217e14",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[tensor([1, 4, 5, 6, 7, 8, 9, 2]),\n",
       " tensor([ 1, 10, 11, 12, 13,  2]),\n",
       " tensor([ 1, 14, 15,  7, 16,  2]),\n",
       " tensor([ 1, 17, 18, 19,  2]),\n",
       " tensor([ 1, 20,  7, 21, 22, 23,  2])]"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 18
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-25T09:44:06.981991Z",
     "start_time": "2025-07-25T09:44:06.978996Z"
    }
   },
   "cell_type": "code",
   "source": [
    "processed_data_ch_pad = nn.utils.rnn.pad_sequence(processed_data_ch, batch_first=True,\n",
    "                                                  padding_value=zh_word2idx['<pad>'])\n",
    "processed_data_ch_pad"
   ],
   "id": "7e8909062d1e80f2",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ 4,  5,  6,  7,  8,  9],\n",
       "        [10, 11, 12, 13, 14,  0],\n",
       "        [15, 16, 17, 18, 19,  0],\n",
       "        [20, 21, 16, 22, 19,  0],\n",
       "        [23, 24, 25, 26, 19,  0]])"
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 19
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-25T09:44:07.061731Z",
     "start_time": "2025-07-25T09:44:07.058848Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 对processed_data进行数据填充，对齐长度\n",
    "processed_data_en_pad = nn.utils.rnn.pad_sequence(processed_data_en, batch_first=True,\n",
    "                                                  padding_value=en_word2idx['<pad>'])\n",
    "processed_data_en_pad"
   ],
   "id": "50c016488093de20",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ 1,  4,  5,  6,  7,  8,  9,  2],\n",
       "        [ 1, 10, 11, 12, 13,  2,  0,  0],\n",
       "        [ 1, 14, 15,  7, 16,  2,  0,  0],\n",
       "        [ 1, 17, 18, 19,  2,  0,  0,  0],\n",
       "        [ 1, 20,  7, 21, 22, 23,  2,  0]])"
      ]
     },
     "execution_count": 20,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 20
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-25T09:44:07.120005Z",
     "start_time": "2025-07-25T09:44:07.116731Z"
    }
   },
   "cell_type": "code",
   "source": [
    "\n",
    "from torch.utils.data import DataLoader, TensorDataset\n",
    "\n",
    "dataset = TensorDataset(processed_data_ch_pad, processed_data_en_pad)\n",
    "dataloader = DataLoader(dataset, batch_size=1, shuffle=True)\n",
    "\n",
    "for src, trg in dataloader:\n",
    "    print(src)\n",
    "    print(trg)\n",
    "    break"
   ],
   "id": "19524ded28178041",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[23, 24, 25, 26, 19,  0]])\n",
      "tensor([[ 1, 20,  7, 21, 22, 23,  2,  0]])\n"
     ]
    }
   ],
   "execution_count": 21
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-25T09:44:07.166915Z",
     "start_time": "2025-07-25T09:44:07.163768Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class Transformer(nn.Module):\n",
    "    def __init__(self, d_model, d_ff, num_heads, num_encoder_layers, num_decoder_layers):\n",
    "        super().__init__()\n",
    "        self.encoder_embedding = nn.Embedding(vocab_size, d_model)\n",
    "        self.decoder_embedding = nn.Embedding(vocab_size, d_model)\n",
    "        self.encoder = Encoder(d_model, d_ff, num_heads, num_encoder_layers)\n",
    "        self.decoder = Decoder(d_model, d_ff, num_heads, num_decoder_layers)\n",
    "        self.fc = nn.Linear(d_model, vocab_size)\n",
    "\n",
    "    def forward(self, encoder_inputs, decoder_inputs, mask=None):\n",
    "        encoder_inputs = self.encoder_embedding(encoder_inputs)\n",
    "        decoder_inputs = self.decoder_embedding(decoder_inputs)\n",
    "\n",
    "        encoder_inputs = encoder_inputs + pe[:encoder_inputs.size(1), :]\n",
    "        decoder_inputs = decoder_inputs + pe[:decoder_inputs.size(1), :]\n",
    "\n",
    "        # 课上代码如下，有问题，用的是外面定义的，应该用self的\n",
    "        # encoder_outputs = encoder(encoder_inputs)\n",
    "        # decoder_outputs = decoder(decoder_inputs=decoder_inputs, encoder_outputs=encoder_outputs, mask=mask)\n",
    "        encoder_outputs = self.encoder(encoder_inputs)\n",
    "        decoder_outputs = self.decoder(decoder_inputs=decoder_inputs, encoder_outputs=encoder_outputs, mask=mask)\n",
    "        return self.fc(decoder_outputs)"
   ],
   "id": "b9f10077b71810ed",
   "outputs": [],
   "execution_count": 22
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-25T09:44:07.831421Z",
     "start_time": "2025-07-25T09:44:07.190137Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# transformer = Transformer(d_model=d_model, d_ff=2048, num_heads=8, num_encoder_layers=6, num_decoder_layers=6)\n",
    "# optimizer = optim.Adam(transformer.parameters(), lr=0.01)\n",
    "\n",
    "# 训练数据太少了，模型也得小一点，这样才能训练出效果，学习率也得调小一点\n",
    "transformer = Transformer(d_model=d_model, d_ff=2048, num_heads=1, num_encoder_layers=2, num_decoder_layers=2)\n",
    "optimizer = optim.Adam(transformer.parameters(), lr=0.0001)\n",
    "criterion = nn.CrossEntropyLoss(ignore_index=en_word2idx['<pad>'])"
   ],
   "id": "8d5734d16acad79e",
   "outputs": [],
   "execution_count": 23
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-25T09:44:17.015016Z",
     "start_time": "2025-07-25T09:44:07.844649Z"
    }
   },
   "cell_type": "code",
   "source": [
    "for epoch in range(50):\n",
    "    # input： tensor([[4, 5, 6, 7, 8, 9]])\n",
    "    # target：tensor([[1, 4, 5, 6, 7, 8, 9, 2]])\n",
    "    for input, target in dataloader:\n",
    "        # 准备解码器输入输出，其实这一步可以在数据处理时做掉\n",
    "        decoder_input = target[:, :-1]  # 移除最后一个token  tensor([[1, 4, 5, 6, 7, 8, 9]])\n",
    "        decoder_target = target[:, 1:]  # 移除第一个token    tensor([[4, 5, 6, 7, 8, 9, 2]])\n",
    "\n",
    "        mask = torch.tril(torch.ones(decoder_input.size(1), decoder_input.size(1)))\n",
    "        decoder_outputs = transformer(input, decoder_input, mask)\n",
    "\n",
    "        # 计算损失\n",
    "        loss = criterion(\n",
    "            # decoder_output本来是(batch_size, seq_len, vocab_size)，变成(batch_size * seq_len, vocab_size)\n",
    "            # decoder_target本来是(batch_size, seq_len)，变成(batch_size * seq_len)\n",
    "            decoder_outputs.view(-1, decoder_outputs.size(-1)),\n",
    "            decoder_target.view(-1)\n",
    "        )\n",
    "\n",
    "        optimizer.zero_grad()\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "\n",
    "        print(f'Epoch {epoch + 1}, Loss: {loss:.4f}')"
   ],
   "id": "2573befd839be55c",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1, Loss: 4.9463\n",
      "Epoch 1, Loss: 4.6200\n",
      "Epoch 1, Loss: 4.2976\n",
      "Epoch 1, Loss: 4.2473\n",
      "Epoch 1, Loss: 3.9214\n",
      "Epoch 2, Loss: 2.0653\n",
      "Epoch 2, Loss: 2.3686\n",
      "Epoch 2, Loss: 3.0510\n",
      "Epoch 2, Loss: 2.6184\n",
      "Epoch 2, Loss: 2.0156\n",
      "Epoch 3, Loss: 1.3953\n",
      "Epoch 3, Loss: 1.7652\n",
      "Epoch 3, Loss: 1.8852\n",
      "Epoch 3, Loss: 1.4122\n",
      "Epoch 3, Loss: 1.1573\n",
      "Epoch 4, Loss: 1.1536\n",
      "Epoch 4, Loss: 0.6166\n",
      "Epoch 4, Loss: 0.7649\n",
      "Epoch 4, Loss: 0.9722\n",
      "Epoch 4, Loss: 0.4778\n",
      "Epoch 5, Loss: 0.5099\n",
      "Epoch 5, Loss: 0.2817\n",
      "Epoch 5, Loss: 0.5702\n",
      "Epoch 5, Loss: 0.3724\n",
      "Epoch 5, Loss: 0.1625\n",
      "Epoch 6, Loss: 0.3595\n",
      "Epoch 6, Loss: 0.1484\n",
      "Epoch 6, Loss: 0.1989\n",
      "Epoch 6, Loss: 0.1851\n",
      "Epoch 6, Loss: 0.0787\n",
      "Epoch 7, Loss: 0.0692\n",
      "Epoch 7, Loss: 0.1099\n",
      "Epoch 7, Loss: 0.1496\n",
      "Epoch 7, Loss: 0.1184\n",
      "Epoch 7, Loss: 0.0792\n",
      "Epoch 8, Loss: 0.0388\n",
      "Epoch 8, Loss: 0.1023\n",
      "Epoch 8, Loss: 0.0654\n",
      "Epoch 8, Loss: 0.0814\n",
      "Epoch 8, Loss: 0.0414\n",
      "Epoch 9, Loss: 0.0529\n",
      "Epoch 9, Loss: 0.0249\n",
      "Epoch 9, Loss: 0.0334\n",
      "Epoch 9, Loss: 0.0609\n",
      "Epoch 9, Loss: 0.0553\n",
      "Epoch 10, Loss: 0.0277\n",
      "Epoch 10, Loss: 0.0513\n",
      "Epoch 10, Loss: 0.0466\n",
      "Epoch 10, Loss: 0.0180\n",
      "Epoch 10, Loss: 0.0327\n",
      "Epoch 11, Loss: 0.0393\n",
      "Epoch 11, Loss: 0.0300\n",
      "Epoch 11, Loss: 0.0388\n",
      "Epoch 11, Loss: 0.0196\n",
      "Epoch 11, Loss: 0.0149\n",
      "Epoch 12, Loss: 0.0184\n",
      "Epoch 12, Loss: 0.0141\n",
      "Epoch 12, Loss: 0.0288\n",
      "Epoch 12, Loss: 0.0233\n",
      "Epoch 12, Loss: 0.0310\n",
      "Epoch 13, Loss: 0.0218\n",
      "Epoch 13, Loss: 0.0251\n",
      "Epoch 13, Loss: 0.0152\n",
      "Epoch 13, Loss: 0.0118\n",
      "Epoch 13, Loss: 0.0273\n",
      "Epoch 14, Loss: 0.0222\n",
      "Epoch 14, Loss: 0.0111\n",
      "Epoch 14, Loss: 0.0180\n",
      "Epoch 14, Loss: 0.0249\n",
      "Epoch 14, Loss: 0.0134\n",
      "Epoch 15, Loss: 0.0168\n",
      "Epoch 15, Loss: 0.0231\n",
      "Epoch 15, Loss: 0.0127\n",
      "Epoch 15, Loss: 0.0186\n",
      "Epoch 15, Loss: 0.0097\n",
      "Epoch 16, Loss: 0.0179\n",
      "Epoch 16, Loss: 0.0206\n",
      "Epoch 16, Loss: 0.0147\n",
      "Epoch 16, Loss: 0.0117\n",
      "Epoch 16, Loss: 0.0091\n",
      "Epoch 17, Loss: 0.0090\n",
      "Epoch 17, Loss: 0.0187\n",
      "Epoch 17, Loss: 0.0110\n",
      "Epoch 17, Loss: 0.0134\n",
      "Epoch 17, Loss: 0.0155\n",
      "Epoch 18, Loss: 0.0083\n",
      "Epoch 18, Loss: 0.0128\n",
      "Epoch 18, Loss: 0.0148\n",
      "Epoch 18, Loss: 0.0168\n",
      "Epoch 18, Loss: 0.0102\n",
      "Epoch 19, Loss: 0.0101\n",
      "Epoch 19, Loss: 0.0160\n",
      "Epoch 19, Loss: 0.0138\n",
      "Epoch 19, Loss: 0.0117\n",
      "Epoch 19, Loss: 0.0075\n",
      "Epoch 20, Loss: 0.0132\n",
      "Epoch 20, Loss: 0.0112\n",
      "Epoch 20, Loss: 0.0147\n",
      "Epoch 20, Loss: 0.0072\n",
      "Epoch 20, Loss: 0.0091\n",
      "Epoch 21, Loss: 0.0071\n",
      "Epoch 21, Loss: 0.0121\n",
      "Epoch 21, Loss: 0.0104\n",
      "Epoch 21, Loss: 0.0088\n",
      "Epoch 21, Loss: 0.0136\n",
      "Epoch 22, Loss: 0.0115\n",
      "Epoch 22, Loss: 0.0066\n",
      "Epoch 22, Loss: 0.0098\n",
      "Epoch 22, Loss: 0.0130\n",
      "Epoch 22, Loss: 0.0083\n",
      "Epoch 23, Loss: 0.0082\n",
      "Epoch 23, Loss: 0.0108\n",
      "Epoch 23, Loss: 0.0124\n",
      "Epoch 23, Loss: 0.0062\n",
      "Epoch 23, Loss: 0.0092\n",
      "Epoch 24, Loss: 0.0061\n",
      "Epoch 24, Loss: 0.0118\n",
      "Epoch 24, Loss: 0.0089\n",
      "Epoch 24, Loss: 0.0076\n",
      "Epoch 24, Loss: 0.0100\n",
      "Epoch 25, Loss: 0.0112\n",
      "Epoch 25, Loss: 0.0098\n",
      "Epoch 25, Loss: 0.0073\n",
      "Epoch 25, Loss: 0.0084\n",
      "Epoch 25, Loss: 0.0057\n",
      "Epoch 26, Loss: 0.0106\n",
      "Epoch 26, Loss: 0.0081\n",
      "Epoch 26, Loss: 0.0070\n",
      "Epoch 26, Loss: 0.0055\n",
      "Epoch 26, Loss: 0.0091\n",
      "Epoch 27, Loss: 0.0100\n",
      "Epoch 27, Loss: 0.0077\n",
      "Epoch 27, Loss: 0.0089\n",
      "Epoch 27, Loss: 0.0053\n",
      "Epoch 27, Loss: 0.0066\n",
      "Epoch 28, Loss: 0.0095\n",
      "Epoch 28, Loss: 0.0085\n",
      "Epoch 28, Loss: 0.0065\n",
      "Epoch 28, Loss: 0.0073\n",
      "Epoch 28, Loss: 0.0050\n",
      "Epoch 29, Loss: 0.0071\n",
      "Epoch 29, Loss: 0.0081\n",
      "Epoch 29, Loss: 0.0049\n",
      "Epoch 29, Loss: 0.0089\n",
      "Epoch 29, Loss: 0.0061\n",
      "Epoch 30, Loss: 0.0078\n",
      "Epoch 30, Loss: 0.0087\n",
      "Epoch 30, Loss: 0.0047\n",
      "Epoch 30, Loss: 0.0059\n",
      "Epoch 30, Loss: 0.0066\n",
      "Epoch 31, Loss: 0.0066\n",
      "Epoch 31, Loss: 0.0046\n",
      "Epoch 31, Loss: 0.0074\n",
      "Epoch 31, Loss: 0.0082\n",
      "Epoch 31, Loss: 0.0057\n",
      "Epoch 32, Loss: 0.0072\n",
      "Epoch 32, Loss: 0.0044\n",
      "Epoch 32, Loss: 0.0056\n",
      "Epoch 32, Loss: 0.0061\n",
      "Epoch 32, Loss: 0.0079\n",
      "Epoch 33, Loss: 0.0043\n",
      "Epoch 33, Loss: 0.0068\n",
      "Epoch 33, Loss: 0.0060\n",
      "Epoch 33, Loss: 0.0076\n",
      "Epoch 33, Loss: 0.0053\n",
      "Epoch 34, Loss: 0.0075\n",
      "Epoch 34, Loss: 0.0041\n",
      "Epoch 34, Loss: 0.0052\n",
      "Epoch 34, Loss: 0.0065\n",
      "Epoch 34, Loss: 0.0057\n",
      "Epoch 35, Loss: 0.0050\n",
      "Epoch 35, Loss: 0.0056\n",
      "Epoch 35, Loss: 0.0070\n",
      "Epoch 35, Loss: 0.0063\n",
      "Epoch 35, Loss: 0.0039\n",
      "Epoch 36, Loss: 0.0048\n",
      "Epoch 36, Loss: 0.0038\n",
      "Epoch 36, Loss: 0.0053\n",
      "Epoch 36, Loss: 0.0061\n",
      "Epoch 36, Loss: 0.0067\n",
      "Epoch 37, Loss: 0.0052\n",
      "Epoch 37, Loss: 0.0046\n",
      "Epoch 37, Loss: 0.0059\n",
      "Epoch 37, Loss: 0.0065\n",
      "Epoch 37, Loss: 0.0037\n",
      "Epoch 38, Loss: 0.0064\n",
      "Epoch 38, Loss: 0.0050\n",
      "Epoch 38, Loss: 0.0057\n",
      "Epoch 38, Loss: 0.0044\n",
      "Epoch 38, Loss: 0.0036\n",
      "Epoch 39, Loss: 0.0048\n",
      "Epoch 39, Loss: 0.0044\n",
      "Epoch 39, Loss: 0.0035\n",
      "Epoch 39, Loss: 0.0055\n",
      "Epoch 39, Loss: 0.0060\n",
      "Epoch 40, Loss: 0.0042\n",
      "Epoch 40, Loss: 0.0046\n",
      "Epoch 40, Loss: 0.0053\n",
      "Epoch 40, Loss: 0.0034\n",
      "Epoch 40, Loss: 0.0058\n",
      "Epoch 41, Loss: 0.0045\n",
      "Epoch 41, Loss: 0.0033\n",
      "Epoch 41, Loss: 0.0057\n",
      "Epoch 41, Loss: 0.0051\n",
      "Epoch 41, Loss: 0.0040\n",
      "Epoch 42, Loss: 0.0044\n",
      "Epoch 42, Loss: 0.0050\n",
      "Epoch 42, Loss: 0.0055\n",
      "Epoch 42, Loss: 0.0039\n",
      "Epoch 42, Loss: 0.0031\n",
      "Epoch 43, Loss: 0.0031\n",
      "Epoch 43, Loss: 0.0042\n",
      "Epoch 43, Loss: 0.0038\n",
      "Epoch 43, Loss: 0.0053\n",
      "Epoch 43, Loss: 0.0048\n",
      "Epoch 44, Loss: 0.0038\n",
      "Epoch 44, Loss: 0.0041\n",
      "Epoch 44, Loss: 0.0051\n",
      "Epoch 44, Loss: 0.0030\n",
      "Epoch 44, Loss: 0.0046\n",
      "Epoch 45, Loss: 0.0040\n",
      "Epoch 45, Loss: 0.0046\n",
      "Epoch 45, Loss: 0.0036\n",
      "Epoch 45, Loss: 0.0050\n",
      "Epoch 45, Loss: 0.0029\n",
      "Epoch 46, Loss: 0.0029\n",
      "Epoch 46, Loss: 0.0035\n",
      "Epoch 46, Loss: 0.0048\n",
      "Epoch 46, Loss: 0.0044\n",
      "Epoch 46, Loss: 0.0038\n",
      "Epoch 47, Loss: 0.0043\n",
      "Epoch 47, Loss: 0.0028\n",
      "Epoch 47, Loss: 0.0047\n",
      "Epoch 47, Loss: 0.0038\n",
      "Epoch 47, Loss: 0.0034\n",
      "Epoch 48, Loss: 0.0034\n",
      "Epoch 48, Loss: 0.0037\n",
      "Epoch 48, Loss: 0.0027\n",
      "Epoch 48, Loss: 0.0045\n",
      "Epoch 48, Loss: 0.0041\n",
      "Epoch 49, Loss: 0.0033\n",
      "Epoch 49, Loss: 0.0044\n",
      "Epoch 49, Loss: 0.0026\n",
      "Epoch 49, Loss: 0.0040\n",
      "Epoch 49, Loss: 0.0035\n",
      "Epoch 50, Loss: 0.0040\n",
      "Epoch 50, Loss: 0.0043\n",
      "Epoch 50, Loss: 0.0026\n",
      "Epoch 50, Loss: 0.0035\n",
      "Epoch 50, Loss: 0.0031\n"
     ]
    }
   ],
   "execution_count": 24
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-25T09:44:44.478745Z",
     "start_time": "2025-07-25T09:44:44.453890Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Translation function: greedy decoding with the trained transformer.\n",
    "def translate(sentence, transformer):\n",
    "    \"\"\"Translate a Chinese sentence into English with greedy decoding.\n",
    "\n",
    "    Relies on module-level helpers/state defined above: tokenize,\n",
    "    chinese_split, pe, zh_word2idx, en_word2idx, en_vocab.\n",
    "    \"\"\"\n",
    "    # Inference only: disable gradient tracking for the encoder pass as well\n",
    "    # (the original version tracked encoder gradients needlessly).\n",
    "    with torch.no_grad():\n",
    "        zh_tokens = torch.LongTensor(tokenize(chinese_split(sentence), zh_word2idx))\n",
    "\n",
    "        # Embed + add positional encoding; unsqueeze(0) adds the batch dim.\n",
    "        zh_embeddings = transformer.encoder_embedding(zh_tokens).unsqueeze(0)\n",
    "        zh_embeddings = zh_embeddings + pe[:zh_embeddings.size(1), :]\n",
    "        encoder_outputs = transformer.encoder(zh_embeddings)\n",
    "\n",
    "        decoder_inputs = [en_word2idx['<bos>']]\n",
    "\n",
    "        # Greedy decoding: re-feed the growing prefix, keep the argmax of\n",
    "        # the last position, stop at <eos> or after 50 generated tokens.\n",
    "        for _ in range(50):\n",
    "            decoder_inputs_tensor = torch.LongTensor(decoder_inputs)\n",
    "            decoder_inputs_tensor = transformer.decoder_embedding(decoder_inputs_tensor).unsqueeze(0)\n",
    "            decoder_inputs_tensor = decoder_inputs_tensor + pe[:decoder_inputs_tensor.size(1), :]\n",
    "\n",
    "            # NOTE(review): mask=None here although training presumably used a\n",
    "            # causal mask; only the last position is read below, but earlier\n",
    "            # positions can attend to later ones -- confirm this matches training.\n",
    "            output = transformer.decoder(decoder_inputs=decoder_inputs_tensor, encoder_outputs=encoder_outputs, mask=None)\n",
    "            output = transformer.fc(output)\n",
    "            pred_token = output[:, -1, :].argmax().item()\n",
    "            decoder_inputs.append(pred_token)\n",
    "\n",
    "            if pred_token == en_word2idx['<eos>']:\n",
    "                break\n",
    "\n",
    "    # Drop <bos>; drop the trailing <eos> only if it was actually produced.\n",
    "    # (The original [1:-1] slice cut off the last real word whenever the\n",
    "    # 50-step cap was hit before <eos> appeared.)\n",
    "    generated = decoder_inputs[1:]\n",
    "    if generated and generated[-1] == en_word2idx['<eos>']:\n",
    "        generated = generated[:-1]\n",
    "    return ' '.join(en_vocab[idx] for idx in generated)\n",
    "\n",
    "\n",
    "# Test the translation\n",
    "test_sentence = \"我们一起学习\"\n",
    "print(translate(test_sentence, transformer))"
   ],
   "id": "69fa2a11cfd17fcf",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "let's study together.\n"
     ]
    }
   ],
   "execution_count": 37
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
