{
 "cells": [
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-10T10:58:47.877523Z",
     "start_time": "2025-07-10T10:58:47.873769Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Imports and toy dataset for a tiny title -> poem seq2seq experiment.\n",
    "from collections import Counter\n",
    "\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "\n",
    "# Five classical Chinese poems as (title, body) pairs; the model will learn\n",
    "# to generate the body character-by-character from the title.\n",
    "poem_data = [\n",
    "    (\"登鹳雀楼\", \"白日依山尽，黄河入海流。欲穷千里目，更上一层楼。\"),\n",
    "    (\"静夜思\", \"床前明月光，疑是地上霜。举头望明月，低头思故乡。\"),\n",
    "    (\"春晓\", \"春眠不觉晓，处处闻啼鸟。夜来风雨声，花落知多少。\"),\n",
    "    (\"相思\", \"红豆生南国，春来发几枝。愿君多采撷，此物最相思。\"),\n",
    "    (\"江雪\", \"千山鸟飞绝，万径人踪灭。孤舟蓑笠翁，独钓寒江雪。\")\n",
    "]"
   ],
   "id": "ce053a7f01e08ecc",
   "outputs": [],
   "execution_count": 5
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-10T11:08:20.230366Z",
     "start_time": "2025-07-10T11:08:20.224075Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Preprocessing: character-level tokenization of titles and poems.\n",
    "def preprocess_data(data):\n",
    "    \"\"\"Split each (title, poem) pair into per-character token lists.\n",
    "\n",
    "    Chinese text has no whitespace word boundaries, so every character\n",
    "    becomes one token. Returns (src_sentences, tgt_sentences) as two\n",
    "    parallel lists of token lists.\n",
    "    \"\"\"\n",
    "    src_sentences = [list(title) for title, _ in data]\n",
    "    tgt_sentences = [list(poem) for _, poem in data]\n",
    "    return src_sentences, tgt_sentences\n",
    "\n",
    "\n",
    "src_sentences, tgt_sentences = preprocess_data(poem_data)\n",
    "src_sentences, tgt_sentences"
   ],
   "id": "63eec009df3a89bb",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "([['登', '鹳', '雀', '楼'], ['静', '夜', '思'], ['春', '晓'], ['相', '思'], ['江', '雪']],\n",
       " [['白',\n",
       "   '日',\n",
       "   '依',\n",
       "   '山',\n",
       "   '尽',\n",
       "   '，',\n",
       "   '黄',\n",
       "   '河',\n",
       "   '入',\n",
       "   '海',\n",
       "   '流',\n",
       "   '。',\n",
       "   '欲',\n",
       "   '穷',\n",
       "   '千',\n",
       "   '里',\n",
       "   '目',\n",
       "   '，',\n",
       "   '更',\n",
       "   '上',\n",
       "   '一',\n",
       "   '层',\n",
       "   '楼',\n",
       "   '。'],\n",
       "  ['床',\n",
       "   '前',\n",
       "   '明',\n",
       "   '月',\n",
       "   '光',\n",
       "   '，',\n",
       "   '疑',\n",
       "   '是',\n",
       "   '地',\n",
       "   '上',\n",
       "   '霜',\n",
       "   '。',\n",
       "   '举',\n",
       "   '头',\n",
       "   '望',\n",
       "   '明',\n",
       "   '月',\n",
       "   '，',\n",
       "   '低',\n",
       "   '头',\n",
       "   '思',\n",
       "   '故',\n",
       "   '乡',\n",
       "   '。'],\n",
       "  ['春',\n",
       "   '眠',\n",
       "   '不',\n",
       "   '觉',\n",
       "   '晓',\n",
       "   '，',\n",
       "   '处',\n",
       "   '处',\n",
       "   '闻',\n",
       "   '啼',\n",
       "   '鸟',\n",
       "   '。',\n",
       "   '夜',\n",
       "   '来',\n",
       "   '风',\n",
       "   '雨',\n",
       "   '声',\n",
       "   '，',\n",
       "   '花',\n",
       "   '落',\n",
       "   '知',\n",
       "   '多',\n",
       "   '少',\n",
       "   '。'],\n",
       "  ['红',\n",
       "   '豆',\n",
       "   '生',\n",
       "   '南',\n",
       "   '国',\n",
       "   '，',\n",
       "   '春',\n",
       "   '来',\n",
       "   '发',\n",
       "   '几',\n",
       "   '枝',\n",
       "   '。',\n",
       "   '愿',\n",
       "   '君',\n",
       "   '多',\n",
       "   '采',\n",
       "   '撷',\n",
       "   '，',\n",
       "   '此',\n",
       "   '物',\n",
       "   '最',\n",
       "   '相',\n",
       "   '思',\n",
       "   '。'],\n",
       "  ['千',\n",
       "   '山',\n",
       "   '鸟',\n",
       "   '飞',\n",
       "   '绝',\n",
       "   '，',\n",
       "   '万',\n",
       "   '径',\n",
       "   '人',\n",
       "   '踪',\n",
       "   '灭',\n",
       "   '。',\n",
       "   '孤',\n",
       "   '舟',\n",
       "   '蓑',\n",
       "   '笠',\n",
       "   '翁',\n",
       "   '，',\n",
       "   '独',\n",
       "   '钓',\n",
       "   '寒',\n",
       "   '江',\n",
       "   '雪',\n",
       "   '。']])"
      ]
     },
     "execution_count": 24,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 24
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-10T11:08:28.947072Z",
     "start_time": "2025-07-10T11:08:28.942968Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Special tokens occupy the first vocabulary slots: <pad>=0, <bos>=1, <eos>=2, <unk>=3.\n",
    "special_tokens = ['<pad>', '<bos>', '<eos>', '<unk>']\n",
    "\n",
    "\n",
    "def build_vocab(sentences):\n",
    "    \"\"\"Build a vocabulary from an iterable of token lists.\n",
    "\n",
    "    Returns (vocab, word2idx): the token list (special tokens first, then\n",
    "    data tokens in first-seen order) and its token -> index mapping.\n",
    "    \"\"\"\n",
    "    counter = Counter()\n",
    "    for sentence in sentences:\n",
    "        # Count every token of the sentence in one call instead of a manual loop.\n",
    "        counter.update(sentence)\n",
    "\n",
    "    vocab = special_tokens.copy()\n",
    "    # Counter preserves insertion order, so data tokens keep first-seen order.\n",
    "    vocab.extend(word for word in counter if word not in special_tokens)\n",
    "\n",
    "    word2idx = {word: idx for idx, word in enumerate(vocab)}\n",
    "    return vocab, word2idx\n",
    "\n",
    "\n",
    "# Build the source (title) and target (poem) vocabularies.\n",
    "# Fix: pass the lists directly; `[sentence for sentence in xs]` was a no-op copy.\n",
    "src_vocab, src_word2idx = build_vocab(src_sentences)\n",
    "tgt_vocab, tgt_word2idx = build_vocab(tgt_sentences)\n",
    "\n",
    "print(src_word2idx)\n",
    "print(tgt_word2idx)"
   ],
   "id": "ba851494e9bff76e",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'<pad>': 0, '<bos>': 1, '<eos>': 2, '<unk>': 3, '登': 4, '鹳': 5, '雀': 6, '楼': 7, '静': 8, '夜': 9, '思': 10, '春': 11, '晓': 12, '相': 13, '江': 14, '雪': 15}\n",
      "{'<pad>': 0, '<bos>': 1, '<eos>': 2, '<unk>': 3, '白': 4, '日': 5, '依': 6, '山': 7, '尽': 8, '，': 9, '黄': 10, '河': 11, '入': 12, '海': 13, '流': 14, '。': 15, '欲': 16, '穷': 17, '千': 18, '里': 19, '目': 20, '更': 21, '上': 22, '一': 23, '层': 24, '楼': 25, '床': 26, '前': 27, '明': 28, '月': 29, '光': 30, '疑': 31, '是': 32, '地': 33, '霜': 34, '举': 35, '头': 36, '望': 37, '低': 38, '思': 39, '故': 40, '乡': 41, '春': 42, '眠': 43, '不': 44, '觉': 45, '晓': 46, '处': 47, '闻': 48, '啼': 49, '鸟': 50, '夜': 51, '来': 52, '风': 53, '雨': 54, '声': 55, '花': 56, '落': 57, '知': 58, '多': 59, '少': 60, '红': 61, '豆': 62, '生': 63, '南': 64, '国': 65, '发': 66, '几': 67, '枝': 68, '愿': 69, '君': 70, '采': 71, '撷': 72, '此': 73, '物': 74, '最': 75, '相': 76, '飞': 77, '绝': 78, '万': 79, '径': 80, '人': 81, '踪': 82, '灭': 83, '孤': 84, '舟': 85, '蓑': 86, '笠': 87, '翁': 88, '独': 89, '钓': 90, '寒': 91, '江': 92, '雪': 93}\n"
     ]
    }
   ],
   "execution_count": 25
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-10T11:08:56.185242Z",
     "start_time": "2025-07-10T11:08:56.179759Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Hyperparameters.\n",
    "# NOTE(review): these constants are never referenced below — the model cell\n",
    "# hardcodes d_model=128 and lr=0.001 (vs LEARNING_RATE = 0.005 here), and the\n",
    "# DataLoader uses batch_size=1. Confirm which values are intended and use the\n",
    "# constants consistently.\n",
    "SRC_VOCAB_SIZE = len(src_vocab)\n",
    "TGT_VOCAB_SIZE = len(tgt_vocab)\n",
    "D_MODEL = 128\n",
    "BATCH_SIZE = 2\n",
    "LEARNING_RATE = 0.005\n",
    "\n",
    "\n",
    "def tokenize(words, word2idx):\n",
    "    \"\"\"Map each token to its index; unknown tokens fall back to '<unk>'.\"\"\"\n",
    "    return [word2idx.get(word, word2idx['<unk>']) for word in words]\n",
    "\n",
    "\n",
    "# Numericalize every pair; the target is wrapped in <bos>/<eos> so the\n",
    "# decoder learns where sequences start and stop.\n",
    "processed_data_src = []\n",
    "processed_data_tgt = []\n",
    "for src, tgt in zip(src_sentences, tgt_sentences):\n",
    "    src_numerical = tokenize(src, src_word2idx)\n",
    "    tgt_numerical = [tgt_word2idx['<bos>']] + tokenize(tgt, tgt_word2idx) + [tgt_word2idx['<eos>']]\n",
    "    processed_data_src.append(torch.LongTensor(src_numerical))\n",
    "    processed_data_tgt.append(torch.LongTensor(tgt_numerical))\n",
    "\n",
    "processed_data_tgt"
   ],
   "id": "bfc7c186f1217e14",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[tensor([ 1,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,\n",
       "          9, 21, 22, 23, 24, 25, 15,  2]),\n",
       " tensor([ 1, 26, 27, 28, 29, 30,  9, 31, 32, 33, 22, 34, 15, 35, 36, 37, 28, 29,\n",
       "          9, 38, 36, 39, 40, 41, 15,  2]),\n",
       " tensor([ 1, 42, 43, 44, 45, 46,  9, 47, 47, 48, 49, 50, 15, 51, 52, 53, 54, 55,\n",
       "          9, 56, 57, 58, 59, 60, 15,  2]),\n",
       " tensor([ 1, 61, 62, 63, 64, 65,  9, 42, 52, 66, 67, 68, 15, 69, 70, 59, 71, 72,\n",
       "          9, 73, 74, 75, 76, 39, 15,  2]),\n",
       " tensor([ 1, 18,  7, 50, 77, 78,  9, 79, 80, 81, 82, 83, 15, 84, 85, 86, 87, 88,\n",
       "          9, 89, 90, 91, 92, 93, 15,  2])]"
      ]
     },
     "execution_count": 26,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 26
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-10T11:09:07.835727Z",
     "start_time": "2025-07-10T11:09:07.832022Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Pad the source (title) sequences to a common length with <pad> (index 0).\n",
    "processed_data_src_pad = nn.utils.rnn.pad_sequence(processed_data_src, batch_first=True,\n",
    "                                                  padding_value=src_word2idx['<pad>'])\n",
    "processed_data_src_pad"
   ],
   "id": "7e8909062d1e80f2",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ 4,  5,  6,  7],\n",
       "        [ 8,  9, 10,  0],\n",
       "        [11, 12,  0,  0],\n",
       "        [13, 10,  0,  0],\n",
       "        [14, 15,  0,  0]])"
      ]
     },
     "execution_count": 27,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 27
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-10T11:09:10.543528Z",
     "start_time": "2025-07-10T11:09:10.538727Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Pad the target (poem) sequences to a common length with <pad> (index 0).\n",
    "# (All five poems have the same length here, so no padding is actually added —\n",
    "# see the output below.)\n",
    "processed_data_tgt_pad = nn.utils.rnn.pad_sequence(processed_data_tgt, batch_first=True,\n",
    "                                                  padding_value=tgt_word2idx['<pad>'])\n",
    "processed_data_tgt_pad"
   ],
   "id": "50c016488093de20",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ 1,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,\n",
       "          9, 21, 22, 23, 24, 25, 15,  2],\n",
       "        [ 1, 26, 27, 28, 29, 30,  9, 31, 32, 33, 22, 34, 15, 35, 36, 37, 28, 29,\n",
       "          9, 38, 36, 39, 40, 41, 15,  2],\n",
       "        [ 1, 42, 43, 44, 45, 46,  9, 47, 47, 48, 49, 50, 15, 51, 52, 53, 54, 55,\n",
       "          9, 56, 57, 58, 59, 60, 15,  2],\n",
       "        [ 1, 61, 62, 63, 64, 65,  9, 42, 52, 66, 67, 68, 15, 69, 70, 59, 71, 72,\n",
       "          9, 73, 74, 75, 76, 39, 15,  2],\n",
       "        [ 1, 18,  7, 50, 77, 78,  9, 79, 80, 81, 82, 83, 15, 84, 85, 86, 87, 88,\n",
       "          9, 89, 90, 91, 92, 93, 15,  2]])"
      ]
     },
     "execution_count": 28,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 28
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-10T11:09:35.176732Z",
     "start_time": "2025-07-10T11:09:35.171132Z"
    }
   },
   "cell_type": "code",
   "source": [
    "from torch.utils.data import DataLoader, TensorDataset\n",
    "\n",
    "# Wrap the padded tensors in a DataLoader; batch_size=1 trains on one poem per step.\n",
    "dataset = TensorDataset(processed_data_src_pad, processed_data_tgt_pad)\n",
    "dataloader = DataLoader(dataset, batch_size=1, shuffle=False)\n",
    "\n",
    "# Peek at the first batch to sanity-check shapes and contents.\n",
    "sample_src, sample_tgt = next(iter(dataloader))\n",
    "print(sample_src)\n",
    "print(sample_tgt)"
   ],
   "id": "19524ded28178041",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[4, 5, 6, 7]])\n",
      "tensor([[ 1,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,\n",
      "          9, 21, 22, 23, 24, 25, 15,  2]])\n"
     ]
    }
   ],
   "execution_count": 29
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-10T11:09:46.056017Z",
     "start_time": "2025-07-10T11:09:46.052358Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Sinusoidal positional encoding from \"Attention Is All You Need\".\n",
    "import math\n",
    "\n",
    "\n",
    "class PositionalEncoding(nn.Module):\n",
    "    \"\"\"Add fixed sinusoidal position information to token embeddings.\"\"\"\n",
    "\n",
    "    def __init__(self, d_model, max_seq_len=128):\n",
    "        super().__init__()\n",
    "        pe = torch.zeros(max_seq_len, d_model)\n",
    "        position = torch.arange(0, max_seq_len, dtype=torch.float).unsqueeze(1)\n",
    "        # Geometric frequency ladder: one (sin, cos) pair per dimension pair.\n",
    "        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))\n",
    "        pe[:, 0::2] = torch.sin(position * div_term)\n",
    "        pe[:, 1::2] = torch.cos(position * div_term)\n",
    "        # Fix: register the table as a buffer so it follows the module across\n",
    "        # .to(device)/.cuda() calls and is saved in state_dict; a plain tensor\n",
    "        # attribute would silently stay on the CPU.\n",
    "        self.register_buffer('pe', pe)\n",
    "\n",
    "    def forward(self, x):\n",
    "        # x: (batch, seq_len, d_model) — add encodings for the first seq_len positions.\n",
    "        x = x + self.pe[:x.size(1), :]\n",
    "        return x"
   ],
   "id": "3c9ad1f6b832098a",
   "outputs": [],
   "execution_count": 30
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-10T11:09:52.142609Z",
     "start_time": "2025-07-10T11:09:52.138905Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class ZhouyuModel(nn.Module):\n",
    "    \"\"\"Encoder-decoder Transformer mapping title tokens to poem-token logits.\n",
    "\n",
    "    NOTE(review): the embedding sizes read the notebook-level globals\n",
    "    `src_vocab` and `tgt_vocab`; consider passing the vocab sizes as\n",
    "    constructor parameters instead.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, d_model, dim_feedforward, nhead, num_encoder_layers, num_decoder_layers):\n",
    "        super().__init__()\n",
    "        self.encoder_embedding = nn.Embedding(len(src_vocab), d_model)\n",
    "        self.decoder_embedding = nn.Embedding(len(tgt_vocab), d_model)\n",
    "        self.pos_encoder = PositionalEncoding(d_model)\n",
    "        self.transformer = nn.Transformer(d_model=d_model, dim_feedforward=dim_feedforward, nhead=nhead,\n",
    "                                          num_encoder_layers=num_encoder_layers,\n",
    "                                          num_decoder_layers=num_decoder_layers, batch_first=True, dropout=0)\n",
    "        self.fc = nn.Linear(d_model, len(tgt_vocab))\n",
    "\n",
    "    def forward(self, src, tgt):\n",
    "\n",
    "        batch_size, en_seq_len = tgt.shape\n",
    "        # Causal mask: decoder position i may only attend to positions <= i.\n",
    "        # NOTE(review): the mask is created on the default device — move it to\n",
    "        # tgt.device before training on GPU.\n",
    "        mask = nn.Transformer.generate_square_subsequent_mask(en_seq_len)\n",
    "\n",
    "        # Token embedding + positional encoding for both streams.\n",
    "        encoder_input = self.pos_encoder(self.encoder_embedding(src))\n",
    "        decoder_input = self.pos_encoder(self.decoder_embedding(tgt))\n",
    "\n",
    "        output = self.transformer(\n",
    "            src=encoder_input, tgt=decoder_input,\n",
    "            tgt_mask=mask\n",
    "        )\n",
    "\n",
    "        # Project decoder states to target-vocabulary logits.\n",
    "        return self.fc(output)"
   ],
   "id": "b9f10077b71810ed",
   "outputs": [],
   "execution_count": 31
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-10T11:09:54.688544Z",
     "start_time": "2025-07-10T11:09:54.668649Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Instantiate the model, optimizer, and loss.\n",
    "# NOTE(review): d_model=128 and lr=0.001 are hardcoded here even though D_MODEL\n",
    "# and LEARNING_RATE (=0.005) were defined earlier — confirm which is intended.\n",
    "model = ZhouyuModel(d_model=128, dim_feedforward=2048, nhead=8, num_encoder_layers=2, num_decoder_layers=2)\n",
    "optimizer = optim.Adam(model.parameters(), lr=0.001)\n",
    "# Positions holding <pad> are excluded from the loss.\n",
    "criterion = nn.CrossEntropyLoss(ignore_index=tgt_word2idx['<pad>'])"
   ],
   "id": "8d5734d16acad79e",
   "outputs": [],
   "execution_count": 32
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-10T11:09:56.565894Z",
     "start_time": "2025-07-10T11:09:56.563543Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Print the total number of model parameters.\n",
    "print(sum(p.numel() for p in model.parameters()))"
   ],
   "id": "b44ae3b38a67757a",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2531422\n"
     ]
    }
   ],
   "execution_count": 33
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-10T11:10:04.913901Z",
     "start_time": "2025-07-10T11:10:03.537303Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Training loop: teacher forcing with shifted decoder input/target.\n",
    "for epoch in range(20):\n",
    "    for src, tgt in dataloader:\n",
    "        # Build the decoder input/target by shifting the target sequence;\n",
    "        # this could also be done once during preprocessing.\n",
    "        decoder_input = tgt[:, :-1]  # drop the last token   e.g. tensor([[1, 4, 5, 6, 7, 8, 9]])\n",
    "        decoder_target = tgt[:, 1:]  # drop the first token  e.g. tensor([[4, 5, 6, 7, 8, 9, 2]])\n",
    "\n",
    "        decoder_outputs = model(src, decoder_input)\n",
    "\n",
    "        # Flatten for CrossEntropyLoss:\n",
    "        # decoder_outputs (batch_size, seq_len, vocab_size) -> (batch_size * seq_len, vocab_size)\n",
    "        # decoder_target  (batch_size, seq_len)             -> (batch_size * seq_len,)\n",
    "        loss = criterion(\n",
    "            decoder_outputs.view(-1, decoder_outputs.size(-1)),\n",
    "            decoder_target.view(-1)\n",
    "        )\n",
    "\n",
    "        optimizer.zero_grad()\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "\n",
    "        print(f'Epoch {epoch + 1}, Loss: {loss:.4f}')"
   ],
   "id": "2573befd839be55c",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1, Loss: 4.8608\n",
      "Epoch 1, Loss: 4.5106\n",
      "Epoch 1, Loss: 4.7312\n",
      "Epoch 1, Loss: 4.9179\n",
      "Epoch 1, Loss: 4.8792\n",
      "Epoch 2, Loss: 4.2399\n",
      "Epoch 2, Loss: 4.0221\n",
      "Epoch 2, Loss: 4.1562\n",
      "Epoch 2, Loss: 4.3885\n",
      "Epoch 2, Loss: 4.5121\n",
      "Epoch 3, Loss: 4.0885\n",
      "Epoch 3, Loss: 3.8313\n",
      "Epoch 3, Loss: 3.8590\n",
      "Epoch 3, Loss: 3.8335\n",
      "Epoch 3, Loss: 3.9192\n",
      "Epoch 4, Loss: 2.7080\n",
      "Epoch 4, Loss: 3.3350\n",
      "Epoch 4, Loss: 3.0133\n",
      "Epoch 4, Loss: 3.3189\n",
      "Epoch 4, Loss: 3.4096\n",
      "Epoch 5, Loss: 1.9106\n",
      "Epoch 5, Loss: 2.1204\n",
      "Epoch 5, Loss: 2.2081\n",
      "Epoch 5, Loss: 2.7348\n",
      "Epoch 5, Loss: 2.3861\n",
      "Epoch 6, Loss: 1.4321\n",
      "Epoch 6, Loss: 1.5706\n",
      "Epoch 6, Loss: 1.6909\n",
      "Epoch 6, Loss: 1.8521\n",
      "Epoch 6, Loss: 1.8491\n",
      "Epoch 7, Loss: 1.3301\n",
      "Epoch 7, Loss: 1.2691\n",
      "Epoch 7, Loss: 1.1711\n",
      "Epoch 7, Loss: 1.5945\n",
      "Epoch 7, Loss: 1.4547\n",
      "Epoch 8, Loss: 0.8385\n",
      "Epoch 8, Loss: 0.9792\n",
      "Epoch 8, Loss: 1.0672\n",
      "Epoch 8, Loss: 1.1525\n",
      "Epoch 8, Loss: 0.9828\n",
      "Epoch 9, Loss: 0.7181\n",
      "Epoch 9, Loss: 0.7519\n",
      "Epoch 9, Loss: 0.6820\n",
      "Epoch 9, Loss: 0.7836\n",
      "Epoch 9, Loss: 0.6863\n",
      "Epoch 10, Loss: 0.5208\n",
      "Epoch 10, Loss: 0.6068\n",
      "Epoch 10, Loss: 0.5296\n",
      "Epoch 10, Loss: 0.5496\n",
      "Epoch 10, Loss: 0.5077\n",
      "Epoch 11, Loss: 0.3932\n",
      "Epoch 11, Loss: 0.4355\n",
      "Epoch 11, Loss: 0.3899\n",
      "Epoch 11, Loss: 0.3873\n",
      "Epoch 11, Loss: 0.3838\n",
      "Epoch 12, Loss: 0.3101\n",
      "Epoch 12, Loss: 0.3185\n",
      "Epoch 12, Loss: 0.3162\n",
      "Epoch 12, Loss: 0.3054\n",
      "Epoch 12, Loss: 0.3056\n",
      "Epoch 13, Loss: 0.2455\n",
      "Epoch 13, Loss: 0.2383\n",
      "Epoch 13, Loss: 0.2658\n",
      "Epoch 13, Loss: 0.2413\n",
      "Epoch 13, Loss: 0.2365\n",
      "Epoch 14, Loss: 0.2053\n",
      "Epoch 14, Loss: 0.2071\n",
      "Epoch 14, Loss: 0.2256\n",
      "Epoch 14, Loss: 0.2010\n",
      "Epoch 14, Loss: 0.2010\n",
      "Epoch 15, Loss: 0.1750\n",
      "Epoch 15, Loss: 0.1790\n",
      "Epoch 15, Loss: 0.2034\n",
      "Epoch 15, Loss: 0.1758\n",
      "Epoch 15, Loss: 0.1776\n",
      "Epoch 16, Loss: 0.1535\n",
      "Epoch 16, Loss: 0.1565\n",
      "Epoch 16, Loss: 0.1871\n",
      "Epoch 16, Loss: 0.1584\n",
      "Epoch 16, Loss: 0.1560\n",
      "Epoch 17, Loss: 0.1425\n",
      "Epoch 17, Loss: 0.1423\n",
      "Epoch 17, Loss: 0.1718\n",
      "Epoch 17, Loss: 0.1455\n",
      "Epoch 17, Loss: 0.1401\n",
      "Epoch 18, Loss: 0.1331\n",
      "Epoch 18, Loss: 0.1295\n",
      "Epoch 18, Loss: 0.1583\n",
      "Epoch 18, Loss: 0.1347\n",
      "Epoch 18, Loss: 0.1317\n",
      "Epoch 19, Loss: 0.1227\n",
      "Epoch 19, Loss: 0.1176\n",
      "Epoch 19, Loss: 0.1606\n",
      "Epoch 19, Loss: 0.1249\n",
      "Epoch 19, Loss: 0.1218\n",
      "Epoch 20, Loss: 0.1139\n",
      "Epoch 20, Loss: 0.1064\n",
      "Epoch 20, Loss: 0.2677\n",
      "Epoch 20, Loss: 0.1159\n",
      "Epoch 20, Loss: 0.1117\n"
     ]
    }
   ],
   "execution_count": 34
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-07-10T11:10:54.231347Z",
     "start_time": "2025-07-10T11:10:54.188125Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Greedy decoding: generate a poem from a title.\n",
    "def translate(sentence, model):\n",
    "    \"\"\"Greedy-decode the poem for `sentence` (a title string).\n",
    "\n",
    "    Generation stops at <eos> or after 50 tokens, whichever comes first.\n",
    "    \"\"\"\n",
    "    # Inference only: run everything without building the autograd graph\n",
    "    # (previously the encoder pass ran outside torch.no_grad()).\n",
    "    with torch.no_grad():\n",
    "        src_tokens = torch.LongTensor(tokenize(list(sentence), src_word2idx))\n",
    "        # Encode the title once and reuse the memory for every decode step.\n",
    "        encoder_input = model.pos_encoder(model.encoder_embedding(src_tokens).unsqueeze(0))\n",
    "        encoder_outputs = model.transformer.encoder(encoder_input)\n",
    "\n",
    "        decoder_inputs = [tgt_word2idx['<bos>']]\n",
    "\n",
    "        for _ in range(50):\n",
    "            decoder_input = model.pos_encoder(model.decoder_embedding(torch.LongTensor(decoder_inputs)).unsqueeze(0))\n",
    "            decoder_output = model.transformer.decoder(tgt=decoder_input, memory=encoder_outputs, tgt_mask=None)\n",
    "            output = model.fc(decoder_output)\n",
    "            # Greedy choice: most likely next token at the last position.\n",
    "            pred_token = output[:, -1, :].argmax().item()\n",
    "            decoder_inputs.append(pred_token)\n",
    "            if pred_token == tgt_word2idx['<eos>']:\n",
    "                break\n",
    "\n",
    "    # Fix: only strip the trailing token when it actually is <eos>; the old\n",
    "    # `decoder_inputs[1:-1]` silently dropped a real generated token whenever\n",
    "    # the 50-step limit was reached without emitting <eos>.\n",
    "    generated = decoder_inputs[1:]\n",
    "    if generated and generated[-1] == tgt_word2idx['<eos>']:\n",
    "        generated = generated[:-1]\n",
    "    return ''.join(tgt_vocab[idx] for idx in generated)\n",
    "\n",
    "\n",
    "# Test the generation on a training title.\n",
    "test_sentence = \"春晓\"\n",
    "print(translate(test_sentence, model))"
   ],
   "id": "69fa2a11cfd17fcf",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "春眠不觉晓，处处闻啼鸟。夜来风雨声，花落知多少。\n"
     ]
    }
   ],
   "execution_count": 36
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
