{
 "cells": [
  {
   "cell_type": "code",
   "id": "initial_id",
   "metadata": {
    "collapsed": true,
    "ExecuteTime": {
     "end_time": "2025-05-26T08:28:02.505636Z",
     "start_time": "2025-05-26T08:28:02.498761Z"
    }
   },
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "from torch.nn import Transformer\n",
    "import math\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "from torch.nn.utils.rnn import pad_sequence\n",
    "\n",
    "# Hyperparameters / configuration\n",
    "DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "BATCH_SIZE = 2\n",
    "NUM_EPOCHS = 200\n",
    "LEARNING_RATE = 0.001\n",
    "D_MODEL = 128  # embedding size\n",
    "NUM_HEAD = 4\n",
    "NUM_ENCODER_LAYERS = 3\n",
    "NUM_DECODER_LAYERS = 3\n",
    "DIM_FEEDFORWARD = 512\n",
    "DROPOUT = 0.1\n",
    "MAX_SEQ_LENGTH = 50  # maximum number of tokens generated at inference time"
   ],
   "outputs": [],
   "execution_count": 40
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T08:28:02.540436Z",
     "start_time": "2025-05-26T08:28:02.537863Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Toy dataset: (title, poem) pairs — classical Chinese quatrains\n",
    "title_poem_data = [\n",
    "    (\"登鹳雀楼\", \"白日依山尽，黄河入海流。欲穷千里目，更上一层楼。\"),\n",
    "    (\"静夜思\", \"床前明月光，疑是地上霜。举头望明月，低头思故乡。\"),\n",
    "    (\"春晓\", \"春眠不觉晓，处处闻啼鸟。夜来风雨声，花落知多少。\"),\n",
    "    (\"相思\", \"红豆生南国，春来发几枝。愿君多采撷，此物最相思。\"),\n",
    "    (\"江雪\", \"千山鸟飞绝，万径人踪灭。孤舟蓑笠翁，独钓寒江雪。\")\n",
    "]"
   ],
   "id": "f1c6fdad671f2635",
   "outputs": [],
   "execution_count": 41
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T08:28:02.554493Z",
     "start_time": "2025-05-26T08:28:02.550343Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Preprocessing: split titles and poems into character-level tokens\n",
    "def preprocess_data(data):\n",
    "    \"\"\"Return (src_sentences, tgt_sentences): per-character token lists.\"\"\"\n",
    "    src_sentences = []\n",
    "    tgt_sentences = []\n",
    "\n",
    "    # Chinese text is tokenized one character at a time\n",
    "    for src, tgt in data:\n",
    "        src_sentences.append(list(src))\n",
    "        tgt_sentences.append(list(tgt))\n",
    "    return src_sentences, tgt_sentences\n",
    "\n",
    "src_sentences, tgt_sentences = preprocess_data(title_poem_data)\n",
    "src_sentences, tgt_sentences"
   ],
   "id": "7aade81080628def",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "([['登', '鹳', '雀', '楼'], ['静', '夜', '思'], ['春', '晓'], ['相', '思'], ['江', '雪']],\n",
       " [['白',\n",
       "   '日',\n",
       "   '依',\n",
       "   '山',\n",
       "   '尽',\n",
       "   '，',\n",
       "   '黄',\n",
       "   '河',\n",
       "   '入',\n",
       "   '海',\n",
       "   '流',\n",
       "   '。',\n",
       "   '欲',\n",
       "   '穷',\n",
       "   '千',\n",
       "   '里',\n",
       "   '目',\n",
       "   '，',\n",
       "   '更',\n",
       "   '上',\n",
       "   '一',\n",
       "   '层',\n",
       "   '楼',\n",
       "   '。'],\n",
       "  ['床',\n",
       "   '前',\n",
       "   '明',\n",
       "   '月',\n",
       "   '光',\n",
       "   '，',\n",
       "   '疑',\n",
       "   '是',\n",
       "   '地',\n",
       "   '上',\n",
       "   '霜',\n",
       "   '。',\n",
       "   '举',\n",
       "   '头',\n",
       "   '望',\n",
       "   '明',\n",
       "   '月',\n",
       "   '，',\n",
       "   '低',\n",
       "   '头',\n",
       "   '思',\n",
       "   '故',\n",
       "   '乡',\n",
       "   '。'],\n",
       "  ['春',\n",
       "   '眠',\n",
       "   '不',\n",
       "   '觉',\n",
       "   '晓',\n",
       "   '，',\n",
       "   '处',\n",
       "   '处',\n",
       "   '闻',\n",
       "   '啼',\n",
       "   '鸟',\n",
       "   '。',\n",
       "   '夜',\n",
       "   '来',\n",
       "   '风',\n",
       "   '雨',\n",
       "   '声',\n",
       "   '，',\n",
       "   '花',\n",
       "   '落',\n",
       "   '知',\n",
       "   '多',\n",
       "   '少',\n",
       "   '。'],\n",
       "  ['红',\n",
       "   '豆',\n",
       "   '生',\n",
       "   '南',\n",
       "   '国',\n",
       "   '，',\n",
       "   '春',\n",
       "   '来',\n",
       "   '发',\n",
       "   '几',\n",
       "   '枝',\n",
       "   '。',\n",
       "   '愿',\n",
       "   '君',\n",
       "   '多',\n",
       "   '采',\n",
       "   '撷',\n",
       "   '，',\n",
       "   '此',\n",
       "   '物',\n",
       "   '最',\n",
       "   '相',\n",
       "   '思',\n",
       "   '。'],\n",
       "  ['千',\n",
       "   '山',\n",
       "   '鸟',\n",
       "   '飞',\n",
       "   '绝',\n",
       "   '，',\n",
       "   '万',\n",
       "   '径',\n",
       "   '人',\n",
       "   '踪',\n",
       "   '灭',\n",
       "   '。',\n",
       "   '孤',\n",
       "   '舟',\n",
       "   '蓑',\n",
       "   '笠',\n",
       "   '翁',\n",
       "   '，',\n",
       "   '独',\n",
       "   '钓',\n",
       "   '寒',\n",
       "   '江',\n",
       "   '雪',\n",
       "   '。']])"
      ]
     },
     "execution_count": 42,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 42
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T08:28:02.567038Z",
     "start_time": "2025-05-26T08:28:02.562669Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Build a vocabulary: special symbols first, then tokens in order of appearance\n",
    "def build_vocab(sentences):\n",
    "    \"\"\"Map every token to a unique index; indices 0-3 are reserved specials.\"\"\"\n",
    "    vocab = {'<pad>': 0, '<sos>': 1, '<eos>': 2, '<unk>': 3}\n",
    "    for sentence in sentences:\n",
    "        for token in sentence:\n",
    "            # setdefault only inserts when the token is new\n",
    "            vocab.setdefault(token, len(vocab))\n",
    "    return vocab\n",
    "\n",
    "src_vocab = build_vocab(src_sentences)\n",
    "tgt_vocab = build_vocab(tgt_sentences)\n",
    "\n",
    "src_vocab, tgt_vocab"
   ],
   "id": "cb470ec639e0c102",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "({'<pad>': 0,\n",
       "  '<sos>': 1,\n",
       "  '<eos>': 2,\n",
       "  '<unk>': 3,\n",
       "  '登': 4,\n",
       "  '鹳': 5,\n",
       "  '雀': 6,\n",
       "  '楼': 7,\n",
       "  '静': 8,\n",
       "  '夜': 9,\n",
       "  '思': 10,\n",
       "  '春': 11,\n",
       "  '晓': 12,\n",
       "  '相': 13,\n",
       "  '江': 14,\n",
       "  '雪': 15},\n",
       " {'<pad>': 0,\n",
       "  '<sos>': 1,\n",
       "  '<eos>': 2,\n",
       "  '<unk>': 3,\n",
       "  '白': 4,\n",
       "  '日': 5,\n",
       "  '依': 6,\n",
       "  '山': 7,\n",
       "  '尽': 8,\n",
       "  '，': 9,\n",
       "  '黄': 10,\n",
       "  '河': 11,\n",
       "  '入': 12,\n",
       "  '海': 13,\n",
       "  '流': 14,\n",
       "  '。': 15,\n",
       "  '欲': 16,\n",
       "  '穷': 17,\n",
       "  '千': 18,\n",
       "  '里': 19,\n",
       "  '目': 20,\n",
       "  '更': 21,\n",
       "  '上': 22,\n",
       "  '一': 23,\n",
       "  '层': 24,\n",
       "  '楼': 25,\n",
       "  '床': 26,\n",
       "  '前': 27,\n",
       "  '明': 28,\n",
       "  '月': 29,\n",
       "  '光': 30,\n",
       "  '疑': 31,\n",
       "  '是': 32,\n",
       "  '地': 33,\n",
       "  '霜': 34,\n",
       "  '举': 35,\n",
       "  '头': 36,\n",
       "  '望': 37,\n",
       "  '低': 38,\n",
       "  '思': 39,\n",
       "  '故': 40,\n",
       "  '乡': 41,\n",
       "  '春': 42,\n",
       "  '眠': 43,\n",
       "  '不': 44,\n",
       "  '觉': 45,\n",
       "  '晓': 46,\n",
       "  '处': 47,\n",
       "  '闻': 48,\n",
       "  '啼': 49,\n",
       "  '鸟': 50,\n",
       "  '夜': 51,\n",
       "  '来': 52,\n",
       "  '风': 53,\n",
       "  '雨': 54,\n",
       "  '声': 55,\n",
       "  '花': 56,\n",
       "  '落': 57,\n",
       "  '知': 58,\n",
       "  '多': 59,\n",
       "  '少': 60,\n",
       "  '红': 61,\n",
       "  '豆': 62,\n",
       "  '生': 63,\n",
       "  '南': 64,\n",
       "  '国': 65,\n",
       "  '发': 66,\n",
       "  '几': 67,\n",
       "  '枝': 68,\n",
       "  '愿': 69,\n",
       "  '君': 70,\n",
       "  '采': 71,\n",
       "  '撷': 72,\n",
       "  '此': 73,\n",
       "  '物': 74,\n",
       "  '最': 75,\n",
       "  '相': 76,\n",
       "  '飞': 77,\n",
       "  '绝': 78,\n",
       "  '万': 79,\n",
       "  '径': 80,\n",
       "  '人': 81,\n",
       "  '踪': 82,\n",
       "  '灭': 83,\n",
       "  '孤': 84,\n",
       "  '舟': 85,\n",
       "  '蓑': 86,\n",
       "  '笠': 87,\n",
       "  '翁': 88,\n",
       "  '独': 89,\n",
       "  '钓': 90,\n",
       "  '寒': 91,\n",
       "  '江': 92,\n",
       "  '雪': 93})"
      ]
     },
     "execution_count": 43,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 43
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T08:28:02.578290Z",
     "start_time": "2025-05-26T08:28:02.575318Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Inverse target vocabulary, used to decode predicted indices back to characters\n",
    "idx_to_tgt = {index: token for token, index in tgt_vocab.items()}\n",
    "idx_to_tgt"
   ],
   "id": "87fa6708baf80ca4",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{0: '<pad>',\n",
       " 1: '<sos>',\n",
       " 2: '<eos>',\n",
       " 3: '<unk>',\n",
       " 4: '白',\n",
       " 5: '日',\n",
       " 6: '依',\n",
       " 7: '山',\n",
       " 8: '尽',\n",
       " 9: '，',\n",
       " 10: '黄',\n",
       " 11: '河',\n",
       " 12: '入',\n",
       " 13: '海',\n",
       " 14: '流',\n",
       " 15: '。',\n",
       " 16: '欲',\n",
       " 17: '穷',\n",
       " 18: '千',\n",
       " 19: '里',\n",
       " 20: '目',\n",
       " 21: '更',\n",
       " 22: '上',\n",
       " 23: '一',\n",
       " 24: '层',\n",
       " 25: '楼',\n",
       " 26: '床',\n",
       " 27: '前',\n",
       " 28: '明',\n",
       " 29: '月',\n",
       " 30: '光',\n",
       " 31: '疑',\n",
       " 32: '是',\n",
       " 33: '地',\n",
       " 34: '霜',\n",
       " 35: '举',\n",
       " 36: '头',\n",
       " 37: '望',\n",
       " 38: '低',\n",
       " 39: '思',\n",
       " 40: '故',\n",
       " 41: '乡',\n",
       " 42: '春',\n",
       " 43: '眠',\n",
       " 44: '不',\n",
       " 45: '觉',\n",
       " 46: '晓',\n",
       " 47: '处',\n",
       " 48: '闻',\n",
       " 49: '啼',\n",
       " 50: '鸟',\n",
       " 51: '夜',\n",
       " 52: '来',\n",
       " 53: '风',\n",
       " 54: '雨',\n",
       " 55: '声',\n",
       " 56: '花',\n",
       " 57: '落',\n",
       " 58: '知',\n",
       " 59: '多',\n",
       " 60: '少',\n",
       " 61: '红',\n",
       " 62: '豆',\n",
       " 63: '生',\n",
       " 64: '南',\n",
       " 65: '国',\n",
       " 66: '发',\n",
       " 67: '几',\n",
       " 68: '枝',\n",
       " 69: '愿',\n",
       " 70: '君',\n",
       " 71: '采',\n",
       " 72: '撷',\n",
       " 73: '此',\n",
       " 74: '物',\n",
       " 75: '最',\n",
       " 76: '相',\n",
       " 77: '飞',\n",
       " 78: '绝',\n",
       " 79: '万',\n",
       " 80: '径',\n",
       " 81: '人',\n",
       " 82: '踪',\n",
       " 83: '灭',\n",
       " 84: '孤',\n",
       " 85: '舟',\n",
       " 86: '蓑',\n",
       " 87: '笠',\n",
       " 88: '翁',\n",
       " 89: '独',\n",
       " 90: '钓',\n",
       " 91: '寒',\n",
       " 92: '江',\n",
       " 93: '雪'}"
      ]
     },
     "execution_count": 44,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 44
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T08:28:02.595765Z",
     "start_time": "2025-05-26T08:28:02.590539Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Custom Dataset\n",
    "class TranslationDataset(Dataset):\n",
    "    \"\"\"Pre-tokenized (src, tgt_input, tgt_output) triples as LongTensors.\n",
    "\n",
    "    tgt_input is the decoder input (<sos> + poem); tgt_output is the\n",
    "    training target (poem + <eos>) — i.e. the same tokens shifted by one.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, src_sentences, tgt_sentences, src_vocab, tgt_vocab):\n",
    "        self.data = []\n",
    "        for src, tgt in zip(src_sentences, tgt_sentences):\n",
    "            src_idx = [src_vocab.get(tok, src_vocab['<unk>']) for tok in src]\n",
    "            tgt_idx = [tgt_vocab.get(tok, tgt_vocab['<unk>']) for tok in tgt]\n",
    "\n",
    "            # Wrap the source in <sos>/<eos>; shift the target by one position\n",
    "            src_idx = [src_vocab['<sos>']] + src_idx + [src_vocab['<eos>']]\n",
    "            tgt_in = [tgt_vocab['<sos>']] + tgt_idx\n",
    "            tgt_out = tgt_idx + [tgt_vocab['<eos>']]\n",
    "\n",
    "            self.data.append((\n",
    "                torch.tensor(src_idx, dtype=torch.long),\n",
    "                torch.tensor(tgt_in, dtype=torch.long),\n",
    "                torch.tensor(tgt_out, dtype=torch.long)\n",
    "            ))\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.data)\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        return self.data[idx]\n",
    "\n",
    "dataset = TranslationDataset(src_sentences, tgt_sentences, src_vocab, tgt_vocab)\n",
    "dataset[:2]"
   ],
   "id": "9d1c27155a783a6b",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[(tensor([1, 4, 5, 6, 7, 2]),\n",
       "  tensor([ 1,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,\n",
       "           9, 21, 22, 23, 24, 25, 15]),\n",
       "  tensor([ 4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,  9,\n",
       "          21, 22, 23, 24, 25, 15,  2])),\n",
       " (tensor([ 1,  8,  9, 10,  2]),\n",
       "  tensor([ 1, 26, 27, 28, 29, 30,  9, 31, 32, 33, 22, 34, 15, 35, 36, 37, 28, 29,\n",
       "           9, 38, 36, 39, 40, 41, 15]),\n",
       "  tensor([26, 27, 28, 29, 30,  9, 31, 32, 33, 22, 34, 15, 35, 36, 37, 28, 29,  9,\n",
       "          38, 36, 39, 40, 41, 15,  2]))]"
      ]
     },
     "execution_count": 45,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 45
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T08:28:02.610196Z",
     "start_time": "2025-05-26T08:28:02.607138Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# DataLoader with padding collation (sequence-first layout: (seq_len, batch))\n",
    "def collate_fn(batch):\n",
    "    \"\"\"Pad each field of the batch to a common length with the <pad> index.\"\"\"\n",
    "    srcs, tgt_ins, tgt_outs = zip(*batch)\n",
    "    pad_src = src_vocab['<pad>']\n",
    "    pad_tgt = tgt_vocab['<pad>']\n",
    "    return (\n",
    "        pad_sequence(srcs, padding_value=pad_src),\n",
    "        pad_sequence(tgt_ins, padding_value=pad_tgt),\n",
    "        pad_sequence(tgt_outs, padding_value=pad_tgt)\n",
    "    )\n",
    "\n",
    "data_loader = DataLoader(dataset, batch_size=BATCH_SIZE, collate_fn=collate_fn)"
   ],
   "id": "26f6e536375b476b",
   "outputs": [],
   "execution_count": 46
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T08:28:02.620119Z",
     "start_time": "2025-05-26T08:28:02.617228Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Sinusoidal positional encoding for sequence-first tensors: (seq_len, batch, d_model)\n",
    "class PositionalEncoding(nn.Module):\n",
    "    def __init__(self, d_model, max_len=5000):\n",
    "        \"\"\"Precompute encodings for positions 0..max_len-1 and store them as a buffer.\"\"\"\n",
    "        super().__init__()\n",
    "        pos = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n",
    "        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))\n",
    "        pe = torch.zeros(max_len, d_model)\n",
    "        pe[:, 0::2] = torch.sin(pos * div_term)\n",
    "        pe[:, 1::2] = torch.cos(pos * div_term[:d_model // 2])\n",
    "        # Shape (max_len, 1, d_model) so it broadcasts across the batch dimension.\n",
    "        # BUG FIX: register_buffer makes model.to(DEVICE) move pe along with the\n",
    "        # parameters; a plain attribute stayed on CPU and crashed on CUDA.\n",
    "        self.register_buffer('pe', pe.unsqueeze(1))\n",
    "\n",
    "    def forward(self, x):\n",
    "        # x: (seq_len, batch, d_model). BUG FIX: slice by the sequence dimension\n",
    "        # (dim 0) — the old code used x.size(1), the *batch* size, so every\n",
    "        # position within a sequence received the same positional encoding.\n",
    "        return x + self.pe[:x.size(0)]"
   ],
   "id": "b4c6148adffb1b54",
   "outputs": [],
   "execution_count": 47
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T08:28:02.630050Z",
     "start_time": "2025-05-26T08:28:02.627222Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Transformer model (sequence-first tensors: (seq_len, batch))\n",
    "class TransformerModel(nn.Module):\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "        self.src_embedding = nn.Embedding(len(src_vocab), D_MODEL)\n",
    "        self.tgt_embedding = nn.Embedding(len(tgt_vocab), D_MODEL)\n",
    "        self.pos_encoder = PositionalEncoding(D_MODEL)\n",
    "        self.transformer = Transformer(\n",
    "            d_model=D_MODEL,\n",
    "            nhead=NUM_HEAD,\n",
    "            num_encoder_layers=NUM_ENCODER_LAYERS,\n",
    "            num_decoder_layers=NUM_DECODER_LAYERS,\n",
    "            dim_feedforward=DIM_FEEDFORWARD,\n",
    "            dropout=DROPOUT\n",
    "        )\n",
    "        self.fc_out = nn.Linear(D_MODEL, len(tgt_vocab))\n",
    "\n",
    "    def forward(self, src, tgt):\n",
    "        \"\"\"src: (src_len, batch) token ids; tgt: (tgt_len, batch) decoder-input ids.\n",
    "        Returns logits of shape (tgt_len, batch, len(tgt_vocab)).\"\"\"\n",
    "        # Causal mask: tgt_len is known at training time, so build it once per call\n",
    "        tgt_mask = self.transformer.generate_square_subsequent_mask(tgt.size(0)).to(src.device)\n",
    "\n",
    "        # BUG FIX: padding masks (batch, seq), True where the token is <pad>,\n",
    "        # so attention ignores padded positions instead of attending to them.\n",
    "        src_padding_mask = (src == src_vocab['<pad>']).transpose(0, 1)\n",
    "        tgt_padding_mask = (tgt == tgt_vocab['<pad>']).transpose(0, 1)\n",
    "\n",
    "        # Embeddings scaled by sqrt(d_model) (as in 'Attention Is All You Need'), plus position\n",
    "        src_emb = self.pos_encoder(self.src_embedding(src) * math.sqrt(D_MODEL))\n",
    "        tgt_emb = self.pos_encoder(self.tgt_embedding(tgt) * math.sqrt(D_MODEL))\n",
    "\n",
    "        output = self.transformer(\n",
    "            src_emb, tgt_emb,\n",
    "            tgt_mask=tgt_mask,\n",
    "            src_key_padding_mask=src_padding_mask,\n",
    "            tgt_key_padding_mask=tgt_padding_mask,\n",
    "            memory_key_padding_mask=src_padding_mask\n",
    "        )\n",
    "\n",
    "        return self.fc_out(output)"
   ],
   "id": "8e52097d64495e52",
   "outputs": [],
   "execution_count": 48
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T08:28:08.229794Z",
     "start_time": "2025-05-26T08:28:02.637264Z"
    }
   },
   "cell_type": "code",
   "source": [
    "model = TransformerModel().to(DEVICE)\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)\n",
    "criterion = nn.CrossEntropyLoss(ignore_index=tgt_vocab['<pad>'])\n",
    "\n",
    "# Training loop\n",
    "for epoch in range(NUM_EPOCHS):\n",
    "    model.train()\n",
    "    total_loss = 0\n",
    "\n",
    "    for src, tgt_input, tgt_output in data_loader:\n",
    "        src = src.to(DEVICE)\n",
    "        tgt_input = tgt_input.to(DEVICE)\n",
    "        tgt_output = tgt_output.to(DEVICE)\n",
    "\n",
    "        optimizer.zero_grad()\n",
    "        logits = model(src, tgt_input)\n",
    "\n",
    "        # Flatten (tgt_len, batch, vocab) -> (tgt_len * batch, vocab) for CrossEntropyLoss\n",
    "        loss = criterion(logits.reshape(-1, logits.size(-1)), tgt_output.reshape(-1))\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "\n",
    "        total_loss += loss.item()\n",
    "\n",
    "    avg_loss = total_loss / len(data_loader)\n",
    "    print(f'Epoch [{epoch+1}/{NUM_EPOCHS}], Loss: {avg_loss:.4f}')"
   ],
   "id": "2eb65b25b022f95f",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch [1/200], Loss: 4.7147\n",
      "Epoch [2/200], Loss: 4.2440\n",
      "Epoch [3/200], Loss: 4.0967\n",
      "Epoch [4/200], Loss: 3.8280\n",
      "Epoch [5/200], Loss: 3.5020\n",
      "Epoch [6/200], Loss: 3.1480\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Users/dadudu/miniconda3/envs/mini-gpt/lib/python3.8/site-packages/torch/nn/modules/transformer.py:307: UserWarning: enable_nested_tensor is True, but self.use_nested_tensor is False because encoder_layer.self_attn.batch_first was not True(use batch_first for better inference performance)\n",
      "  warnings.warn(f\"enable_nested_tensor is True, but self.use_nested_tensor is False because {why_not_sparsity_fast_path}\")\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch [7/200], Loss: 2.7163\n",
      "Epoch [8/200], Loss: 2.3688\n",
      "Epoch [9/200], Loss: 2.0481\n",
      "Epoch [10/200], Loss: 1.8167\n",
      "Epoch [11/200], Loss: 1.5742\n",
      "Epoch [12/200], Loss: 1.3748\n",
      "Epoch [13/200], Loss: 1.1892\n",
      "Epoch [14/200], Loss: 0.9982\n",
      "Epoch [15/200], Loss: 0.8949\n",
      "Epoch [16/200], Loss: 0.7701\n",
      "Epoch [17/200], Loss: 0.6767\n",
      "Epoch [18/200], Loss: 0.5874\n",
      "Epoch [19/200], Loss: 0.5385\n",
      "Epoch [20/200], Loss: 0.4855\n",
      "Epoch [21/200], Loss: 0.4407\n",
      "Epoch [22/200], Loss: 0.3849\n",
      "Epoch [23/200], Loss: 0.3345\n",
      "Epoch [24/200], Loss: 0.2998\n",
      "Epoch [25/200], Loss: 0.2707\n",
      "Epoch [26/200], Loss: 0.2571\n",
      "Epoch [27/200], Loss: 0.2275\n",
      "Epoch [28/200], Loss: 0.2210\n",
      "Epoch [29/200], Loss: 0.1977\n",
      "Epoch [30/200], Loss: 0.1731\n",
      "Epoch [31/200], Loss: 0.1871\n",
      "Epoch [32/200], Loss: 0.1610\n",
      "Epoch [33/200], Loss: 0.1474\n",
      "Epoch [34/200], Loss: 0.1624\n",
      "Epoch [35/200], Loss: 0.1480\n",
      "Epoch [36/200], Loss: 0.1213\n",
      "Epoch [37/200], Loss: 0.1387\n",
      "Epoch [38/200], Loss: 0.1674\n",
      "Epoch [39/200], Loss: 0.1234\n",
      "Epoch [40/200], Loss: 0.1232\n",
      "Epoch [41/200], Loss: 0.1096\n",
      "Epoch [42/200], Loss: 0.1025\n",
      "Epoch [43/200], Loss: 0.0851\n",
      "Epoch [44/200], Loss: 0.0896\n",
      "Epoch [45/200], Loss: 0.0768\n",
      "Epoch [46/200], Loss: 0.0843\n",
      "Epoch [47/200], Loss: 0.0733\n",
      "Epoch [48/200], Loss: 0.0758\n",
      "Epoch [49/200], Loss: 0.0648\n",
      "Epoch [50/200], Loss: 0.0583\n",
      "Epoch [51/200], Loss: 0.0631\n",
      "Epoch [52/200], Loss: 0.0601\n",
      "Epoch [53/200], Loss: 0.0547\n",
      "Epoch [54/200], Loss: 0.0528\n",
      "Epoch [55/200], Loss: 0.0560\n",
      "Epoch [56/200], Loss: 0.0475\n",
      "Epoch [57/200], Loss: 0.0544\n",
      "Epoch [58/200], Loss: 0.0472\n",
      "Epoch [59/200], Loss: 0.0506\n",
      "Epoch [60/200], Loss: 0.0469\n",
      "Epoch [61/200], Loss: 0.0433\n",
      "Epoch [62/200], Loss: 0.0402\n",
      "Epoch [63/200], Loss: 0.0398\n",
      "Epoch [64/200], Loss: 0.0388\n",
      "Epoch [65/200], Loss: 0.0349\n",
      "Epoch [66/200], Loss: 0.0355\n",
      "Epoch [67/200], Loss: 0.0392\n",
      "Epoch [68/200], Loss: 0.0326\n",
      "Epoch [69/200], Loss: 0.0347\n",
      "Epoch [70/200], Loss: 0.0337\n",
      "Epoch [71/200], Loss: 0.0397\n",
      "Epoch [72/200], Loss: 0.0348\n",
      "Epoch [73/200], Loss: 0.0494\n",
      "Epoch [74/200], Loss: 0.0739\n",
      "Epoch [75/200], Loss: 0.0796\n",
      "Epoch [76/200], Loss: 0.0421\n",
      "Epoch [77/200], Loss: 0.0572\n",
      "Epoch [78/200], Loss: 0.0556\n",
      "Epoch [79/200], Loss: 0.0408\n",
      "Epoch [80/200], Loss: 0.0381\n",
      "Epoch [81/200], Loss: 0.0447\n",
      "Epoch [82/200], Loss: 0.0345\n",
      "Epoch [83/200], Loss: 0.0410\n",
      "Epoch [84/200], Loss: 0.0321\n",
      "Epoch [85/200], Loss: 0.0333\n",
      "Epoch [86/200], Loss: 0.0287\n",
      "Epoch [87/200], Loss: 0.0319\n",
      "Epoch [88/200], Loss: 0.0291\n",
      "Epoch [89/200], Loss: 0.0250\n",
      "Epoch [90/200], Loss: 0.0235\n",
      "Epoch [91/200], Loss: 0.0263\n",
      "Epoch [92/200], Loss: 0.0248\n",
      "Epoch [93/200], Loss: 0.0278\n",
      "Epoch [94/200], Loss: 0.0483\n",
      "Epoch [95/200], Loss: 0.0479\n",
      "Epoch [96/200], Loss: 0.0272\n",
      "Epoch [97/200], Loss: 0.0322\n",
      "Epoch [98/200], Loss: 0.0391\n",
      "Epoch [99/200], Loss: 0.0252\n",
      "Epoch [100/200], Loss: 0.0240\n",
      "Epoch [101/200], Loss: 0.0219\n",
      "Epoch [102/200], Loss: 0.0221\n",
      "Epoch [103/200], Loss: 0.0230\n",
      "Epoch [104/200], Loss: 0.0225\n",
      "Epoch [105/200], Loss: 0.0251\n",
      "Epoch [106/200], Loss: 0.0227\n",
      "Epoch [107/200], Loss: 0.0187\n",
      "Epoch [108/200], Loss: 0.0206\n",
      "Epoch [109/200], Loss: 0.0157\n",
      "Epoch [110/200], Loss: 0.0182\n",
      "Epoch [111/200], Loss: 0.0194\n",
      "Epoch [112/200], Loss: 0.0161\n",
      "Epoch [113/200], Loss: 0.0168\n",
      "Epoch [114/200], Loss: 0.0238\n",
      "Epoch [115/200], Loss: 0.0250\n",
      "Epoch [116/200], Loss: 0.0161\n",
      "Epoch [117/200], Loss: 0.0320\n",
      "Epoch [118/200], Loss: 0.0140\n",
      "Epoch [119/200], Loss: 0.0200\n",
      "Epoch [120/200], Loss: 0.0213\n",
      "Epoch [121/200], Loss: 0.0160\n",
      "Epoch [122/200], Loss: 0.0170\n",
      "Epoch [123/200], Loss: 0.0144\n",
      "Epoch [124/200], Loss: 0.0149\n",
      "Epoch [125/200], Loss: 0.0280\n",
      "Epoch [126/200], Loss: 0.0167\n",
      "Epoch [127/200], Loss: 0.0152\n",
      "Epoch [128/200], Loss: 0.0161\n",
      "Epoch [129/200], Loss: 0.0460\n",
      "Epoch [130/200], Loss: 0.0209\n",
      "Epoch [131/200], Loss: 0.0169\n",
      "Epoch [132/200], Loss: 0.0251\n",
      "Epoch [133/200], Loss: 0.0399\n",
      "Epoch [134/200], Loss: 0.0272\n",
      "Epoch [135/200], Loss: 0.0369\n",
      "Epoch [136/200], Loss: 0.0341\n",
      "Epoch [137/200], Loss: 0.0560\n",
      "Epoch [138/200], Loss: 0.0236\n",
      "Epoch [139/200], Loss: 0.0335\n",
      "Epoch [140/200], Loss: 0.0270\n",
      "Epoch [141/200], Loss: 0.0246\n",
      "Epoch [142/200], Loss: 0.0239\n",
      "Epoch [143/200], Loss: 0.0216\n",
      "Epoch [144/200], Loss: 0.0163\n",
      "Epoch [145/200], Loss: 0.0205\n",
      "Epoch [146/200], Loss: 0.0362\n",
      "Epoch [147/200], Loss: 0.0213\n",
      "Epoch [148/200], Loss: 0.0507\n",
      "Epoch [149/200], Loss: 0.0197\n",
      "Epoch [150/200], Loss: 0.0529\n",
      "Epoch [151/200], Loss: 0.0216\n",
      "Epoch [152/200], Loss: 0.0316\n",
      "Epoch [153/200], Loss: 0.0233\n",
      "Epoch [154/200], Loss: 0.0305\n",
      "Epoch [155/200], Loss: 0.0716\n",
      "Epoch [156/200], Loss: 0.0202\n",
      "Epoch [157/200], Loss: 0.0229\n",
      "Epoch [158/200], Loss: 0.0204\n",
      "Epoch [159/200], Loss: 0.0191\n",
      "Epoch [160/200], Loss: 0.0188\n",
      "Epoch [161/200], Loss: 0.0167\n",
      "Epoch [162/200], Loss: 0.0149\n",
      "Epoch [163/200], Loss: 0.0116\n",
      "Epoch [164/200], Loss: 0.0161\n",
      "Epoch [165/200], Loss: 0.0151\n",
      "Epoch [166/200], Loss: 0.0089\n",
      "Epoch [167/200], Loss: 0.0335\n",
      "Epoch [168/200], Loss: 0.0182\n",
      "Epoch [169/200], Loss: 0.0104\n",
      "Epoch [170/200], Loss: 0.0130\n",
      "Epoch [171/200], Loss: 0.0096\n",
      "Epoch [172/200], Loss: 0.0148\n",
      "Epoch [173/200], Loss: 0.0128\n",
      "Epoch [174/200], Loss: 0.0104\n",
      "Epoch [175/200], Loss: 0.0140\n",
      "Epoch [176/200], Loss: 0.0121\n",
      "Epoch [177/200], Loss: 0.0096\n",
      "Epoch [178/200], Loss: 0.0091\n",
      "Epoch [179/200], Loss: 0.0112\n",
      "Epoch [180/200], Loss: 0.0175\n",
      "Epoch [181/200], Loss: 0.0080\n",
      "Epoch [182/200], Loss: 0.0094\n",
      "Epoch [183/200], Loss: 0.0141\n",
      "Epoch [184/200], Loss: 0.0258\n",
      "Epoch [185/200], Loss: 0.0102\n",
      "Epoch [186/200], Loss: 0.0180\n",
      "Epoch [187/200], Loss: 0.0213\n",
      "Epoch [188/200], Loss: 0.0133\n",
      "Epoch [189/200], Loss: 0.0118\n",
      "Epoch [190/200], Loss: 0.0138\n",
      "Epoch [191/200], Loss: 0.0138\n",
      "Epoch [192/200], Loss: 0.0136\n",
      "Epoch [193/200], Loss: 0.0114\n",
      "Epoch [194/200], Loss: 0.0128\n",
      "Epoch [195/200], Loss: 0.0100\n",
      "Epoch [196/200], Loss: 0.0088\n",
      "Epoch [197/200], Loss: 0.0101\n",
      "Epoch [198/200], Loss: 0.0102\n",
      "Epoch [199/200], Loss: 0.0097\n",
      "Epoch [200/200], Loss: 0.0091\n"
     ]
    }
   ],
   "execution_count": 49
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T08:28:08.294067Z",
     "start_time": "2025-05-26T08:28:08.242157Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Greedy-decoding inference\n",
    "@torch.no_grad()  # inference needs no autograd graph\n",
    "def translate(model, sentence, src_vocab, tgt_vocab, device):\n",
    "    \"\"\"Greedy-decode the poem for a title; returns space-joined characters.\"\"\"\n",
    "    model.eval()\n",
    "    tokens = list(sentence)\n",
    "    src_indices = [src_vocab.get(token, src_vocab['<unk>']) for token in tokens]\n",
    "    src = torch.tensor([src_vocab['<sos>']] + src_indices + [src_vocab['<eos>']]).unsqueeze(1).to(device)\n",
    "\n",
    "    # BUG FIX: scale embeddings by sqrt(d_model) exactly as in training —\n",
    "    # the previous version omitted it, so inference saw differently scaled inputs.\n",
    "    memory = model.transformer.encoder(model.pos_encoder(model.src_embedding(src) * math.sqrt(D_MODEL)))\n",
    "\n",
    "    tgt = torch.tensor([[tgt_vocab['<sos>']]]).to(device)\n",
    "    for _ in range(MAX_SEQ_LENGTH):\n",
    "\n",
    "        # tgt grows each step, so the causal mask must be rebuilt every iteration\n",
    "        tgt_mask = model.transformer.generate_square_subsequent_mask(tgt.size(0)).to(device)\n",
    "\n",
    "        output = model.transformer.decoder(\n",
    "            model.pos_encoder(model.tgt_embedding(tgt) * math.sqrt(D_MODEL)),\n",
    "            memory,\n",
    "            tgt_mask=tgt_mask\n",
    "        )\n",
    "        prob = model.fc_out(output[-1, :, :])\n",
    "        next_token = prob.argmax().item()\n",
    "\n",
    "        # Stop without appending <eos>; previously tolist()[1:-1] also chopped\n",
    "        # the last *real* token whenever the loop hit MAX_SEQ_LENGTH first.\n",
    "        if next_token == tgt_vocab['<eos>']:\n",
    "            break\n",
    "        tgt = torch.cat([tgt, torch.tensor([[next_token]]).to(device)], dim=0)\n",
    "\n",
    "    # squeeze(1) (not squeeze()) keeps a 1-D tensor even for a length-1 sequence;\n",
    "    # drop only the leading <sos> — <eos> was never appended.\n",
    "    translated = [idx_to_tgt.get(idx, '<unk>') for idx in tgt.squeeze(1).tolist()[1:]]\n",
    "    return ' '.join(translated)\n",
    "\n",
    "# Test generation\n",
    "test_sentence = \"静夜思\"\n",
    "print(f'Source: {test_sentence}')\n",
    "print(f'Translation: {translate(model, test_sentence, src_vocab, tgt_vocab, DEVICE)}')"
   ],
   "id": "fed9769a193d5a54",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Source: 静夜思\n",
      "Translation: 床 前 明 月 光 ， 疑 是 地 上 霜 。 举 头 望 明 月 ， 低 头 思 故 乡 。\n"
     ]
    }
   ],
   "execution_count": 50
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
