{
 "cells": [
  {
   "cell_type": "code",
   "id": "initial_id",
   "metadata": {
    "collapsed": true,
    "ExecuteTime": {
     "end_time": "2025-05-27T05:05:19.843725Z",
     "start_time": "2025-05-27T05:05:19.834323Z"
    }
   },
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "\n",
    "# Configuration\n",
    "# Prefer CUDA, then Apple-Silicon MPS, and fall back to CPU so the notebook\n",
    "# also runs on machines without either accelerator (the old code picked\n",
    "# 'mps:0' unconditionally whenever CUDA was absent).\n",
    "if torch.cuda.is_available():\n",
    "    DEVICE = torch.device('cuda')\n",
    "elif torch.backends.mps.is_available():\n",
    "    DEVICE = torch.device('mps:0')\n",
    "else:\n",
    "    DEVICE = torch.device('cpu')\n",
    "\n",
    "BATCH_SIZE = 2\n",
    "NUM_EPOCHS = 200\n",
    "LEARNING_RATE = 0.001\n",
    "D_MODEL = 128  # embedding size\n",
    "NUM_HEAD = 4\n",
    "NUM_ENCODER_LAYERS = 3\n",
    "NUM_DECODER_LAYERS = 3\n",
    "DIM_FEEDFORWARD = 512\n",
    "DROPOUT = 0.1\n",
    "MAX_SEQ_LENGTH = 10  # used at inference time\n",
    "\n",
    "SEED = 42  # seed torch so the random masking below is reproducible\n",
    "torch.manual_seed(SEED)"
   ],
   "outputs": [],
   "execution_count": 199
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-27T05:05:19.947781Z",
     "start_time": "2025-05-27T05:05:19.944163Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Load the Lu Xun article used as the character-level training corpus\n",
    "with open('luxun_article.txt', encoding='utf-8') as fh:\n",
    "    text = fh.read()\n",
    "text"
   ],
   "id": "f1c6fdad671f2635",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'早上，我静坐了一会儿。陈老五送进饭来，一碗菜，一碗蒸鱼；'"
      ]
     },
     "execution_count": 200,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 200
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-27T05:05:19.997456Z",
     "start_time": "2025-05-27T05:05:19.994133Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Preprocessing: character-level tokenization\n",
    "def preprocess_data(data):\n",
    "    \"\"\"Split the raw text into a list of single characters (the token unit here).\"\"\"\n",
    "    return [ch for ch in data]\n",
    "\n",
    "\n",
    "tokens = preprocess_data(text)\n",
    "tokens"
   ],
   "id": "7aade81080628def",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['早',\n",
       " '上',\n",
       " '，',\n",
       " '我',\n",
       " '静',\n",
       " '坐',\n",
       " '了',\n",
       " '一',\n",
       " '会',\n",
       " '儿',\n",
       " '。',\n",
       " '陈',\n",
       " '老',\n",
       " '五',\n",
       " '送',\n",
       " '进',\n",
       " '饭',\n",
       " '来',\n",
       " '，',\n",
       " '一',\n",
       " '碗',\n",
       " '菜',\n",
       " '，',\n",
       " '一',\n",
       " '碗',\n",
       " '蒸',\n",
       " '鱼',\n",
       " '；']"
      ]
     },
     "execution_count": 201,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 201
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-27T05:05:20.037493Z",
     "start_time": "2025-05-27T05:05:20.033993Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Build the vocabulary: special tokens get the fixed ids 0-4, then every\n",
    "# new character is assigned the next free id in order of first appearance.\n",
    "def build_vocab(tokens):\n",
    "    \"\"\"Map each token to a unique integer id, reserving ids 0-4 for specials.\"\"\"\n",
    "    vocab = {'<pad>': 0, '<sos>': 1, '<eos>': 2, '<unk>': 3, '<mask>': 4}\n",
    "    for token in tokens:\n",
    "        vocab.setdefault(token, len(vocab))\n",
    "    return vocab\n",
    "\n",
    "\n",
    "vocab = build_vocab(tokens)\n",
    "\n",
    "vocab"
   ],
   "id": "cb470ec639e0c102",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'<pad>': 0,\n",
       " '<sos>': 1,\n",
       " '<eos>': 2,\n",
       " '<unk>': 3,\n",
       " '<mask>': 4,\n",
       " '早': 5,\n",
       " '上': 6,\n",
       " '，': 7,\n",
       " '我': 8,\n",
       " '静': 9,\n",
       " '坐': 10,\n",
       " '了': 11,\n",
       " '一': 12,\n",
       " '会': 13,\n",
       " '儿': 14,\n",
       " '。': 15,\n",
       " '陈': 16,\n",
       " '老': 17,\n",
       " '五': 18,\n",
       " '送': 19,\n",
       " '进': 20,\n",
       " '饭': 21,\n",
       " '来': 22,\n",
       " '碗': 23,\n",
       " '菜': 24,\n",
       " '蒸': 25,\n",
       " '鱼': 26,\n",
       " '；': 27}"
      ]
     },
     "execution_count": 202,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 202
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-27T05:05:20.076588Z",
     "start_time": "2025-05-27T05:05:20.073472Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Invert the vocabulary (id -> token) for decoding model predictions\n",
    "idx_to_tgt = dict(zip(vocab.values(), vocab.keys()))\n",
    "idx_to_tgt"
   ],
   "id": "87fa6708baf80ca4",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{0: '<pad>',\n",
       " 1: '<sos>',\n",
       " 2: '<eos>',\n",
       " 3: '<unk>',\n",
       " 4: '<mask>',\n",
       " 5: '早',\n",
       " 6: '上',\n",
       " 7: '，',\n",
       " 8: '我',\n",
       " 9: '静',\n",
       " 10: '坐',\n",
       " 11: '了',\n",
       " 12: '一',\n",
       " 13: '会',\n",
       " 14: '儿',\n",
       " 15: '。',\n",
       " 16: '陈',\n",
       " 17: '老',\n",
       " 18: '五',\n",
       " 19: '送',\n",
       " 20: '进',\n",
       " 21: '饭',\n",
       " 22: '来',\n",
       " 23: '碗',\n",
       " 24: '菜',\n",
       " 25: '蒸',\n",
       " 26: '鱼',\n",
       " 27: '；'}"
      ]
     },
     "execution_count": 203,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 203
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-27T05:05:20.110492Z",
     "start_time": "2025-05-27T05:05:20.107283Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Scratch cell: sanity-check the randperm-then-slice idiom used for masking.\n",
    "# NOTE(review): the constants here are inconsistent with the dataset below\n",
    "# (permutation over 5 positions but 15% of a window of 10) — this cell only\n",
    "# demonstrates the mechanism, it is not used by the training pipeline.\n",
    "mask_indices = torch.randperm(5)[:int(10*0.15)]\n",
    "mask_indices"
   ],
   "id": "1abf417ed8dfed23",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([2])"
      ]
     },
     "execution_count": 204,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 204
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-27T05:05:20.146146Z",
     "start_time": "2025-05-27T05:05:20.140811Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Custom Dataset for masked-language-model (MLM) training\n",
    "MASK_RATE = 0.15  # fraction of tokens masked per window\n",
    "\n",
    "class MLMDataset(Dataset):\n",
    "    \"\"\"Slice the token stream into fixed-size windows and mask ~15% of each.\n",
    "\n",
    "    Each item is a (masked_inputs, inputs) pair of LongTensors; the model is\n",
    "    trained to reconstruct the full original window (all positions, not just\n",
    "    the masked ones).\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, tokens, vocab, window_size=10):\n",
    "        self.data = []\n",
    "\n",
    "        # Split into non-overlapping windows; a trailing partial window is dropped.\n",
    "        for i in range(0, len(tokens) - window_size, window_size):\n",
    "            inputs = [vocab.get(token, vocab['<unk>']) for token in tokens[i:i + window_size]]\n",
    "            masked_inputs = inputs.copy()\n",
    "\n",
    "            # Randomly choose MASK_RATE of the positions to replace with <mask>\n",
    "            mask_indices = torch.randperm(len(inputs))[:int(len(inputs) * MASK_RATE)]\n",
    "            for idx in mask_indices:\n",
    "                masked_inputs[idx.item()] = vocab['<mask>']\n",
    "\n",
    "            # (masked window, original window): predict originals from masked input\n",
    "            self.data.append((\n",
    "                torch.tensor(masked_inputs),\n",
    "                torch.tensor(inputs),\n",
    "            ))\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.data)\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        return self.data[idx]\n",
    "\n",
    "\n",
    "dataset = MLMDataset(tokens, vocab)\n",
    "dataset[-2:]"
   ],
   "id": "9d1c27155a783a6b",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[(tensor([21, 22,  7, 12,  4, 24,  7, 12, 23, 25]),\n",
       "  tensor([21, 22,  7, 12, 23, 24,  7, 12, 23, 25])),\n",
       " (tensor([22,  7, 12, 23, 24,  7,  4, 23, 25, 26]),\n",
       "  tensor([22,  7, 12, 23, 24,  7, 12, 23, 25, 26]))]"
      ]
     },
     "execution_count": 205,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 205
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-27T05:05:20.182817Z",
     "start_time": "2025-05-27T05:05:20.179237Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Data loader; shuffle is left at its default (False) so epoch order is deterministic.\n",
    "# NOTE(review): MLM training usually benefits from shuffle=True — consider enabling.\n",
    "data_loader = DataLoader(dataset, batch_size=BATCH_SIZE)"
   ],
   "id": "26f6e536375b476b",
   "outputs": [],
   "execution_count": 206
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-27T05:05:20.217125Z",
     "start_time": "2025-05-27T05:05:20.214192Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Sinusoidal positional encoding (Vaswani et al., 2017).\n",
    "# A large table is precomputed once; forward() slices it to the input length.\n",
    "class PositionalEncoding(nn.Module):\n",
    "    def __init__(self, d_model, max_len=5000):\n",
    "        super().__init__()\n",
    "        pe = torch.zeros(max_len, d_model)\n",
    "        pos = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n",
    "        # Standard formulation: the (sin, cos) pair i shares the frequency\n",
    "        # 1 / 10000^(2i/d_model).  The previous code used a doubled exponent\n",
    "        # for the sin half and a third scheme for the cos half, breaking the\n",
    "        # paired-rotation property of the encoding.\n",
    "        div = 10000.0 ** (torch.arange(0, d_model, 2).float() / d_model)\n",
    "        pe[:, 0::2] = torch.sin(pos / div)\n",
    "        pe[:, 1::2] = torch.cos(pos / div)\n",
    "        # register_buffer ties the table to the module, so model.to(DEVICE)\n",
    "        # moves it automatically — no dependency on the global DEVICE here.\n",
    "        self.register_buffer('pe', pe.unsqueeze(0))\n",
    "\n",
    "    def forward(self, x):\n",
    "        # x: (batch, seq_len, d_model); add the first seq_len position encodings\n",
    "        return x + self.pe[:, :x.size(1), :]"
   ],
   "id": "b4c6148adffb1b54",
   "outputs": [],
   "execution_count": 207
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-27T05:05:20.249172Z",
     "start_time": "2025-05-27T05:05:20.245441Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Position-wise feed-forward: two linear layers, output dim == input dim\n",
    "class FeedForward(nn.Module):\n",
    "    def __init__(self, embed_dim, fc_dim):\n",
    "        super().__init__()\n",
    "        self.fc1 = nn.Linear(embed_dim, fc_dim)\n",
    "        self.fc2 = nn.Linear(fc_dim, embed_dim)\n",
    "\n",
    "    def forward(self, x):\n",
    "        return self.fc2(torch.relu(self.fc1(x)))\n",
    "\n",
    "\n",
    "# Encoder: a stack of identical EncoderLayers applied in sequence\n",
    "class Encoder(nn.Module):\n",
    "    def __init__(self, embed_dim, fc_dim, num_heads, num_layers):\n",
    "        super().__init__()\n",
    "        self.num_layers = num_layers\n",
    "        self.layers = nn.ModuleList([\n",
    "            EncoderLayer(embed_dim, fc_dim, num_heads)\n",
    "            for _ in range(num_layers)\n",
    "        ])\n",
    "\n",
    "    def forward(self, encoder_inputs):\n",
    "        for layer in self.layers:\n",
    "            encoder_inputs = layer(encoder_inputs)\n",
    "        return encoder_inputs\n",
    "\n",
    "\n",
    "# One transformer encoder layer: self-attention + FFN, each followed by a\n",
    "# residual connection and LayerNorm (post-norm, as in the original paper)\n",
    "class EncoderLayer(nn.Module):\n",
    "    def __init__(self, embed_dim, fc_dim, num_heads):\n",
    "        super().__init__()\n",
    "        # BUGFIX: inputs are (batch, seq, embed); without batch_first=True,\n",
    "        # nn.MultiheadAttention treats dim 0 as the sequence, so attention\n",
    "        # was silently computed across the batch instead of across tokens.\n",
    "        self.mha = nn.MultiheadAttention(embed_dim, num_heads, batch_first=True)\n",
    "        self.feed_forward = FeedForward(embed_dim, fc_dim)\n",
    "        self.layer_norm1 = nn.LayerNorm(embed_dim)\n",
    "        self.layer_norm2 = nn.LayerNorm(embed_dim)\n",
    "\n",
    "    def forward(self, encoder_inputs):\n",
    "        # 1. self-attention   2. residual + LayerNorm\n",
    "        attn_output, _ = self.mha(query=encoder_inputs, key=encoder_inputs, value=encoder_inputs)\n",
    "        encoder_inputs = self.layer_norm1(encoder_inputs + attn_output)\n",
    "\n",
    "        # 3. feed-forward     4. residual + LayerNorm\n",
    "        return self.layer_norm2(encoder_inputs + self.feed_forward(encoder_inputs))"
   ],
   "id": "a534618e13744964",
   "outputs": [],
   "execution_count": 208
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-27T05:05:20.284410Z",
     "start_time": "2025-05-27T05:05:20.281632Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# BERT-style model: embedding + positional encoding + encoder stack + MLM head\n",
    "class BERTModel(nn.Module):\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "        self.embedding = nn.Embedding(len(vocab), D_MODEL)\n",
    "        self.pos_encoder = PositionalEncoding(D_MODEL)\n",
    "        self.encoder = Encoder(\n",
    "            embed_dim=D_MODEL,\n",
    "            num_heads=NUM_HEAD,\n",
    "            num_layers=NUM_ENCODER_LAYERS,\n",
    "            fc_dim=D_MODEL * 4\n",
    "        )\n",
    "        self.linear = nn.Linear(D_MODEL, len(vocab))  # MLM prediction head\n",
    "\n",
    "    def forward(self, src):\n",
    "        # Token embedding plus positional encoding\n",
    "        src_emb = self.pos_encoder(self.embedding(src))\n",
    "\n",
    "        # Encoder-only architecture, as in BERT (the previous comment\n",
    "        # incorrectly described this as a decoder-only mode)\n",
    "        output = self.encoder(src_emb)\n",
    "\n",
    "        # Per-position logits over the vocabulary\n",
    "        return self.linear(output)"
   ],
   "id": "8e52097d64495e52",
   "outputs": [],
   "execution_count": 209
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-27T05:05:41.424164Z",
     "start_time": "2025-05-27T05:05:20.314367Z"
    }
   },
   "cell_type": "code",
   "source": [
    "model = BERTModel().to(DEVICE)\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)\n",
    "criterion = nn.CrossEntropyLoss()\n",
    "\n",
    "\n",
    "def train_one_epoch():\n",
    "    \"\"\"Run one pass over data_loader; return the mean batch loss.\"\"\"\n",
    "    model.train()\n",
    "    running = 0.0\n",
    "    for src, tgt in data_loader:\n",
    "        src, tgt = src.to(DEVICE), tgt.to(DEVICE)\n",
    "\n",
    "        optimizer.zero_grad()\n",
    "        logits = model(src)\n",
    "\n",
    "        # Flatten (batch, seq, vocab) -> (batch*seq, vocab) for CrossEntropyLoss\n",
    "        loss = criterion(logits.view(-1, logits.size(-1)), tgt.view(-1))\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "\n",
    "        running += loss.item()\n",
    "    return running / len(data_loader)\n",
    "\n",
    "\n",
    "# Training loop: reconstruct the full window from its masked version\n",
    "for epoch in range(NUM_EPOCHS):\n",
    "    avg_loss = train_one_epoch()\n",
    "    print(f'Epoch [{epoch + 1}/{NUM_EPOCHS}], Loss: {avg_loss:.4f}')"
   ],
   "id": "2eb65b25b022f95f",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch [1/200], Loss: 2.4103\n",
      "Epoch [2/200], Loss: 0.6094\n",
      "Epoch [3/200], Loss: 0.2764\n",
      "Epoch [4/200], Loss: 0.1561\n",
      "Epoch [5/200], Loss: 0.1030\n",
      "Epoch [6/200], Loss: 0.0847\n",
      "Epoch [7/200], Loss: 0.0800\n",
      "Epoch [8/200], Loss: 0.0903\n",
      "Epoch [9/200], Loss: 0.0711\n",
      "Epoch [10/200], Loss: 0.0946\n",
      "Epoch [11/200], Loss: 0.0669\n",
      "Epoch [12/200], Loss: 0.0563\n",
      "Epoch [13/200], Loss: 0.0484\n",
      "Epoch [14/200], Loss: 0.0475\n",
      "Epoch [15/200], Loss: 0.0402\n",
      "Epoch [16/200], Loss: 0.0382\n",
      "Epoch [17/200], Loss: 0.0365\n",
      "Epoch [18/200], Loss: 0.0384\n",
      "Epoch [19/200], Loss: 0.0357\n",
      "Epoch [20/200], Loss: 0.0432\n",
      "Epoch [21/200], Loss: 0.0354\n",
      "Epoch [22/200], Loss: 0.0292\n",
      "Epoch [23/200], Loss: 0.0246\n",
      "Epoch [24/200], Loss: 0.0221\n",
      "Epoch [25/200], Loss: 0.0222\n",
      "Epoch [26/200], Loss: 0.0191\n",
      "Epoch [27/200], Loss: 0.0179\n",
      "Epoch [28/200], Loss: 0.0258\n",
      "Epoch [29/200], Loss: 0.0581\n",
      "Epoch [30/200], Loss: 0.0392\n",
      "Epoch [31/200], Loss: 0.0811\n",
      "Epoch [32/200], Loss: 0.0649\n",
      "Epoch [33/200], Loss: 0.0943\n",
      "Epoch [34/200], Loss: 0.0691\n",
      "Epoch [35/200], Loss: 0.0413\n",
      "Epoch [36/200], Loss: 0.0338\n",
      "Epoch [37/200], Loss: 0.0308\n",
      "Epoch [38/200], Loss: 0.0329\n",
      "Epoch [39/200], Loss: 0.0617\n",
      "Epoch [40/200], Loss: 0.0304\n",
      "Epoch [41/200], Loss: 0.0346\n",
      "Epoch [42/200], Loss: 0.0266\n",
      "Epoch [43/200], Loss: 0.0362\n",
      "Epoch [44/200], Loss: 0.0200\n",
      "Epoch [45/200], Loss: 0.0175\n",
      "Epoch [46/200], Loss: 0.0164\n",
      "Epoch [47/200], Loss: 0.0154\n",
      "Epoch [48/200], Loss: 0.0148\n",
      "Epoch [49/200], Loss: 0.0156\n",
      "Epoch [50/200], Loss: 0.0292\n",
      "Epoch [51/200], Loss: 0.0186\n",
      "Epoch [52/200], Loss: 0.0164\n",
      "Epoch [53/200], Loss: 0.0143\n",
      "Epoch [54/200], Loss: 0.0129\n",
      "Epoch [55/200], Loss: 0.0118\n",
      "Epoch [56/200], Loss: 0.0110\n",
      "Epoch [57/200], Loss: 0.0105\n",
      "Epoch [58/200], Loss: 0.0103\n",
      "Epoch [59/200], Loss: 0.0101\n",
      "Epoch [60/200], Loss: 0.0100\n",
      "Epoch [61/200], Loss: 0.0099\n",
      "Epoch [62/200], Loss: 0.0098\n",
      "Epoch [63/200], Loss: 0.0097\n",
      "Epoch [64/200], Loss: 0.0097\n",
      "Epoch [65/200], Loss: 0.0096\n",
      "Epoch [66/200], Loss: 0.0095\n",
      "Epoch [67/200], Loss: 0.0095\n",
      "Epoch [68/200], Loss: 0.0095\n",
      "Epoch [69/200], Loss: 0.0096\n",
      "Epoch [70/200], Loss: 0.0101\n",
      "Epoch [71/200], Loss: 0.0111\n",
      "Epoch [72/200], Loss: 0.0122\n",
      "Epoch [73/200], Loss: 0.0108\n",
      "Epoch [74/200], Loss: 0.0096\n",
      "Epoch [75/200], Loss: 0.0092\n",
      "Epoch [76/200], Loss: 0.0091\n",
      "Epoch [77/200], Loss: 0.0091\n",
      "Epoch [78/200], Loss: 0.0091\n",
      "Epoch [79/200], Loss: 0.0090\n",
      "Epoch [80/200], Loss: 0.0090\n",
      "Epoch [81/200], Loss: 0.0089\n",
      "Epoch [82/200], Loss: 0.0089\n",
      "Epoch [83/200], Loss: 0.0089\n",
      "Epoch [84/200], Loss: 0.0089\n",
      "Epoch [85/200], Loss: 0.0088\n",
      "Epoch [86/200], Loss: 0.0088\n",
      "Epoch [87/200], Loss: 0.0088\n",
      "Epoch [88/200], Loss: 0.0088\n",
      "Epoch [89/200], Loss: 0.0087\n",
      "Epoch [90/200], Loss: 0.0087\n",
      "Epoch [91/200], Loss: 0.0087\n",
      "Epoch [92/200], Loss: 0.0087\n",
      "Epoch [93/200], Loss: 0.0087\n",
      "Epoch [94/200], Loss: 0.0087\n",
      "Epoch [95/200], Loss: 0.0086\n",
      "Epoch [96/200], Loss: 0.0086\n",
      "Epoch [97/200], Loss: 0.0086\n",
      "Epoch [98/200], Loss: 0.0086\n",
      "Epoch [99/200], Loss: 0.0086\n",
      "Epoch [100/200], Loss: 0.0086\n",
      "Epoch [101/200], Loss: 0.0085\n",
      "Epoch [102/200], Loss: 0.0085\n",
      "Epoch [103/200], Loss: 0.0085\n",
      "Epoch [104/200], Loss: 0.0085\n",
      "Epoch [105/200], Loss: 0.0086\n",
      "Epoch [106/200], Loss: 0.0091\n",
      "Epoch [107/200], Loss: 0.0103\n",
      "Epoch [108/200], Loss: 0.0123\n",
      "Epoch [109/200], Loss: 0.0112\n",
      "Epoch [110/200], Loss: 0.0094\n",
      "Epoch [111/200], Loss: 0.0086\n",
      "Epoch [112/200], Loss: 0.0084\n",
      "Epoch [113/200], Loss: 0.0085\n",
      "Epoch [114/200], Loss: 0.0084\n",
      "Epoch [115/200], Loss: 0.0084\n",
      "Epoch [116/200], Loss: 0.0084\n",
      "Epoch [117/200], Loss: 0.0084\n",
      "Epoch [118/200], Loss: 0.0084\n",
      "Epoch [119/200], Loss: 0.0084\n",
      "Epoch [120/200], Loss: 0.0083\n",
      "Epoch [121/200], Loss: 0.0083\n",
      "Epoch [122/200], Loss: 0.0083\n",
      "Epoch [123/200], Loss: 0.0083\n",
      "Epoch [124/200], Loss: 0.0083\n",
      "Epoch [125/200], Loss: 0.0083\n",
      "Epoch [126/200], Loss: 0.0083\n",
      "Epoch [127/200], Loss: 0.0083\n",
      "Epoch [128/200], Loss: 0.0083\n",
      "Epoch [129/200], Loss: 0.0083\n",
      "Epoch [130/200], Loss: 0.0083\n",
      "Epoch [131/200], Loss: 0.0083\n",
      "Epoch [132/200], Loss: 0.0082\n",
      "Epoch [133/200], Loss: 0.0082\n",
      "Epoch [134/200], Loss: 0.0082\n",
      "Epoch [135/200], Loss: 0.0082\n",
      "Epoch [136/200], Loss: 0.0082\n",
      "Epoch [137/200], Loss: 0.0082\n",
      "Epoch [138/200], Loss: 0.0082\n",
      "Epoch [139/200], Loss: 0.0082\n",
      "Epoch [140/200], Loss: 0.0082\n",
      "Epoch [141/200], Loss: 0.0082\n",
      "Epoch [142/200], Loss: 0.0082\n",
      "Epoch [143/200], Loss: 0.0083\n",
      "Epoch [144/200], Loss: 0.0084\n",
      "Epoch [145/200], Loss: 0.0088\n",
      "Epoch [146/200], Loss: 0.0096\n",
      "Epoch [147/200], Loss: 0.0103\n",
      "Epoch [148/200], Loss: 0.0101\n",
      "Epoch [149/200], Loss: 0.0091\n",
      "Epoch [150/200], Loss: 0.0085\n",
      "Epoch [151/200], Loss: 0.0082\n",
      "Epoch [152/200], Loss: 0.0081\n",
      "Epoch [153/200], Loss: 0.0081\n",
      "Epoch [154/200], Loss: 0.0081\n",
      "Epoch [155/200], Loss: 0.0081\n",
      "Epoch [156/200], Loss: 0.0081\n",
      "Epoch [157/200], Loss: 0.0081\n",
      "Epoch [158/200], Loss: 0.0081\n",
      "Epoch [159/200], Loss: 0.0081\n",
      "Epoch [160/200], Loss: 0.0081\n",
      "Epoch [161/200], Loss: 0.0081\n",
      "Epoch [162/200], Loss: 0.0081\n",
      "Epoch [163/200], Loss: 0.0081\n",
      "Epoch [164/200], Loss: 0.0081\n",
      "Epoch [165/200], Loss: 0.0081\n",
      "Epoch [166/200], Loss: 0.0081\n",
      "Epoch [167/200], Loss: 0.0081\n",
      "Epoch [168/200], Loss: 0.0081\n",
      "Epoch [169/200], Loss: 0.0081\n",
      "Epoch [170/200], Loss: 0.0081\n",
      "Epoch [171/200], Loss: 0.0081\n",
      "Epoch [172/200], Loss: 0.0080\n",
      "Epoch [173/200], Loss: 0.0080\n",
      "Epoch [174/200], Loss: 0.0080\n",
      "Epoch [175/200], Loss: 0.0080\n",
      "Epoch [176/200], Loss: 0.0080\n",
      "Epoch [177/200], Loss: 0.0081\n",
      "Epoch [178/200], Loss: 0.0081\n",
      "Epoch [179/200], Loss: 0.0082\n",
      "Epoch [180/200], Loss: 0.0083\n",
      "Epoch [181/200], Loss: 0.0087\n",
      "Epoch [182/200], Loss: 0.0092\n",
      "Epoch [183/200], Loss: 0.0096\n",
      "Epoch [184/200], Loss: 0.0095\n",
      "Epoch [185/200], Loss: 0.0089\n",
      "Epoch [186/200], Loss: 0.0084\n",
      "Epoch [187/200], Loss: 0.0082\n",
      "Epoch [188/200], Loss: 0.0080\n",
      "Epoch [189/200], Loss: 0.0080\n",
      "Epoch [190/200], Loss: 0.0080\n",
      "Epoch [191/200], Loss: 0.0080\n",
      "Epoch [192/200], Loss: 0.0080\n",
      "Epoch [193/200], Loss: 0.0080\n",
      "Epoch [194/200], Loss: 0.0080\n",
      "Epoch [195/200], Loss: 0.0080\n",
      "Epoch [196/200], Loss: 0.0080\n",
      "Epoch [197/200], Loss: 0.0080\n",
      "Epoch [198/200], Loss: 0.0080\n",
      "Epoch [199/200], Loss: 0.0080\n",
      "Epoch [200/200], Loss: 0.0080\n"
     ]
    }
   ],
   "execution_count": 210
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-27T05:23:33.014558Z",
     "start_time": "2025-05-27T05:23:33.002805Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Inference: mask one random position and let the model fill it in\n",
    "def generate(model, sentence, vocab, device):\n",
    "    \"\"\"Mask a random character of `sentence`; return the model's prediction for it.\"\"\"\n",
    "    model.eval()\n",
    "\n",
    "    tokens = list(sentence)\n",
    "    # Use .get with <unk> so out-of-vocabulary characters don't raise KeyError\n",
    "    # (consistent with how MLMDataset encodes tokens).\n",
    "    src = torch.tensor([vocab.get(token, vocab['<unk>']) for token in tokens]).unsqueeze(0).to(device)\n",
    "    print(src)\n",
    "\n",
    "    # Pick one random position and replace it with the <mask> token\n",
    "    mask_pos = torch.randint(0, len(src[0]), (1,)).item()\n",
    "    src[0][mask_pos] = vocab['<mask>']\n",
    "    print(src)\n",
    "\n",
    "    # No gradients needed at inference time\n",
    "    with torch.no_grad():\n",
    "        output = model(src)\n",
    "\n",
    "    # Greedy decode only at the masked position\n",
    "    prob = output[0][mask_pos]\n",
    "    next_token = prob.argmax().item()\n",
    "    return idx_to_tgt.get(next_token, '<unk>')\n",
    "\n",
    "\n",
    "# Try it on a sentence from the training corpus\n",
    "sentence = \"陈老五送进饭来\"\n",
    "print(generate(model, sentence, vocab, DEVICE))"
   ],
   "id": "fed9769a193d5a54",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[16, 17, 18, 19, 20, 21, 22]], device='mps:0')\n",
      "tensor([[16,  4, 18, 19, 20, 21, 22]], device='mps:0')\n",
      "五\n"
     ]
    }
   ],
   "execution_count": 435
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
