{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "e25ee3b07acf5d94",
   "metadata": {},
   "source": [
    "在前面的RNN中，在训练时，如果我们输入的序列长度是5，那么输出的序列长度也是5，而Seq2Seq则输入和输出的长度可以是不一样的，因此非常适合做文本翻译的任务。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 340,
   "id": "4333c824dd487f82",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-23T05:15:06.446443Z",
     "start_time": "2025-05-23T05:15:06.434854Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "([['你好', '，', '今天天气', '真', '好', '！'],\n",
       "  ['你', '吃饭', '了', '吗', '？'],\n",
       "  ['深度', '学习', '很', '有趣', '。'],\n",
       "  ['我们', '一起', '学习', '吧', '。'],\n",
       "  ['这是', '一个', '测试', '例子', '。']],\n",
       " [['hello,', 'the', 'weather', 'is', 'nice', 'today!'],\n",
       "  ['have', 'you', 'eaten', 'yet?'],\n",
       "  ['deep', 'learning', 'is', 'interesting.'],\n",
       "  [\"let's\", 'study', 'together.'],\n",
       "  ['this', 'is', 'a', 'test', 'example.']])"
      ]
     },
     "execution_count": 340,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "from collections import Counter\n",
     "\n",
     "import jieba\n",
     "import torch\n",
     "import torch.nn as nn\n",
     "import torch.optim as optim\n",
     "\n",
     "# Tiny hand-written parallel corpus: (Chinese sentence, English translation) pairs\n",
     "raw_data = [\n",
     "    (\"你好，今天天气真好！\", \"Hello, the weather is nice today!\"),\n",
     "    (\"你吃饭了吗？\", \"Have you eaten yet?\"),\n",
     "    (\"深度学习很有趣。\", \"Deep learning is interesting.\"),\n",
     "    (\"我们一起学习吧。\", \"Let's study together.\"),\n",
     "    (\"这是一个测试例子。\", \"This is a test example.\")\n",
     "]\n",
     "\n",
     "\n",
     "# Chinese tokenizer\n",
     "def tokenize_chinese(text):\n",
     "    \"\"\"Segment a Chinese sentence into a list of word tokens with jieba.\"\"\"\n",
     "    return list(jieba.cut(text))  # jieba.cut yields a generator; materialize it\n",
     "\n",
     "\n",
     "# English tokenizer\n",
     "def tokenize_english(text):\n",
     "    \"\"\"Lower-case an English sentence and split it on whitespace.\"\"\"\n",
     "    return text.lower().split()\n",
     "\n",
     "\n",
     "# Tokenize both sides of every sentence pair\n",
     "chinese_sentences = [tokenize_chinese(pair[0]) for pair in raw_data]\n",
     "english_sentences = [tokenize_english(pair[1]) for pair in raw_data]\n",
     "\n",
     "chinese_sentences, english_sentences"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 341,
   "id": "48ec47fe0c16591b",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-23T05:15:06.482171Z",
     "start_time": "2025-05-23T05:15:06.477345Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['<pad>', '<sos>', '<eos>', '<unk>', '你好', '，', '今天天气', '真', '好', '！', '你', '吃饭', '了', '吗', '？', '深度', '学习', '很', '有趣', '。', '我们', '一起', '吧', '这是', '一个', '测试', '例子']\n",
      "['<pad>', '<sos>', '<eos>', '<unk>', 'hello,', 'the', 'weather', 'is', 'nice', 'today!', 'have', 'you', 'eaten', 'yet?', 'deep', 'learning', 'interesting.', \"let's\", 'study', 'together.', 'this', 'a', 'test', 'example.']\n",
      "{'<pad>': 0, '<sos>': 1, '<eos>': 2, '<unk>': 3, '你好': 4, '，': 5, '今天天气': 6, '真': 7, '好': 8, '！': 9, '你': 10, '吃饭': 11, '了': 12, '吗': 13, '？': 14, '深度': 15, '学习': 16, '很': 17, '有趣': 18, '。': 19, '我们': 20, '一起': 21, '吧': 22, '这是': 23, '一个': 24, '测试': 25, '例子': 26}\n",
      "{'<pad>': 0, '<sos>': 1, '<eos>': 2, '<unk>': 3, 'hello,': 4, 'the': 5, 'weather': 6, 'is': 7, 'nice': 8, 'today!': 9, 'have': 10, 'you': 11, 'eaten': 12, 'yet?': 13, 'deep': 14, 'learning': 15, 'interesting.': 16, \"let's\": 17, 'study': 18, 'together.': 19, 'this': 20, 'a': 21, 'test': 22, 'example.': 23}\n"
     ]
    }
   ],
   "source": [
    "# 处理特殊符号\n",
    "special_tokens = ['<pad>', '<sos>', '<eos>', '<unk>']\n",
    "\n",
    "\n",
    "# 构建词汇表\n",
    "def build_vocab(sentences, min_freq=1):\n",
    "    counter = Counter()\n",
    "\n",
    "    for sentence in sentences:\n",
    "        for word in sentence:\n",
    "            counter[word] += 1\n",
    "\n",
    "    vocab = special_tokens.copy()\n",
    "\n",
    "    # 遍历没个词以及出现的次数，至少要出现min_freq次才放到词汇表中\n",
    "    for word, count in counter.items():\n",
    "        if count >= min_freq and word not in special_tokens:\n",
    "            vocab.append(word)\n",
    "\n",
    "    word2idx = {word: idx for idx, word in enumerate(vocab)}\n",
    "    return vocab, word2idx\n",
    "\n",
    "\n",
    "# 构建中英文词汇表\n",
    "zh_vocab, zh_word2idx = build_vocab([sentence for sentence in chinese_sentences])\n",
    "\n",
    "en_vocab, en_word2idx = build_vocab([sentence for sentence in english_sentences])\n",
    "\n",
    "print(zh_vocab)\n",
    "print(en_vocab)\n",
    "print(zh_word2idx)\n",
    "print(en_word2idx)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 342,
   "id": "d2fff714a1743f7",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-23T05:15:06.498740Z",
     "start_time": "2025-05-23T05:15:06.494240Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[tensor([1, 4, 5, 6, 7, 8, 9, 2]), tensor([ 1, 10, 11, 12, 13, 14,  2]), tensor([ 1, 15, 16, 17, 18, 19,  2]), tensor([ 1, 20, 21, 16, 22, 19,  2]), tensor([ 1, 23, 24, 25, 26, 19,  2])]\n",
      "[tensor([1, 4, 5, 6, 7, 8, 9, 2]), tensor([ 1, 10, 11, 12, 13,  2]), tensor([ 1, 14, 15,  7, 16,  2]), tensor([ 1, 17, 18, 19,  2]), tensor([ 1, 20,  7, 21, 22, 23,  2])]\n"
     ]
    }
   ],
   "source": [
     "# Hyper-parameters\n",
     "ZH_VOCAB_SIZE = len(zh_vocab)\n",
     "EN_VOCAB_SIZE = len(en_vocab)\n",
     "HIDDEN_SIZE = 256\n",
     "BATCH_SIZE = 2\n",
     "LEARNING_RATE = 0.005\n",
     "\n",
     "\n",
     "# Convert a token list into vocabulary indices\n",
     "def numericalize(sentence, word2idx):\n",
     "    \"\"\"Map each token to its index; unknown tokens fall back to '<unk>'.\"\"\"\n",
     "    # Words missing from the vocabulary are replaced by the '<unk>' index\n",
     "    return [word2idx.get(word, word2idx['<unk>']) for word in sentence]\n",
     "\n",
     "\n",
     "processed_data_ch = []\n",
     "processed_data_en = []\n",
     "for ch, en in zip(chinese_sentences, english_sentences):\n",
     "    # ch / en are one tokenized Chinese sentence and its English counterpart.\n",
     "    # Wrap every sentence in '<sos>' ... '<eos>' so the model can learn where\n",
     "    # a sentence starts and when to stop generating.\n",
     "    ch_numerical = [zh_word2idx['<sos>']] + numericalize(ch, zh_word2idx) + [zh_word2idx['<eos>']]\n",
     "    en_numerical = [en_word2idx['<sos>']] + numericalize(en, en_word2idx) + [en_word2idx['<eos>']]\n",
     "    processed_data_ch.append(torch.LongTensor(ch_numerical))\n",
     "    processed_data_en.append(torch.LongTensor(en_numerical))\n",
     "\n",
     "print(processed_data_ch)\n",
     "print(processed_data_en)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 343,
   "id": "57a324dbb970a327",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-23T05:15:06.515516Z",
     "start_time": "2025-05-23T05:15:06.511788Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ 1,  4,  5,  6,  7,  8,  9,  2],\n",
       "        [ 1, 10, 11, 12, 13, 14,  2,  0],\n",
       "        [ 1, 15, 16, 17, 18, 19,  2,  0],\n",
       "        [ 1, 20, 21, 16, 22, 19,  2,  0],\n",
       "        [ 1, 23, 24, 25, 26, 19,  2,  0]])"
      ]
     },
     "execution_count": 343,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Pad the Chinese sequences with '<pad>' so every sentence in the batch\n",
     "# has the same length (required to stack them into one tensor)\n",
     "processed_data_ch_pad = nn.utils.rnn.pad_sequence(processed_data_ch, batch_first=True,\n",
     "                                                  padding_value=zh_word2idx['<pad>'])\n",
     "processed_data_ch_pad"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 344,
   "id": "713085be4adad395",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-23T05:15:06.559813Z",
     "start_time": "2025-05-23T05:15:06.556355Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ 1,  4,  5,  6,  7,  8,  9,  2],\n",
       "        [ 1, 10, 11, 12, 13,  2,  0,  0],\n",
       "        [ 1, 14, 15,  7, 16,  2,  0,  0],\n",
       "        [ 1, 17, 18, 19,  2,  0,  0,  0],\n",
       "        [ 1, 20,  7, 21, 22, 23,  2,  0]])"
      ]
     },
     "execution_count": 344,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Pad the English sequences with '<pad>' so every sentence in the batch\n",
     "# has the same length (required to stack them into one tensor)\n",
     "processed_data_en_pad = nn.utils.rnn.pad_sequence(processed_data_en, batch_first=True,\n",
     "                                                  padding_value=en_word2idx['<pad>'])\n",
     "processed_data_en_pad"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 345,
   "id": "98774b884bd6cc05",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-23T05:15:06.624026Z",
     "start_time": "2025-05-23T05:15:06.620592Z"
    }
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(tensor([[ 1,  4,  5,  6,  7,  8,  9,  2],\n",
       "         [ 1, 10, 11, 12, 13, 14,  2,  0]]),\n",
       " tensor([[ 1,  4,  5,  6,  7,  8,  9,  2],\n",
       "         [ 1, 10, 11, 12, 13,  2,  0,  0]]))"
      ]
     },
     "execution_count": 345,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "\n",
     "from torch.utils.data import DataLoader, TensorDataset\n",
     "\n",
     "# Pair each padded source batch row with its target row and serve\n",
     "# shuffled mini-batches of size BATCH_SIZE\n",
     "dataset = TensorDataset(processed_data_ch_pad, processed_data_en_pad)\n",
     "dataloader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True)\n",
     "\n",
     "dataset[:2]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 349,
   "id": "e0bb3d119e9e8fc9",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-23T05:18:43.715089Z",
     "start_time": "2025-05-23T05:18:41.241253Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1, Loss: 3.2416\n",
      "Epoch 1, Loss: 3.3868\n",
      "Epoch 1, Loss: 3.8177\n",
      "Epoch 2, Loss: 1.1930\n",
      "Epoch 2, Loss: 1.1911\n",
      "Epoch 2, Loss: 1.4663\n",
      "Epoch 3, Loss: 0.3807\n",
      "Epoch 3, Loss: 0.4848\n",
      "Epoch 3, Loss: 0.3804\n",
      "Epoch 4, Loss: 0.2378\n",
      "Epoch 4, Loss: 0.2092\n",
      "Epoch 4, Loss: 0.0702\n",
      "Epoch 5, Loss: 0.0886\n",
      "Epoch 5, Loss: 0.0346\n",
      "Epoch 5, Loss: 0.3840\n",
      "Epoch 6, Loss: 0.0399\n",
      "Epoch 6, Loss: 0.0569\n",
      "Epoch 6, Loss: 0.0101\n",
      "Epoch 7, Loss: 0.0083\n",
      "Epoch 7, Loss: 0.0210\n",
      "Epoch 7, Loss: 0.1439\n",
      "Epoch 8, Loss: 0.0098\n",
      "Epoch 8, Loss: 0.0225\n",
      "Epoch 8, Loss: 0.0158\n",
      "Epoch 9, Loss: 0.0084\n",
      "Epoch 9, Loss: 0.0074\n",
      "Epoch 9, Loss: 0.0140\n",
      "Epoch 10, Loss: 0.0061\n",
      "Epoch 10, Loss: 0.0102\n",
      "Epoch 10, Loss: 0.0020\n",
      "Epoch 11, Loss: 0.0080\n",
      "Epoch 11, Loss: 0.0047\n",
      "Epoch 11, Loss: 0.0016\n",
      "Epoch 12, Loss: 0.0069\n",
      "Epoch 12, Loss: 0.0025\n",
      "Epoch 12, Loss: 0.0018\n",
      "Epoch 13, Loss: 0.0032\n",
      "Epoch 13, Loss: 0.0020\n",
      "Epoch 13, Loss: 0.0062\n",
      "Epoch 14, Loss: 0.0023\n",
      "Epoch 14, Loss: 0.0031\n",
      "Epoch 14, Loss: 0.0026\n",
      "Epoch 15, Loss: 0.0019\n",
      "Epoch 15, Loss: 0.0022\n",
      "Epoch 15, Loss: 0.0028\n",
      "Epoch 16, Loss: 0.0031\n",
      "Epoch 16, Loss: 0.0014\n",
      "Epoch 16, Loss: 0.0011\n",
      "Epoch 17, Loss: 0.0015\n",
      "Epoch 17, Loss: 0.0015\n",
      "Epoch 17, Loss: 0.0030\n",
      "Epoch 18, Loss: 0.0012\n",
      "Epoch 18, Loss: 0.0016\n",
      "Epoch 18, Loss: 0.0027\n",
      "Epoch 19, Loss: 0.0018\n",
      "Epoch 19, Loss: 0.0016\n",
      "Epoch 19, Loss: 0.0008\n",
      "Epoch 20, Loss: 0.0011\n",
      "Epoch 20, Loss: 0.0015\n",
      "Epoch 20, Loss: 0.0017\n",
      "Epoch 21, Loss: 0.0008\n",
      "Epoch 21, Loss: 0.0017\n",
      "Epoch 21, Loss: 0.0016\n",
      "Epoch 22, Loss: 0.0015\n",
      "Epoch 22, Loss: 0.0013\n",
      "Epoch 22, Loss: 0.0007\n",
      "Epoch 23, Loss: 0.0011\n",
      "Epoch 23, Loss: 0.0016\n",
      "Epoch 23, Loss: 0.0007\n",
      "Epoch 24, Loss: 0.0011\n",
      "Epoch 24, Loss: 0.0015\n",
      "Epoch 24, Loss: 0.0007\n",
      "Epoch 25, Loss: 0.0007\n",
      "Epoch 25, Loss: 0.0013\n",
      "Epoch 25, Loss: 0.0017\n",
      "Epoch 26, Loss: 0.0014\n",
      "Epoch 26, Loss: 0.0010\n",
      "Epoch 26, Loss: 0.0006\n",
      "Epoch 27, Loss: 0.0012\n",
      "Epoch 27, Loss: 0.0007\n",
      "Epoch 27, Loss: 0.0015\n",
      "Epoch 28, Loss: 0.0009\n",
      "Epoch 28, Loss: 0.0009\n",
      "Epoch 28, Loss: 0.0015\n",
      "Epoch 29, Loss: 0.0010\n",
      "Epoch 29, Loss: 0.0008\n",
      "Epoch 29, Loss: 0.0011\n",
      "Epoch 30, Loss: 0.0009\n",
      "Epoch 30, Loss: 0.0009\n",
      "Epoch 30, Loss: 0.0011\n",
      "Epoch 31, Loss: 0.0012\n",
      "Epoch 31, Loss: 0.0008\n",
      "Epoch 31, Loss: 0.0006\n",
      "Epoch 32, Loss: 0.0009\n",
      "Epoch 32, Loss: 0.0008\n",
      "Epoch 32, Loss: 0.0010\n",
      "Epoch 33, Loss: 0.0007\n",
      "Epoch 33, Loss: 0.0009\n",
      "Epoch 33, Loss: 0.0010\n",
      "Epoch 34, Loss: 0.0010\n",
      "Epoch 34, Loss: 0.0008\n",
      "Epoch 34, Loss: 0.0006\n",
      "Epoch 35, Loss: 0.0009\n",
      "Epoch 35, Loss: 0.0009\n",
      "Epoch 35, Loss: 0.0005\n",
      "Epoch 36, Loss: 0.0008\n",
      "Epoch 36, Loss: 0.0007\n",
      "Epoch 36, Loss: 0.0011\n",
      "Epoch 37, Loss: 0.0009\n",
      "Epoch 37, Loss: 0.0008\n",
      "Epoch 37, Loss: 0.0005\n",
      "Epoch 38, Loss: 0.0007\n",
      "Epoch 38, Loss: 0.0009\n",
      "Epoch 38, Loss: 0.0005\n",
      "Epoch 39, Loss: 0.0005\n",
      "Epoch 39, Loss: 0.0009\n",
      "Epoch 39, Loss: 0.0010\n",
      "Epoch 40, Loss: 0.0008\n",
      "Epoch 40, Loss: 0.0005\n",
      "Epoch 40, Loss: 0.0010\n",
      "Epoch 41, Loss: 0.0008\n",
      "Epoch 41, Loss: 0.0006\n",
      "Epoch 41, Loss: 0.0005\n",
      "Epoch 42, Loss: 0.0006\n",
      "Epoch 42, Loss: 0.0007\n",
      "Epoch 42, Loss: 0.0010\n",
      "Epoch 43, Loss: 0.0007\n",
      "Epoch 43, Loss: 0.0006\n",
      "Epoch 43, Loss: 0.0008\n",
      "Epoch 44, Loss: 0.0008\n",
      "Epoch 44, Loss: 0.0006\n",
      "Epoch 44, Loss: 0.0004\n",
      "Epoch 45, Loss: 0.0004\n",
      "Epoch 45, Loss: 0.0008\n",
      "Epoch 45, Loss: 0.0008\n",
      "Epoch 46, Loss: 0.0006\n",
      "Epoch 46, Loss: 0.0008\n",
      "Epoch 46, Loss: 0.0004\n",
      "Epoch 47, Loss: 0.0007\n",
      "Epoch 47, Loss: 0.0004\n",
      "Epoch 47, Loss: 0.0008\n",
      "Epoch 48, Loss: 0.0006\n",
      "Epoch 48, Loss: 0.0005\n",
      "Epoch 48, Loss: 0.0007\n",
      "Epoch 49, Loss: 0.0008\n",
      "Epoch 49, Loss: 0.0005\n",
      "Epoch 49, Loss: 0.0004\n",
      "Epoch 50, Loss: 0.0005\n",
      "Epoch 50, Loss: 0.0006\n",
      "Epoch 50, Loss: 0.0008\n",
      "Epoch 51, Loss: 0.0005\n",
      "Epoch 51, Loss: 0.0006\n",
      "Epoch 51, Loss: 0.0008\n",
      "Epoch 52, Loss: 0.0005\n",
      "Epoch 52, Loss: 0.0005\n",
      "Epoch 52, Loss: 0.0007\n",
      "Epoch 53, Loss: 0.0005\n",
      "Epoch 53, Loss: 0.0007\n",
      "Epoch 53, Loss: 0.0004\n",
      "Epoch 54, Loss: 0.0005\n",
      "Epoch 54, Loss: 0.0007\n",
      "Epoch 54, Loss: 0.0004\n",
      "Epoch 55, Loss: 0.0005\n",
      "Epoch 55, Loss: 0.0006\n",
      "Epoch 55, Loss: 0.0004\n",
      "Epoch 56, Loss: 0.0006\n",
      "Epoch 56, Loss: 0.0004\n",
      "Epoch 56, Loss: 0.0006\n",
      "Epoch 57, Loss: 0.0005\n",
      "Epoch 57, Loss: 0.0006\n",
      "Epoch 57, Loss: 0.0004\n",
      "Epoch 58, Loss: 0.0006\n",
      "Epoch 58, Loss: 0.0005\n",
      "Epoch 58, Loss: 0.0003\n",
      "Epoch 59, Loss: 0.0005\n",
      "Epoch 59, Loss: 0.0004\n",
      "Epoch 59, Loss: 0.0006\n",
      "Epoch 60, Loss: 0.0005\n",
      "Epoch 60, Loss: 0.0004\n",
      "Epoch 60, Loss: 0.0006\n",
      "Epoch 61, Loss: 0.0006\n",
      "Epoch 61, Loss: 0.0004\n",
      "Epoch 61, Loss: 0.0003\n",
      "Epoch 62, Loss: 0.0005\n",
      "Epoch 62, Loss: 0.0004\n",
      "Epoch 62, Loss: 0.0006\n",
      "Epoch 63, Loss: 0.0003\n",
      "Epoch 63, Loss: 0.0006\n",
      "Epoch 63, Loss: 0.0005\n",
      "Epoch 64, Loss: 0.0004\n",
      "Epoch 64, Loss: 0.0004\n",
      "Epoch 64, Loss: 0.0006\n",
      "Epoch 65, Loss: 0.0005\n",
      "Epoch 65, Loss: 0.0004\n",
      "Epoch 65, Loss: 0.0003\n",
      "Epoch 66, Loss: 0.0004\n",
      "Epoch 66, Loss: 0.0004\n",
      "Epoch 66, Loss: 0.0005\n",
      "Epoch 67, Loss: 0.0004\n",
      "Epoch 67, Loss: 0.0004\n",
      "Epoch 67, Loss: 0.0005\n",
      "Epoch 68, Loss: 0.0003\n",
      "Epoch 68, Loss: 0.0005\n",
      "Epoch 68, Loss: 0.0005\n",
      "Epoch 69, Loss: 0.0005\n",
      "Epoch 69, Loss: 0.0003\n",
      "Epoch 69, Loss: 0.0005\n",
      "Epoch 70, Loss: 0.0004\n",
      "Epoch 70, Loss: 0.0005\n",
      "Epoch 70, Loss: 0.0003\n",
      "Epoch 71, Loss: 0.0004\n",
      "Epoch 71, Loss: 0.0004\n",
      "Epoch 71, Loss: 0.0005\n",
      "Epoch 72, Loss: 0.0004\n",
      "Epoch 72, Loss: 0.0004\n",
      "Epoch 72, Loss: 0.0005\n",
      "Epoch 73, Loss: 0.0005\n",
      "Epoch 73, Loss: 0.0004\n",
      "Epoch 73, Loss: 0.0003\n",
      "Epoch 74, Loss: 0.0005\n",
      "Epoch 74, Loss: 0.0004\n",
      "Epoch 74, Loss: 0.0003\n",
      "Epoch 75, Loss: 0.0004\n",
      "Epoch 75, Loss: 0.0003\n",
      "Epoch 75, Loss: 0.0004\n",
      "Epoch 76, Loss: 0.0004\n",
      "Epoch 76, Loss: 0.0003\n",
      "Epoch 76, Loss: 0.0004\n",
      "Epoch 77, Loss: 0.0003\n",
      "Epoch 77, Loss: 0.0003\n",
      "Epoch 77, Loss: 0.0004\n",
      "Epoch 78, Loss: 0.0003\n",
      "Epoch 78, Loss: 0.0004\n",
      "Epoch 78, Loss: 0.0003\n",
      "Epoch 79, Loss: 0.0004\n",
      "Epoch 79, Loss: 0.0003\n",
      "Epoch 79, Loss: 0.0004\n",
      "Epoch 80, Loss: 0.0004\n",
      "Epoch 80, Loss: 0.0003\n",
      "Epoch 80, Loss: 0.0004\n",
      "Epoch 81, Loss: 0.0003\n",
      "Epoch 81, Loss: 0.0003\n",
      "Epoch 81, Loss: 0.0004\n",
      "Epoch 82, Loss: 0.0003\n",
      "Epoch 82, Loss: 0.0003\n",
      "Epoch 82, Loss: 0.0004\n",
      "Epoch 83, Loss: 0.0003\n",
      "Epoch 83, Loss: 0.0003\n",
      "Epoch 83, Loss: 0.0004\n",
      "Epoch 84, Loss: 0.0002\n",
      "Epoch 84, Loss: 0.0004\n",
      "Epoch 84, Loss: 0.0004\n",
      "Epoch 85, Loss: 0.0003\n",
      "Epoch 85, Loss: 0.0003\n",
      "Epoch 85, Loss: 0.0004\n",
      "Epoch 86, Loss: 0.0003\n",
      "Epoch 86, Loss: 0.0004\n",
      "Epoch 86, Loss: 0.0002\n",
      "Epoch 87, Loss: 0.0003\n",
      "Epoch 87, Loss: 0.0003\n",
      "Epoch 87, Loss: 0.0004\n",
      "Epoch 88, Loss: 0.0004\n",
      "Epoch 88, Loss: 0.0003\n",
      "Epoch 88, Loss: 0.0002\n",
      "Epoch 89, Loss: 0.0003\n",
      "Epoch 89, Loss: 0.0004\n",
      "Epoch 89, Loss: 0.0002\n",
      "Epoch 90, Loss: 0.0003\n",
      "Epoch 90, Loss: 0.0003\n",
      "Epoch 90, Loss: 0.0004\n",
      "Epoch 91, Loss: 0.0002\n",
      "Epoch 91, Loss: 0.0003\n",
      "Epoch 91, Loss: 0.0004\n",
      "Epoch 92, Loss: 0.0003\n",
      "Epoch 92, Loss: 0.0003\n",
      "Epoch 92, Loss: 0.0002\n",
      "Epoch 93, Loss: 0.0003\n",
      "Epoch 93, Loss: 0.0003\n",
      "Epoch 93, Loss: 0.0002\n",
      "Epoch 94, Loss: 0.0003\n",
      "Epoch 94, Loss: 0.0003\n",
      "Epoch 94, Loss: 0.0002\n",
      "Epoch 95, Loss: 0.0003\n",
      "Epoch 95, Loss: 0.0002\n",
      "Epoch 95, Loss: 0.0003\n",
      "Epoch 96, Loss: 0.0003\n",
      "Epoch 96, Loss: 0.0003\n",
      "Epoch 96, Loss: 0.0003\n",
      "Epoch 97, Loss: 0.0003\n",
      "Epoch 97, Loss: 0.0003\n",
      "Epoch 97, Loss: 0.0003\n",
      "Epoch 98, Loss: 0.0003\n",
      "Epoch 98, Loss: 0.0003\n",
      "Epoch 98, Loss: 0.0002\n",
      "Epoch 99, Loss: 0.0002\n",
      "Epoch 99, Loss: 0.0003\n",
      "Epoch 99, Loss: 0.0002\n",
      "Epoch 100, Loss: 0.0002\n",
      "Epoch 100, Loss: 0.0003\n",
      "Epoch 100, Loss: 0.0003\n",
      "Epoch 101, Loss: 0.0003\n",
      "Epoch 101, Loss: 0.0002\n",
      "Epoch 101, Loss: 0.0003\n",
      "Epoch 102, Loss: 0.0002\n",
      "Epoch 102, Loss: 0.0003\n",
      "Epoch 102, Loss: 0.0003\n",
      "Epoch 103, Loss: 0.0002\n",
      "Epoch 103, Loss: 0.0003\n",
      "Epoch 103, Loss: 0.0003\n",
      "Epoch 104, Loss: 0.0002\n",
      "Epoch 104, Loss: 0.0003\n",
      "Epoch 104, Loss: 0.0002\n",
      "Epoch 105, Loss: 0.0002\n",
      "Epoch 105, Loss: 0.0003\n",
      "Epoch 105, Loss: 0.0002\n",
      "Epoch 106, Loss: 0.0002\n",
      "Epoch 106, Loss: 0.0002\n",
      "Epoch 106, Loss: 0.0003\n",
      "Epoch 107, Loss: 0.0003\n",
      "Epoch 107, Loss: 0.0002\n",
      "Epoch 107, Loss: 0.0002\n",
      "Epoch 108, Loss: 0.0002\n",
      "Epoch 108, Loss: 0.0003\n",
      "Epoch 108, Loss: 0.0002\n",
      "Epoch 109, Loss: 0.0002\n",
      "Epoch 109, Loss: 0.0002\n",
      "Epoch 109, Loss: 0.0003\n",
      "Epoch 110, Loss: 0.0002\n",
      "Epoch 110, Loss: 0.0003\n",
      "Epoch 110, Loss: 0.0003\n",
      "Epoch 111, Loss: 0.0002\n",
      "Epoch 111, Loss: 0.0003\n",
      "Epoch 111, Loss: 0.0002\n",
      "Epoch 112, Loss: 0.0002\n",
      "Epoch 112, Loss: 0.0003\n",
      "Epoch 112, Loss: 0.0002\n",
      "Epoch 113, Loss: 0.0003\n",
      "Epoch 113, Loss: 0.0002\n",
      "Epoch 113, Loss: 0.0003\n",
      "Epoch 114, Loss: 0.0003\n",
      "Epoch 114, Loss: 0.0002\n",
      "Epoch 114, Loss: 0.0002\n",
      "Epoch 115, Loss: 0.0002\n",
      "Epoch 115, Loss: 0.0002\n",
      "Epoch 115, Loss: 0.0003\n",
      "Epoch 116, Loss: 0.0002\n",
      "Epoch 116, Loss: 0.0002\n",
      "Epoch 116, Loss: 0.0003\n",
      "Epoch 117, Loss: 0.0003\n",
      "Epoch 117, Loss: 0.0002\n",
      "Epoch 117, Loss: 0.0002\n",
      "Epoch 118, Loss: 0.0002\n",
      "Epoch 118, Loss: 0.0003\n",
      "Epoch 118, Loss: 0.0002\n",
      "Epoch 119, Loss: 0.0003\n",
      "Epoch 119, Loss: 0.0002\n",
      "Epoch 119, Loss: 0.0002\n",
      "Epoch 120, Loss: 0.0002\n",
      "Epoch 120, Loss: 0.0002\n",
      "Epoch 120, Loss: 0.0002\n",
      "Epoch 121, Loss: 0.0002\n",
      "Epoch 121, Loss: 0.0002\n",
      "Epoch 121, Loss: 0.0002\n",
      "Epoch 122, Loss: 0.0002\n",
      "Epoch 122, Loss: 0.0002\n",
      "Epoch 122, Loss: 0.0002\n",
      "Epoch 123, Loss: 0.0002\n",
      "Epoch 123, Loss: 0.0002\n",
      "Epoch 123, Loss: 0.0002\n",
      "Epoch 124, Loss: 0.0002\n",
      "Epoch 124, Loss: 0.0002\n",
      "Epoch 124, Loss: 0.0002\n",
      "Epoch 125, Loss: 0.0002\n",
      "Epoch 125, Loss: 0.0002\n",
      "Epoch 125, Loss: 0.0001\n",
      "Epoch 126, Loss: 0.0002\n",
      "Epoch 126, Loss: 0.0002\n",
      "Epoch 126, Loss: 0.0002\n",
      "Epoch 127, Loss: 0.0002\n",
      "Epoch 127, Loss: 0.0002\n",
      "Epoch 127, Loss: 0.0002\n",
      "Epoch 128, Loss: 0.0002\n",
      "Epoch 128, Loss: 0.0002\n",
      "Epoch 128, Loss: 0.0001\n",
      "Epoch 129, Loss: 0.0002\n",
      "Epoch 129, Loss: 0.0002\n",
      "Epoch 129, Loss: 0.0002\n",
      "Epoch 130, Loss: 0.0002\n",
      "Epoch 130, Loss: 0.0002\n",
      "Epoch 130, Loss: 0.0001\n",
      "Epoch 131, Loss: 0.0002\n",
      "Epoch 131, Loss: 0.0002\n",
      "Epoch 131, Loss: 0.0001\n",
      "Epoch 132, Loss: 0.0002\n",
      "Epoch 132, Loss: 0.0002\n",
      "Epoch 132, Loss: 0.0001\n",
      "Epoch 133, Loss: 0.0002\n",
      "Epoch 133, Loss: 0.0002\n",
      "Epoch 133, Loss: 0.0001\n",
      "Epoch 134, Loss: 0.0002\n",
      "Epoch 134, Loss: 0.0002\n",
      "Epoch 134, Loss: 0.0001\n",
      "Epoch 135, Loss: 0.0002\n",
      "Epoch 135, Loss: 0.0002\n",
      "Epoch 135, Loss: 0.0002\n",
      "Epoch 136, Loss: 0.0002\n",
      "Epoch 136, Loss: 0.0002\n",
      "Epoch 136, Loss: 0.0002\n",
      "Epoch 137, Loss: 0.0002\n",
      "Epoch 137, Loss: 0.0001\n",
      "Epoch 137, Loss: 0.0002\n",
      "Epoch 138, Loss: 0.0001\n",
      "Epoch 138, Loss: 0.0002\n",
      "Epoch 138, Loss: 0.0002\n",
      "Epoch 139, Loss: 0.0002\n",
      "Epoch 139, Loss: 0.0001\n",
      "Epoch 139, Loss: 0.0002\n",
      "Epoch 140, Loss: 0.0002\n",
      "Epoch 140, Loss: 0.0002\n",
      "Epoch 140, Loss: 0.0001\n",
      "Epoch 141, Loss: 0.0002\n",
      "Epoch 141, Loss: 0.0002\n",
      "Epoch 141, Loss: 0.0002\n",
      "Epoch 142, Loss: 0.0001\n",
      "Epoch 142, Loss: 0.0002\n",
      "Epoch 142, Loss: 0.0002\n",
      "Epoch 143, Loss: 0.0002\n",
      "Epoch 143, Loss: 0.0002\n",
      "Epoch 143, Loss: 0.0001\n",
      "Epoch 144, Loss: 0.0002\n",
      "Epoch 144, Loss: 0.0002\n",
      "Epoch 144, Loss: 0.0001\n",
      "Epoch 145, Loss: 0.0002\n",
      "Epoch 145, Loss: 0.0002\n",
      "Epoch 145, Loss: 0.0002\n",
      "Epoch 146, Loss: 0.0001\n",
      "Epoch 146, Loss: 0.0002\n",
      "Epoch 146, Loss: 0.0002\n",
      "Epoch 147, Loss: 0.0001\n",
      "Epoch 147, Loss: 0.0002\n",
      "Epoch 147, Loss: 0.0002\n",
      "Epoch 148, Loss: 0.0001\n",
      "Epoch 148, Loss: 0.0002\n",
      "Epoch 148, Loss: 0.0002\n",
      "Epoch 149, Loss: 0.0001\n",
      "Epoch 149, Loss: 0.0002\n",
      "Epoch 149, Loss: 0.0002\n",
      "Epoch 150, Loss: 0.0001\n",
      "Epoch 150, Loss: 0.0002\n",
      "Epoch 150, Loss: 0.0002\n",
      "Epoch 151, Loss: 0.0001\n",
      "Epoch 151, Loss: 0.0002\n",
      "Epoch 151, Loss: 0.0001\n",
      "Epoch 152, Loss: 0.0002\n",
      "Epoch 152, Loss: 0.0001\n",
      "Epoch 152, Loss: 0.0001\n",
      "Epoch 153, Loss: 0.0001\n",
      "Epoch 153, Loss: 0.0001\n",
      "Epoch 153, Loss: 0.0002\n",
      "Epoch 154, Loss: 0.0001\n",
      "Epoch 154, Loss: 0.0002\n",
      "Epoch 154, Loss: 0.0002\n",
      "Epoch 155, Loss: 0.0001\n",
      "Epoch 155, Loss: 0.0002\n",
      "Epoch 155, Loss: 0.0001\n",
      "Epoch 156, Loss: 0.0002\n",
      "Epoch 156, Loss: 0.0001\n",
      "Epoch 156, Loss: 0.0002\n",
      "Epoch 157, Loss: 0.0001\n",
      "Epoch 157, Loss: 0.0002\n",
      "Epoch 157, Loss: 0.0001\n",
      "Epoch 158, Loss: 0.0001\n",
      "Epoch 158, Loss: 0.0002\n",
      "Epoch 158, Loss: 0.0001\n",
      "Epoch 159, Loss: 0.0001\n",
      "Epoch 159, Loss: 0.0002\n",
      "Epoch 159, Loss: 0.0002\n",
      "Epoch 160, Loss: 0.0001\n",
      "Epoch 160, Loss: 0.0002\n",
      "Epoch 160, Loss: 0.0001\n",
      "Epoch 161, Loss: 0.0002\n",
      "Epoch 161, Loss: 0.0001\n",
      "Epoch 161, Loss: 0.0001\n",
      "Epoch 162, Loss: 0.0002\n",
      "Epoch 162, Loss: 0.0001\n",
      "Epoch 162, Loss: 0.0001\n",
      "Epoch 163, Loss: 0.0002\n",
      "Epoch 163, Loss: 0.0001\n",
      "Epoch 163, Loss: 0.0002\n",
      "Epoch 164, Loss: 0.0001\n",
      "Epoch 164, Loss: 0.0002\n",
      "Epoch 164, Loss: 0.0001\n",
      "Epoch 165, Loss: 0.0002\n",
      "Epoch 165, Loss: 0.0001\n",
      "Epoch 165, Loss: 0.0001\n",
      "Epoch 166, Loss: 0.0002\n",
      "Epoch 166, Loss: 0.0001\n",
      "Epoch 166, Loss: 0.0001\n",
      "Epoch 167, Loss: 0.0001\n",
      "Epoch 167, Loss: 0.0001\n",
      "Epoch 167, Loss: 0.0002\n",
      "Epoch 168, Loss: 0.0001\n",
      "Epoch 168, Loss: 0.0001\n",
      "Epoch 168, Loss: 0.0002\n",
      "Epoch 169, Loss: 0.0001\n",
      "Epoch 169, Loss: 0.0001\n",
      "Epoch 169, Loss: 0.0002\n",
      "Epoch 170, Loss: 0.0001\n",
      "Epoch 170, Loss: 0.0001\n",
      "Epoch 170, Loss: 0.0002\n",
      "Epoch 171, Loss: 0.0001\n",
      "Epoch 171, Loss: 0.0001\n",
      "Epoch 171, Loss: 0.0001\n",
      "Epoch 172, Loss: 0.0001\n",
      "Epoch 172, Loss: 0.0001\n",
      "Epoch 172, Loss: 0.0001\n",
      "Epoch 173, Loss: 0.0001\n",
      "Epoch 173, Loss: 0.0001\n",
      "Epoch 173, Loss: 0.0001\n",
      "Epoch 174, Loss: 0.0002\n",
      "Epoch 174, Loss: 0.0001\n",
      "Epoch 174, Loss: 0.0001\n",
      "Epoch 175, Loss: 0.0001\n",
      "Epoch 175, Loss: 0.0002\n",
      "Epoch 175, Loss: 0.0001\n",
      "Epoch 176, Loss: 0.0001\n",
      "Epoch 176, Loss: 0.0002\n",
      "Epoch 176, Loss: 0.0001\n",
      "Epoch 177, Loss: 0.0001\n",
      "Epoch 177, Loss: 0.0001\n",
      "Epoch 177, Loss: 0.0001\n",
      "Epoch 178, Loss: 0.0001\n",
      "Epoch 178, Loss: 0.0001\n",
      "Epoch 178, Loss: 0.0002\n",
      "Epoch 179, Loss: 0.0001\n",
      "Epoch 179, Loss: 0.0001\n",
      "Epoch 179, Loss: 0.0001\n",
      "Epoch 180, Loss: 0.0001\n",
      "Epoch 180, Loss: 0.0001\n",
      "Epoch 180, Loss: 0.0001\n",
      "Epoch 181, Loss: 0.0001\n",
      "Epoch 181, Loss: 0.0001\n",
      "Epoch 181, Loss: 0.0001\n",
      "Epoch 182, Loss: 0.0001\n",
      "Epoch 182, Loss: 0.0001\n",
      "Epoch 182, Loss: 0.0002\n",
      "Epoch 183, Loss: 0.0001\n",
      "Epoch 183, Loss: 0.0001\n",
      "Epoch 183, Loss: 0.0001\n",
      "Epoch 184, Loss: 0.0001\n",
      "Epoch 184, Loss: 0.0001\n",
      "Epoch 184, Loss: 0.0002\n",
      "Epoch 185, Loss: 0.0001\n",
      "Epoch 185, Loss: 0.0001\n",
      "Epoch 185, Loss: 0.0001\n",
      "Epoch 186, Loss: 0.0001\n",
      "Epoch 186, Loss: 0.0001\n",
      "Epoch 186, Loss: 0.0001\n",
      "Epoch 187, Loss: 0.0001\n",
      "Epoch 187, Loss: 0.0001\n",
      "Epoch 187, Loss: 0.0002\n",
      "Epoch 188, Loss: 0.0001\n",
      "Epoch 188, Loss: 0.0001\n",
      "Epoch 188, Loss: 0.0001\n",
      "Epoch 189, Loss: 0.0001\n",
      "Epoch 189, Loss: 0.0001\n",
      "Epoch 189, Loss: 0.0001\n",
      "Epoch 190, Loss: 0.0001\n",
      "Epoch 190, Loss: 0.0001\n",
      "Epoch 190, Loss: 0.0001\n",
      "Epoch 191, Loss: 0.0001\n",
      "Epoch 191, Loss: 0.0001\n",
      "Epoch 191, Loss: 0.0001\n",
      "Epoch 192, Loss: 0.0001\n",
      "Epoch 192, Loss: 0.0001\n",
      "Epoch 192, Loss: 0.0001\n",
      "Epoch 193, Loss: 0.0001\n",
      "Epoch 193, Loss: 0.0001\n",
      "Epoch 193, Loss: 0.0001\n",
      "Epoch 194, Loss: 0.0001\n",
      "Epoch 194, Loss: 0.0001\n",
      "Epoch 194, Loss: 0.0001\n",
      "Epoch 195, Loss: 0.0001\n",
      "Epoch 195, Loss: 0.0001\n",
      "Epoch 195, Loss: 0.0001\n",
      "Epoch 196, Loss: 0.0001\n",
      "Epoch 196, Loss: 0.0001\n",
      "Epoch 196, Loss: 0.0001\n",
      "Epoch 197, Loss: 0.0001\n",
      "Epoch 197, Loss: 0.0001\n",
      "Epoch 197, Loss: 0.0001\n",
      "Epoch 198, Loss: 0.0001\n",
      "Epoch 198, Loss: 0.0001\n",
      "Epoch 198, Loss: 0.0001\n",
      "Epoch 199, Loss: 0.0001\n",
      "Epoch 199, Loss: 0.0001\n",
      "Epoch 199, Loss: 0.0001\n",
      "Epoch 200, Loss: 0.0001\n",
      "Epoch 200, Loss: 0.0001\n",
      "Epoch 200, Loss: 0.0001\n"
     ]
    }
   ],
   "source": [
    "# 编码器\n",
    "class Encoder(nn.Module):\n",
    "    def __init__(self, vocab_size, hidden_size):\n",
    "        super().__init__()\n",
    "        self.embedding = nn.Embedding(vocab_size, hidden_size)\n",
    "        self.rnn = nn.GRU(hidden_size, hidden_size, batch_first=True)\n",
    "\n",
    "    def forward(self, src):\n",
    "        embedded = self.embedding(src)\n",
    "        outputs, hidden = self.rnn(embedded)\n",
    "        return outputs, hidden\n",
    "\n",
    "\n",
    "# 解码器\n",
    "class Decoder(nn.Module):\n",
    "    def __init__(self, vocab_size, hidden_size):\n",
    "        super().__init__()\n",
    "        self.embedding = nn.Embedding(vocab_size, hidden_size)\n",
    "        self.rnn = nn.GRU(hidden_size, hidden_size, batch_first=True)\n",
    "        self.fc_out = nn.Linear(hidden_size, vocab_size)\n",
    "\n",
    "    def forward(self, input, hidden):\n",
    "        embedded = self.embedding(input)\n",
    "        outputs, hidden = self.rnn(embedded, hidden)\n",
    "        return self.fc_out(outputs), hidden\n",
    "\n",
    "\n",
    "# 训练配置\n",
    "encoder = Encoder(ZH_VOCAB_SIZE, HIDDEN_SIZE)\n",
    "decoder = Decoder(EN_VOCAB_SIZE, HIDDEN_SIZE)\n",
    "optimizer = optim.Adam(list(encoder.parameters()) + list(decoder.parameters()), lr=LEARNING_RATE)\n",
    "criterion = nn.CrossEntropyLoss(ignore_index=en_word2idx['<pad>'])\n",
    "\n",
    "\n",
    "# 训练函数\n",
    "def train(encoder, decoder, n_epochs=10):\n",
    "    for epoch in range(n_epochs):\n",
    "        for input, target in dataloader:\n",
    "\n",
    "            _, hidden = encoder(input)\n",
    "\n",
    "            # 准备解码器输入输出，其实这一步可以在数据处理时做掉\n",
    "            decoder_input = target[:, :-1]  # 移除最后一个token\n",
    "            decoder_target = target[:, 1:]  # 移除第一个token\n",
    "\n",
    "            decoder_output, _ = decoder(decoder_input, hidden)\n",
    "\n",
    "            # 计算损失\n",
    "            loss = criterion(\n",
    "                # decoder_output本来是(batch_size, seq_len, vocab_size)，变成(batch_size * seq_len, vocab_size)\n",
    "                # decoder_target本来是(batch_size, seq_len)，变成(batch_size * seq_len)\n",
    "                # reshape(-1)表示保留最后一个维度\n",
    "                decoder_output.reshape(-1, decoder_output.size(-1)),\n",
    "                decoder_target.reshape(-1)\n",
    "            )\n",
    "\n",
    "            optimizer.zero_grad()\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "\n",
    "            print(f'Epoch {epoch + 1}, Loss: {loss:.4f}')\n",
    "\n",
    "\n",
    "# 开始训练\n",
    "train(encoder, decoder, n_epochs=200)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 348,
   "id": "392d668e7d115e97",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-23T05:15:11.806134Z",
     "start_time": "2025-05-23T05:15:11.798219Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "deep learning is interesting.\n"
     ]
    }
   ],
   "source": [
    "# 翻译函数\n",
    "def translate(sentence, encoder, decoder):\n",
    "    tokens = tokenize_chinese(sentence)\n",
    "    numerical = [zh_word2idx.get(word, zh_word2idx['<unk>']) for word in tokens]\n",
    "    numerical = [zh_word2idx['<sos>']] + numerical + [zh_word2idx['<eos>']]\n",
    "    src_tensor = torch.LongTensor(numerical)\n",
    "\n",
    "    _, hidden = encoder(src_tensor)\n",
    "    trg_indexes = [en_word2idx['<sos>']]\n",
    "\n",
    "    for _ in range(50):\n",
    "        trg_tensor = torch.LongTensor([trg_indexes[-1]])\n",
    "        with torch.no_grad():\n",
    "            output, hidden = decoder(trg_tensor, hidden)\n",
    "        pred_token = output.argmax().item()\n",
    "        trg_indexes.append(pred_token)\n",
    "        if pred_token == en_word2idx['<eos>']:\n",
    "            break\n",
    "\n",
    "    return ' '.join([en_vocab[idx] for idx in trg_indexes[1:-1]])\n",
    "\n",
    "\n",
    "# 测试翻译\n",
    "test_sentence = \"深度学习很有趣\"\n",
    "print(translate(test_sentence, encoder, decoder))  # 输出应接近 \"let's study together\""
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
