{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "92c9da5866bdcf7",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-22T13:18:03.820478Z",
     "start_time": "2025-05-22T13:18:02.976641Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{'；', '月', '江', '水', '碧', '大', '正', '击', '游', '飞', '层', '\\n', '，', '去', '遏', '湘', '头', '书', '竞', '沉', '当', '子', '怅', '到', '华', '侣', '秋', '同', '昔', '文', '挥', '浪', '底', '霜', '生', '林', '。', '北', '激', '问', '自', '空', '嵘', '寒', '意', '字', '谁', '橘', '遍', '百', '主', '苍', '立', '中', '年', '遒', '户', '土', '学', '侯', '？', '茂', '廓', '独', '浮', '染', '万', '曾', '少', '茫', '由', '地', '翔', '粪', '洲', '透', '天', '恰', '浅', '寥', '斥', '否', '稠', '风', '扬', '携', '争', '指', '往', '来', '红', '方', '尽', '岁', '峥', '记', '长', '舸', '忆', '舟', '类', '山', '气', '点', '看', '流', '鱼', '漫', '鹰'}\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "\n",
    "# Sample corpus: a classical Chinese poem, kept as one string.\n",
    "text = \"\"\"\n",
    "独立寒秋，湘江北去，橘子洲头。\n",
    "看万山红遍，层林尽染；漫江碧透，百舸争流。\n",
    "鹰击长空，鱼翔浅底，万类霜天竞自由。\n",
    "怅寥廓，问苍茫大地，谁主沉浮？\n",
    "携来百侣曾游，忆往昔峥嵘岁月稠。\n",
    "恰同学少年，风华正茂；书生意气，挥斥方遒。\n",
    "指点江山，激扬文字，粪土当年万户侯。\n",
    "曾记否，到中流击水，浪遏飞舟？\n",
    "\"\"\"\n",
    "\n",
    "# Build the character vocabulary.\n",
    "# sorted() makes the char<->index mappings deterministic across runs;\n",
    "# iterating a bare set() depends on hash ordering (PYTHONHASHSEED), so\n",
    "# indices — and any saved model weights — would not be reproducible.\n",
    "words = sorted(set(text))\n",
    "vocab_size = len(words)\n",
    "word_to_idx = {word: i for i, word in enumerate(words)}\n",
    "idx_to_word = {i: word for i, word in enumerate(words)}\n",
    "\n",
    "print(words)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "91280acf83012c57",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-22T13:18:03.828183Z",
     "start_time": "2025-05-22T13:18:03.824888Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[11, 63, 52, 43, 26, 12, 15, 2, 37, 13, 12, 47, 21, 74, 16, 36, 11, 104, 66, 101, 90, 48, 12, 10, 35, 92, 65, 0, 107, 2, 4, 75, 12, 49, 97, 86, 105, 36, 11, 108, 7, 96, 41, 12, 106, 72, 78, 32, 12, 66, 100, 33, 76, 18, 40, 70, 36, 11, 22, 79, 62, 12, 39, 51, 69, 5, 71, 12, 46, 50, 19, 64, 60, 11, 85, 89, 49, 25, 67, 8, 12, 98, 88, 28, 94, 42, 93, 1, 82, 36, 11, 77, 27, 58, 68, 54, 12, 83, 24, 6, 61, 0, 17, 34, 44, 102, 12, 30, 80, 91, 55, 36, 11, 87, 103, 2, 101, 12, 38, 84, 29, 45, 12, 73, 57, 20, 54, 66, 56, 59, 36, 11, 67, 95, 81, 12, 23, 53, 105, 7, 3, 12, 31, 14, 9, 99, 60, 11]\n"
     ]
    }
   ],
   "source": [
    "\n",
    "# Hyperparameters\n",
    "SEQ_LENGTH = 10  # input sequence length (characters per training window)\n",
    "BATCH_SIZE = 1\n",
    "HIDDEN_SIZE = 128\n",
    "NUM_LAYERS= 1   # single LSTM layer; with this little data, deeper stacks hurt\n",
    "LEARNING_RATE = 0.005\n",
    "NUM_EPOCHS = 200\n",
    "\n",
    "\n",
    "# Training data: sliding character windows over the corpus.\n",
    "class TextDataset(Dataset):\n",
    "    def __init__(self, text, seq_length):\n",
    "        self.text = text\n",
    "        self.seq_length = seq_length\n",
    "\n",
    "        # Encode the corpus as a list of vocabulary indices.\n",
    "        self.data = [word_to_idx[ch] for ch in text]\n",
    "\n",
    "    def __len__(self):\n",
    "        # One sample per window start; the final window still needs a\n",
    "        # target shifted one step right, hence the subtraction.\n",
    "        return len(self.data) - self.seq_length\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        # Input: a window of seq_length characters from the text.\n",
    "        input_seq = self.data[idx:idx + self.seq_length]\n",
    "\n",
    "        # Target: the same window shifted right by one character.\n",
    "        target_seq = self.data[idx + 1:idx + self.seq_length + 1]\n",
    "\n",
    "        # E.g. for corpus abcdefg: input_seq=abc, target_seq=bcd\n",
    "\n",
    "        return torch.LongTensor(input_seq), torch.LongTensor(target_seq)\n",
    "\n",
    "\n",
    "dataset = TextDataset(text, SEQ_LENGTH)\n",
    "dataloader = DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True)\n",
    "\n",
    "print(dataset.data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "d9e9fd013d3d01ba",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-22T13:26:33.955574Z",
     "start_time": "2025-05-22T13:26:33.947808Z"
    }
   },
   "outputs": [],
   "source": [
    "class CharLSTM(nn.Module):\n",
    "    \"\"\"Character-level LSTM language model: embedding -> LSTM -> linear.\"\"\"\n",
    "\n",
    "    def __init__(self, vocab_size, hidden_size):\n",
    "        super().__init__()\n",
    "        self.hidden_size = hidden_size\n",
    "\n",
    "        # Embedding layer (token index -> dense vector).\n",
    "        # The embedding feeds the LSTM directly, so its width is hidden_size.\n",
    "        self.embedding = nn.Embedding(vocab_size, hidden_size)\n",
    "\n",
    "        # input_size is the per-step feature width, i.e. the embedding width.\n",
    "        self.rnn = nn.LSTM(input_size=hidden_size, hidden_size=hidden_size, num_layers=NUM_LAYERS, batch_first=True)\n",
    "\n",
    "        # Project each time step's hidden state onto vocabulary logits.\n",
    "        self.out_linear = nn.Linear(hidden_size, vocab_size)\n",
    "\n",
    "    def forward(self, x, states=None):\n",
    "        \"\"\"Run the model over a batch of index sequences.\n",
    "\n",
    "        x: LongTensor of shape (batch, seq_len) holding token indices.\n",
    "        states: optional (hidden, cell) tuple carried over from a previous\n",
    "            call; None means start from zero states.\n",
    "        Returns (logits, (hidden, cell)) where logits has shape\n",
    "        (batch, seq_len, vocab_size).\n",
    "        \"\"\"\n",
    "        # (batch, seq_len) -> (batch, seq_len, hidden_size)\n",
    "        embedded = self.embedding(x)\n",
    "\n",
    "        # nn.LSTM zero-initializes (hidden, cell) itself when states is None,\n",
    "        # sized from the actual input batch — previously this was done by hand\n",
    "        # with the global BATCH_SIZE, which broke any batch of a different size.\n",
    "        # outputs: the last layer's hidden state at every time step,\n",
    "        #   shape (batch, seq_len, hidden_size).\n",
    "        # hidden/cell: the final time step's state for every layer.\n",
    "        outputs, (hidden, cell) = self.rnn(embedded, states)\n",
    "\n",
    "        # (batch, seq_len, hidden_size) -> (batch, seq_len, vocab_size):\n",
    "        # per-step logits over the vocabulary.\n",
    "        out = self.out_linear(outputs)\n",
    "\n",
    "        return out, (hidden, cell)\n",
    "\n",
    "\n",
    "# Model, loss, and optimizer.\n",
    "model = CharLSTM(vocab_size, HIDDEN_SIZE)\n",
    "criterion = nn.CrossEntropyLoss()\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "2348428ce74982e4",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-22T13:27:27.385421Z",
     "start_time": "2025-05-22T13:26:39.353746Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch [20/200], Loss: 0.0768\n",
      "Epoch [40/200], Loss: 0.0716\n",
      "Epoch [60/200], Loss: 0.0714\n",
      "Epoch [80/200], Loss: 0.0682\n",
      "Epoch [100/200], Loss: 0.0693\n",
      "Epoch [120/200], Loss: 0.0674\n",
      "Epoch [140/200], Loss: 0.1553\n",
      "Epoch [160/200], Loss: 0.0708\n",
      "Epoch [180/200], Loss: 0.0689\n",
      "Epoch [200/200], Loss: 0.0701\n"
     ]
    }
   ],
   "source": [
    "def train(model, dataloader, epochs):\n",
    "    \"\"\"Train the model for `epochs` passes over `dataloader`.\n",
    "\n",
    "    Prints the average loss every 20 epochs. Relies on the module-level\n",
    "    `criterion` and `optimizer` defined in the previous cell.\n",
    "    \"\"\"\n",
    "    model.train()\n",
    "    for epoch in range(epochs):\n",
    "        total_loss = 0\n",
    "\n",
    "        for inputs, targets in dataloader:\n",
    "            # Forward pass: logits for every time step.\n",
    "            outputs, _ = model(inputs)\n",
    "\n",
    "            # Compare every time step's prediction with its label.\n",
    "            # outputs: (batch, seq_len, vocab_size); targets: (batch, seq_len).\n",
    "            # Flattening merges batch and time so CrossEntropyLoss sees a\n",
    "            # plain (N, vocab_size)-vs-(N,) classification problem.\n",
    "            loss = criterion(\n",
    "                outputs.view(-1, vocab_size),  # (batch_size*seq_length, vocab_size)\n",
    "                targets.view(-1)  # (batch_size*seq_length)\n",
    "            )\n",
    "\n",
    "            # Backward pass.\n",
    "            optimizer.zero_grad()\n",
    "            loss.backward()\n",
    "\n",
    "            # Clip gradients to guard against exploding gradients.\n",
    "            nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)\n",
    "\n",
    "            optimizer.step()\n",
    "\n",
    "            total_loss += loss.item()\n",
    "\n",
    "        # Report progress every 20 epochs.\n",
    "        if (epoch + 1) % 20 == 0:\n",
    "            avg_loss = total_loss / len(dataloader)\n",
    "            print(f'Epoch [{epoch + 1}/{epochs}], Loss: {avg_loss:.4f}')\n",
    "\n",
    "\n",
    "train(model, dataloader, NUM_EPOCHS)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "id": "8a4e36b033918def",
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-22T13:49:34.895028Z",
     "start_time": "2025-05-22T13:49:34.863156Z"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "鹰击长空，鱼翔浅底，万类霜天竞自由。\n",
      "怅寥廓，问苍茫大地，谁主沉浮？\n",
      "携来百侣曾游，忆往昔峥嵘岁月稠。\n",
      "恰同学少年，风华正茂；书生意气，挥斥方遒。\n",
      "指点江山，激扬文字，粪土当年记否，到中流击水，浪遏飞舟？\n"
     ]
    }
   ],
   "source": [
    "def generate_text(model, start_str, num_chars, temperature=0.8):\n",
    "    \"\"\"Sample `num_chars` characters from the model, seeded with `start_str`.\n",
    "\n",
    "    temperature > 1 flattens the distribution (more random output);\n",
    "    temperature < 1 sharpens it. Every character of `start_str` must be\n",
    "    in the training vocabulary, otherwise word_to_idx raises KeyError.\n",
    "    \"\"\"\n",
    "    model.eval()\n",
    "    chars = [ch for ch in start_str]\n",
    "\n",
    "    # Prime the LSTM with the whole seed once. Afterwards the carried\n",
    "    # (hidden, cell) states already summarize the history, so each step\n",
    "    # only needs to feed the single newest character. (The previous\n",
    "    # version re-fed a sliding window every step while *also* carrying\n",
    "    # states, processing the same context twice.)\n",
    "    input_seq = torch.LongTensor([word_to_idx[ch] for ch in chars])\n",
    "    states = None\n",
    "\n",
    "    for _ in range(num_chars):\n",
    "        batch_input = input_seq.unsqueeze(0)  # (1, seq_len)\n",
    "\n",
    "        with torch.no_grad():\n",
    "            # output holds logits for every fed time step; only the final\n",
    "            # step predicts the next character. states are kept for the\n",
    "            # next iteration so the model remembers everything so far.\n",
    "            output, states = model(batch_input, states)\n",
    "            last_output = output[0, -1, :]  # logits of the final time step\n",
    "\n",
    "        # Temperature scaling: dividing logits by temperature > 1 shrinks\n",
    "        # the gaps between them, e.g. [8,2,2] / 2 = [4,1,1], so sampling is\n",
    "        # closer to uniform; temperature < 1 widens the gaps instead.\n",
    "        probs = torch.softmax(last_output / temperature, dim=-1)\n",
    "\n",
    "        # Multinomial sampling: each index is drawn with its probability,\n",
    "        # e.g. probs=[0.3,0.2,0.5] picks index 2 half the time — likelier\n",
    "        # characters win more often but not always.\n",
    "        char_idx = torch.multinomial(probs, 1).item()\n",
    "\n",
    "        chars.append(idx_to_word[char_idx])\n",
    "        # Only the new character is fed next time; states carry the context.\n",
    "        input_seq = torch.LongTensor([char_idx])\n",
    "\n",
    "    return ''.join(chars)\n",
    "\n",
    "\n",
    "# Generate 100 characters; larger temperature -> more random output.\n",
    "print(generate_text(model, \"鹰击\", 100, temperature=2))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ae41aca968a676a8",
   "metadata": {},
   "source": [
    "在推理预测时，可以试着不传 states，看看会是什么效果"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
