{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "cc126ad2",
   "metadata": {},
   "source": [
    "# 数据准备"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d176ab4c",
   "metadata": {},
   "source": [
    "# !wget https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt"
   ]
  },
  {
   "cell_type": "code",
   "id": "9f3a185f",
   "metadata": {},
   "source": [
    "import os\n",
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "\n",
    "# 读取Shakespeare文本文件\n",
    "with open('shakespeare.txt', 'r', encoding='utf-8') as f:\n",
    "    text = f.read()\n",
    "\n",
    "# 打印文本的前100个字符\n",
    "print(f\"文本长度: {len(text)}\")\n",
    "print(f\"文本前100个字符:\\n{text[:100]}\")\n",
    "\n",
    "# 创建字符级别的字典\n",
    "vocab = sorted(set(text))\n",
    "print(f\"字典大小: {len(vocab)}\")\n",
    "print(f\"字典内容: {vocab}\")\n",
    "\n",
    "# 创建字符到索引的映射\n",
    "char_to_idx = {char: idx for idx, char in enumerate(vocab)}\n",
    "idx_to_char = {idx: char for idx, char in enumerate(vocab)}\n",
    "\n",
    "# 打印映射示例\n",
    "print(\"\\n字符到索引的映射示例:\")\n",
    "for char in text[:20]:\n",
    "    print(f\"'{char}' -> {char_to_idx[char]}\")\n",
    "\n",
    "# 将文本转换为数字序列\n",
    "text_as_int = np.array([char_to_idx[c] for c in text]) #把全部文本都变为id\n",
    "print(f\"\\n文本转换为数字序列的前20个元素:\\n{text_as_int[:20]}\")\n",
    "print(f\"将数字序列转回字符:\\n{''.join([idx_to_char[idx] for idx in text_as_int[:20]])}\")\n"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "markdown",
   "id": "4a08bc2a",
   "metadata": {},
   "source": [
    "# 把莎士比亚文集分成一个一个的样本"
   ]
  },
  {
   "cell_type": "code",
   "id": "e2dd4a94",
   "metadata": {},
   "source": [
    "# 定义序列长度和批次大小\n",
    "import torch\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "\n",
    "seq_length = 100  # 每个样本的序列长度\n",
    "batch_size = 64   # 每个批次的样本数量\n",
    "\n",
    "# 创建自定义数据集类\n",
    "class ShakespeareDataset(Dataset):\n",
    "    def __init__(self, text_as_int, seq_length):\n",
    "        self.text_as_int = text_as_int\n",
    "        self.seq_length = seq_length\n",
    "        self.sub_len = seq_length + 1 #一个样本的长度\n",
    "        \n",
    "    def __len__(self):\n",
    "        # 计算可能的序列数量\n",
    "        return len(self.text_as_int)//(self.seq_length+1) #+1是因为要预测下一个字符\n",
    "        \n",
    "    def __getitem__(self, idx):\n",
    "        # 将numpy数组转换为长整型(Long)\n",
    "        return torch.tensor(self.text_as_int[idx*self.sub_len:(idx+1)*self.sub_len], dtype=torch.long)\n",
    "\n",
    "# 定义collate函数，用于处理批次数据\n",
    "def collate_fct(batch):\n",
    "    # 将批次数据堆叠成张量，确保类型为long\n",
    "    batch = torch.stack(batch)\n",
    "    # 输入序列是除了最后一个字符的所有字符\n",
    "    input_batch = batch[:, :-1]\n",
    "    # 目标序列是除了第一个字符的所有字符\n",
    "    target_batch = batch[:, 1:]\n",
    "    return input_batch, target_batch\n",
    "\n",
    "# 创建数据集实例\n",
    "shakespeare_dataset = ShakespeareDataset(text_as_int, seq_length)\n",
    "\n",
    "# 创建数据加载器\n",
    "dataloader = DataLoader(shakespeare_dataset, batch_size=batch_size, shuffle=True, drop_last=True, collate_fn=collate_fct)\n",
    "\n",
    "# 打印示例，查看输入和目标\n",
    "for input_batch, target_batch in dataloader:\n",
    "    print(f\"输入批次形状: {input_batch.shape}\")\n",
    "    print(f\"目标批次形状: {target_batch.shape}\")\n",
    "    \n",
    "    # 打印第一个样本的输入和目标\n",
    "    print(input_batch)\n",
    "    print(target_batch)\n",
    "    break\n",
    "\n",
    "print(f\"\\n数据集大小: {len(shakespeare_dataset)}\")\n",
    "print(f\"批次数量: {len(dataloader)}\")\n"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "id": "0606fa96",
   "metadata": {},
   "source": [
     "11043//64  # scratch check: len(shakespeare_dataset) // batch_size — expected number of batches with drop_last=True"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "markdown",
   "id": "0353bb1a",
   "metadata": {},
   "source": [
    "# 搭建模型"
   ]
  },
  {
   "cell_type": "code",
   "id": "50a9336b",
   "metadata": {},
   "source": [
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "\n",
    "# 定义LSTM模型\n",
    "class ShakespeareLSTM(nn.Module):\n",
    "    def __init__(self, vocab_size, embedding_dim, hidden_dim, batch_size):\n",
    "        super(ShakespeareLSTM, self).__init__()\n",
    "        self.embedding = nn.Embedding(vocab_size, embedding_dim)\n",
    "        self.lstm = nn.LSTM(\n",
    "            embedding_dim,\n",
    "            hidden_dim,\n",
    "            num_layers=1,\n",
    "            bidirectional=False,\n",
    "            batch_first=True\n",
    "        )\n",
    "        self.dense = nn.Linear(hidden_dim, vocab_size)\n",
    "        \n",
    "    def forward(self, x, hidden=None):\n",
    "        # 输入形状: [batch_size, sequence_length]\n",
    "        x = self.embedding(x)  # 形状: [batch_size, sequence_length, embedding_dim]\n",
    "        output, hidden = self.lstm(x, hidden)  # 形状: [batch_size, sequence_length, hidden_dim]\n",
    "        output = self.dense(output)  # 形状: [batch_size, sequence_length, vocab_size]\n",
    "        return output, hidden\n",
    "    \n",
    "\n",
    "\n",
    "# 定义模型参数\n",
    "vocab_size = len(char_to_idx)  # 词汇表大小\n",
    "embedding_dim = 256  # 嵌入维度\n",
    "lstm_units = 1024  # LSTM单元数量\n",
    "\n",
    "# 实例化模型\n",
    "model = ShakespeareLSTM(vocab_size, embedding_dim, lstm_units, batch_size)\n",
    "print(model)\n"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "id": "8ef60cf5",
   "metadata": {},
   "source": [
    "# 创建一个小批量数据来测试模型\n",
    "batch_size = 4\n",
    "seq_length = 100\n",
    "test_input = torch.randint(0, vocab_size, (batch_size, seq_length))\n",
    "\n",
    "# 进行前向计算\n",
    "with torch.no_grad():\n",
    "    output, hidden = model(test_input)\n",
    "    \n",
    "# 打印输出形状\n",
    "print(f\"输入形状: {test_input.shape}\")\n",
    "print(f\"输出形状: {output.shape}\")\n",
    "\n",
    "# 验证输出是否符合预期\n",
    "assert output.shape == (batch_size, seq_length, vocab_size), \"输出形状不符合预期\"\n",
    "assert hidden.shape == (1, batch_size, lstm_units), \"隐藏状态形状不符合预期\"\n",
    "\n",
    "print(\"模型前向计算验证成功！\")\n"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "markdown",
   "id": "7d9f0a18",
   "metadata": {},
   "source": [
    "# 训练"
   ]
  },
  {
   "cell_type": "code",
   "id": "95e6a3f8",
   "metadata": {},
   "source": [
    "from tqdm.auto import tqdm\n",
    "# 定义损失函数和优化器\n",
    "criterion = nn.CrossEntropyLoss()\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n",
    "\n",
    "# 训练函数\n",
    "def train_step(model, dataloader, optimizer, criterion, epochs=5):\n",
    "    losses = []\n",
    "    \n",
    "    for epoch in range(epochs):\n",
    "        model.train()\n",
    "        epoch_loss = 0\n",
    "        \n",
    "        # 使用tqdm创建进度条\n",
    "        with tqdm(dataloader, desc=f\"轮次 {epoch+1}/{epochs}\") as pbar:\n",
    "            for input_batch, target_batch in pbar:\n",
    "                input_batch = input_batch.to(device)\n",
    "                target_batch = target_batch.to(device)\n",
    "                optimizer.zero_grad()\n",
    "                \n",
    "                # 前向传播\n",
    "                output, _ = model(input_batch)\n",
    "                \n",
    "                # 计算损失\n",
    "                # 重塑输出和目标以适应CrossEntropyLoss\n",
    "                output = output.reshape(-1, vocab_size)\n",
    "                target_batch = target_batch.reshape(-1)\n",
    "                \n",
    "                loss = criterion(output, target_batch)\n",
    "                \n",
    "                # 反向传播\n",
    "                loss.backward()\n",
    "                optimizer.step()\n",
    "                \n",
    "                current_loss = loss.item()\n",
    "                epoch_loss += current_loss\n",
    "                \n",
    "                # 更新进度条显示的损失值\n",
    "                pbar.set_postfix({\"损失\": f\"{current_loss:.4f}\"})\n",
    "        \n",
    "        # 计算并记录每个epoch的平均损失\n",
    "        avg_epoch_loss = epoch_loss / len(dataloader)\n",
    "        losses.append(avg_epoch_loss)\n",
    "        print(f\"轮次 {epoch+1}/{epochs} 完成, 平均损失: {avg_epoch_loss:.4f}\")\n",
    "    \n",
    "    return losses\n",
    "\n",
    "# 将模型移动到设备上\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "model = model.to(device)\n",
    "\n",
    "# 开始训练循环\n",
    "losses = train_step(model, dataloader, optimizer, criterion, epochs=5)\n",
    "  "
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "id": "2f863293",
   "metadata": {},
   "source": [
     "import matplotlib.pyplot as plt\n",
     "# Plot the per-epoch training loss curve returned by train_step\n",
     "plt.figure(figsize=(10, 6))\n",
     "plt.plot(losses)\n",
     "plt.title('Training Loss')\n",
     "plt.xlabel('Epochs')\n",
     "plt.ylabel('Loss')\n",
     "plt.grid(True)\n",
     "plt.show()"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "id": "23093630",
   "metadata": {},
   "source": [
     "# Small example to understand torch.multinomial\n",
     "import torch\n",
     "import torch.nn.functional as F\n",
     "\n",
     "# Define a probability distribution\n",
     "probs = torch.tensor([0.1, 0.2, 0.3, 0.4])\n",
     "print(\"概率分布:\", probs)\n",
     "\n",
     "# Sample one element from the distribution (higher probability -> more likely)\n",
     "sample = torch.multinomial(probs, num_samples=1)\n",
     "print(\"采样一个元素:\", sample.item(), \"对应概率:\", probs[sample.item()].item())\n",
     "\n"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "id": "198c39a8",
   "metadata": {},
   "source": [
     "# Demonstrate how the temperature parameter changes the softmax output\n",
     "import torch\n",
     "import torch.nn.functional as F\n",
     "import matplotlib.pyplot as plt\n",
     "\n",
     "# A simulated logits vector\n",
     "logits = torch.tensor([1.0, 2.0, 5.0, 3.0, 0.5])\n",
     "print(\"原始logits值:\", logits)\n",
     "\n",
     "# Try several temperature values\n",
     "temperatures = [0.5, 1.0, 2.0]\n",
     "\n",
     "plt.figure(figsize=(12, 6))\n",
     "\n",
     "for i, temp in enumerate(temperatures):\n",
     "    # Apply the temperature (divide logits before softmax)\n",
     "    scaled_logits = logits / temp\n",
     "    \n",
     "    # Softmax turns the scaled logits into a probability distribution\n",
     "    probabilities = F.softmax(scaled_logits, dim=0)\n",
     "    \n",
     "    # Print the resulting distribution\n",
     "    print(f\"\\ntemperature={temp}时的概率分布:\")\n",
     "    print(probabilities)\n",
     "    \n",
     "    # Visualize one subplot per temperature\n",
     "    plt.subplot(1, len(temperatures), i+1)\n",
     "    plt.bar(range(len(probabilities)), probabilities.numpy())\n",
     "    plt.title(f\"Temperature = {temp}\")\n",
     "    plt.ylim(0, 1)\n",
     "    plt.xticks(range(len(probabilities)), [f\"token_{i}\" for i in range(len(probabilities))])\n",
     "\n",
     "plt.tight_layout()\n",
     "plt.show()\n",
     "\n",
     "print(\"\\n解释:\")\n",
     "print(\"- 较低的temperature (如0.5) 使概率分布更加尖锐，最高概率的token被选中的可能性更大\")\n",
     "print(\"- 标准temperature (1.0) 保持原始概率分布\")\n",
     "print(\"- 较高的temperature (如2.0) 使概率分布更加平坦，增加了采样的随机性\")\n"
   ],
   "outputs": [],
   "execution_count": null
  },
  {
   "cell_type": "code",
   "id": "b595d015",
   "metadata": {},
   "source": [
    "# 生成文本函数\n",
    "def generate_text(model, start_string, char2idx, idx2char, num_generate=1000, temperature=1.0):\n",
    "    # 将模型设置为评估模式\n",
    "    model.eval()\n",
    "    \n",
    "    # 将起始字符串转换为索引\n",
    "    input_indices = [char2idx[char] for char in start_string]\n",
    "    input_tensor = torch.tensor(input_indices, dtype=torch.long).unsqueeze(0).to(device) #unsqueeze(0) 在第0维上增加一个维度\n",
    "    print(input_tensor.shape)\n",
    "    # 存储生成的文本\n",
    "    generated_text = start_string\n",
    "    \n",
    "    # 隐藏状态初始化为None，模型会自动初始化\n",
    "    hidden = None\n",
    "    \n",
    "    # 生成指定数量的字符\n",
    "    with torch.no_grad():\n",
    "        for _ in range(num_generate):\n",
    "            # 获取模型预测\n",
    "            output, hidden = model(input_tensor,hidden)\n",
    "            \n",
    "            # 应用温度参数调整预测分布\n",
    "            logits = output[:, -1, :] / temperature\n",
    "            \n",
    "            # 从调整后的分布中采样下一个字符\n",
    "            probabilities = F.softmax(logits, dim=-1)\n",
    "            predicted_id = torch.multinomial(probabilities, 1) #从概率分布中采样一个元素，概率越大，被选中的可能性越大\n",
    "            \n",
    "            # 将预测的字符添加到生成文本中\n",
    "            generated_char = idx2char[predicted_id.item()]\n",
    "            generated_text += generated_char #放入到生成序列\n",
    "            \n",
    "            # 更新输入张量为当前预测的字符\n",
    "            input_tensor = predicted_id\n",
    "    \n",
    "    return generated_text\n",
    "\n",
    "generate_text(model, 'hello',char_to_idx,idx_to_char)"
   ],
   "outputs": [],
   "execution_count": null
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
