{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "b00e9c02-982a-48a1-99a5-894075ce654f",
   "metadata": {},
   "source": [
    "# 中文小说生成器\n",
    "\n",
    "## 项目简介\n",
    "\n",
    "这是一个基于深度学习的中文小说生成器，使用PyTorch框架实现。项目通过分析中文小说文本的语言模式和结构特征，训练一个能够生成连贯、风格相似文本的神经网络模型。\n",
    "\n",
    "## 数据来源\n",
    "\n",
    "- 输入数据为中文小说文本文件(如doupo.txt)\n",
    "- 支持同时加载多个小说文件进行训练\n",
    "- 默认使用GBK编码，可调整以适应不同文本\n",
    "\n",
    "## 数据预处理\n",
    "\n",
    "1. 文本清洗：移除特殊字符，保留中文、常见标点和基本英文符号\n",
    "2. 空格处理：规范化空白字符，保留单个空格\n",
    "3. 标点标准化：统一不同形式的引号和其他标点符号\n",
    "\n",
    "## 分词与词汇表\n",
    "\n",
    "- 使用jieba分词库进行中文分词\n",
    "- 支持加载自定义词典(user_dict.txt)提高专业术语识别\n",
    "- 构建词汇表并过滤低频词(默认最小频率为5)\n",
    "- 添加特殊标记：`<PAD>`, `<UNK>`, `<SOS>`, `<EOS>`\n",
    "\n",
    "## 训练数据格式\n",
    "\n",
    "- 使用滑动窗口方法创建序列样本\n",
    "- 默认序列长度为50个词\n",
    "- 目标值为序列的下一个词\n",
    "- 使用批处理提高训练效率(默认批量大小128)\n",
    "\n",
    "## 模型思路\n",
    "\n",
    "### 模型架构\n",
    "\n",
    "本项目采用基于GRU(Gated Recurrent Unit)的序列生成模型，包含以下组件：\n",
    "\n",
    "1. 词嵌入层(Embedding Layer)\n",
    "   - 将离散词汇索引转换为密集向量表示\n",
    "   - 可调整嵌入维度(默认256维)\n",
    "2. GRU层(Gated Recurrent Unit)\n",
    "   - 使用门控机制捕捉长距离依赖关系\n",
    "   - 支持多层堆叠(默认3层)\n",
    "   - 可选双向GRU(默认关闭)\n",
    "3. 注意力机制(Attention Mechanism)\n",
    "   - 增强模型对关键信息的关注能力\n",
    "   - 提高生成长文本的连贯性\n",
    "4. 输出层\n",
    "   - 全连接层将隐藏状态映射到词汇表空间\n",
    "   - 使用Softmax计算下一个词的概率分布\n",
    "\n",
    "### 训练策略\n",
    "\n",
    "1. 优化器：使用Adam优化器，学习率默认0.001\n",
    "2. 损失函数：交叉熵损失函数\n",
    "3. 正则化：\n",
    "   - Dropout防止过拟合(默认0.4)\n",
    "   - 梯度裁剪(最大值5)防止梯度爆炸\n",
    "4. 学习率调度：使用ReduceLROnPlateau动态调整学习率\n",
    "5. 模型保存：自动保存验证损失最小的模型\n",
    "\n",
    "### 文本生成\n",
    "\n",
    "1. 生成策略：使用Top-K采样(默认K=10)提高多样性\n",
    "2. 温度参数：通过温度参数(默认0.8)控制生成随机性\n",
    "3. 停止条件：遇到`<EOS>`标记或达到指定长度时停止生成"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5cb17b9a-b751-4056-aee6-89cdec385ef8",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "import numpy as np\n",
    "import re\n",
    "import os\n",
    "import time\n",
    "from collections import Counter\n",
    "import jieba\n",
    "from tqdm import tqdm\n",
    "\n",
    "# 1. 获取小说文本数据 - 支持多个文件\n",
    "def get_novel_text(file_paths=['doupo.txt'], encoding='GBK'):\n",
    "    \"\"\"\n",
    "    读取多个小说文本文件\n",
    "    参数\n",
    "    ----\n",
    "    file_paths : list\n",
    "        文本文件路径列表\n",
    "    encoding  : str\n",
    "        文件编码\n",
    "    返回\n",
    "    ----\n",
    "    text : str\n",
    "        合并后的文本内容\n",
    "    \"\"\"\n",
    "    all_text = \"\"\n",
    "    for file_path in file_paths:\n",
    "        if os.path.exists(file_path):\n",
    "            with open(file_path, 'r', encoding=encoding) as f:\n",
    "                all_text += f.read() + \"\\n\"\n",
    "        else:\n",
    "            print(f\"警告: 文件 {file_path} 不存在，跳过\")\n",
    "    return all_text\n",
    "\n",
    "# 2. 改进的文本预处理\n",
    "def preprocess_text(text):\n",
    "    # 移除特殊字符和多余空格\n",
    "    text = re.sub(r'\\s+', ' ', text)  # 保留单个空格\n",
    "    # 保留中文、常见标点和基本英文标点\n",
    "    text = re.sub(r'[^\\u4e00-\\u9fa5a-zA-Z0-9，。！？；：,.!?;:\\'\"“”‘’（）《》\\-\\s]', '', text)\n",
    "    # 标准化标点\n",
    "    text = re.sub(r'[“”]', '\"', text)\n",
    "    text = re.sub(r'[‘’]', \"'\", text)\n",
    "    return text.strip()\n",
    "\n",
    "# 3. Improved Chinese tokenization and vocabulary construction\n",
    "def build_vocab(text, min_freq=5):\n",
    "    \"\"\"Tokenize `text` with jieba and build a frequency-filtered vocabulary.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    text : str\n",
    "        Preprocessed corpus text.\n",
    "    min_freq : int\n",
    "        Minimum occurrence count for a token to enter the vocabulary.\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "    tuple\n",
    "        (words, vocab, word_to_idx, idx_to_word, vocab_size)\n",
    "    \"\"\"\n",
    "    # Load an optional user dictionary to improve segmentation of domain terms.\n",
    "    # (Fixed: was a conditional expression misused as a statement.)\n",
    "    if os.path.exists('user_dict.txt'):\n",
    "        jieba.load_userdict('user_dict.txt')\n",
    "    \n",
    "    words = list(jieba.cut(text))\n",
    "    \n",
    "    # Count token frequencies and drop rare tokens\n",
    "    vocab_counter = Counter(words)\n",
    "    vocab = [word for word, count in vocab_counter.items() if count >= min_freq]\n",
    "    # Sort by descending frequency; sort is stable, so ties keep first-occurrence order\n",
    "    vocab = sorted(vocab, key=lambda w: vocab_counter[w], reverse=True)\n",
    "    \n",
    "    # Reserve special tokens at fixed indices: 0=<PAD>, 1=<UNK>, 2=<SOS>, 3=<EOS>\n",
    "    special_tokens = ['<PAD>', '<UNK>', '<SOS>', '<EOS>']\n",
    "    vocab = special_tokens + vocab\n",
    "    \n",
    "    vocab_size = len(vocab)\n",
    "    word_to_idx = {word: i for i, word in enumerate(vocab)}\n",
    "    idx_to_word = {i: word for i, word in enumerate(vocab)}\n",
    "    \n",
    "    print(f\"词汇表大小: {vocab_size}\")\n",
    "    print(f\"前20个高频词: {vocab[:20]}\")\n",
    "    \n",
    "    return words, vocab, word_to_idx, idx_to_word, vocab_size\n",
    "\n",
    "# 4. 改进的准备训练数据 - 支持批量生成\n",
    "def prepare_training_data(words, word_to_idx, seq_length=50, batch_size=128):\n",
    "    # 将文本转换为索引序列，处理未知词\n",
    "    unk_idx = word_to_idx.get('<UNK>', 0)\n",
    "    indices = [word_to_idx.get(word, unk_idx) for word in words]\n",
    "    \n",
    "    # 创建训练数据\n",
    "    X = []\n",
    "    y = []\n",
    "    \n",
    "    # 使用滑动窗口创建样本\n",
    "    for i in range(len(indices) - seq_length):\n",
    "        X.append(indices[i:i+seq_length])\n",
    "        y.append(indices[i+seq_length])\n",
    "    \n",
    "    # 转换为PyTorch张量\n",
    "    X = torch.tensor(X, dtype=torch.long)\n",
    "    y = torch.tensor(y, dtype=torch.long)\n",
    "    \n",
    "    # 创建数据加载器\n",
    "    dataset = torch.utils.data.TensorDataset(X, y)\n",
    "    dataloader = torch.utils.data.DataLoader(\n",
    "        dataset, \n",
    "        batch_size=batch_size, \n",
    "        shuffle=True,\n",
    "        drop_last=True\n",
    "    )\n",
    "    \n",
    "    return dataloader\n",
    "\n",
    "# 5. 更复杂的GRU模型\n",
    "class EnhancedGRUModel(nn.Module):\n",
    "    def __init__(self, vocab_size, hidden_size=512, embedding_dim=256, \n",
    "                 n_layers=3, dropout=0.4, bidirectional=False):\n",
    "        super(EnhancedGRUModel, self).__init__()\n",
    "        self.hidden_size = hidden_size\n",
    "        self.n_layers = n_layers\n",
    "        self.bidirectional = bidirectional\n",
    "        \n",
    "        # 词嵌入层\n",
    "        self.embedding = nn.Embedding(vocab_size, embedding_dim)\n",
    "        \n",
    "        # GRU层\n",
    "        self.gru = nn.GRU(\n",
    "            embedding_dim, \n",
    "            hidden_size, \n",
    "            n_layers,\n",
    "            batch_first=True,\n",
    "            dropout=dropout if n_layers > 1 else 0,\n",
    "            bidirectional=bidirectional\n",
    "        )\n",
    "        \n",
    "        # 双向GRU需要调整输出维度\n",
    "        factor = 2 if bidirectional else 1\n",
    "        \n",
    "        # 注意力层\n",
    "        self.attention = nn.Sequential(\n",
    "            nn.Linear(hidden_size * factor, hidden_size),\n",
    "            nn.Tanh(),\n",
    "            nn.Linear(hidden_size, 1)\n",
    "        )\n",
    "        \n",
    "        # 全连接层\n",
    "        self.fc = nn.Linear(hidden_size * factor, vocab_size)\n",
    "        \n",
    "        # Dropout层\n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "        \n",
    "    def forward(self, x, hidden=None):\n",
    "        batch_size = x.size(0)\n",
    "        \n",
    "        # 初始化隐藏状态\n",
    "        if hidden is None:\n",
    "            hidden = self.init_hidden(batch_size)\n",
    "            if x.is_cuda:\n",
    "                hidden = hidden.cuda()\n",
    "        \n",
    "        # 词嵌入\n",
    "        embedded = self.embedding(x)\n",
    "        embedded = self.dropout(embedded)\n",
    "        \n",
    "        # GRU处理\n",
    "        output, hidden = self.gru(embedded, hidden)\n",
    "        \n",
    "        # 注意力机制\n",
    "        attn_weights = self.attention(output)\n",
    "        attn_weights = torch.softmax(attn_weights, dim=1)\n",
    "        context = torch.sum(attn_weights * output, dim=1)\n",
    "        \n",
    "        # 输出层\n",
    "        output = self.fc(context)\n",
    "        \n",
    "        return output, hidden\n",
    "    \n",
    "    def init_hidden(self, batch_size):\n",
    "        # 初始化隐藏状态\n",
    "        num_directions = 2 if self.bidirectional else 1\n",
    "        return torch.zeros(self.n_layers * num_directions, batch_size, self.hidden_size)\n",
    "\n",
    "# 6. Improved training loop - LR scheduling and best-model checkpointing\n",
    "def train_model(model, dataloader, epochs=100, lr=0.001, model_save_path='model.pth'):\n",
    "    \"\"\"Train `model` on `dataloader`, checkpointing the lowest-loss epoch.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    model : nn.Module\n",
    "        Model whose forward returns (logits, hidden).\n",
    "    dataloader : torch.utils.data.DataLoader\n",
    "        Yields (inputs, targets) batches of token indices.\n",
    "    epochs : int\n",
    "        Number of passes over the data.\n",
    "    lr : float\n",
    "        Initial Adam learning rate.\n",
    "    model_save_path : str\n",
    "        Where the best checkpoint (by average epoch loss) is written.\n",
    "    \"\"\"\n",
    "    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "    print(f\"使用设备: {device}\")\n",
    "    model = model.to(device)\n",
    "    \n",
    "    criterion = nn.CrossEntropyLoss()\n",
    "    optimizer = optim.Adam(model.parameters(), lr=lr)\n",
    "    \n",
    "    # Halve the LR when the loss plateaus. Fix: the `verbose` kwarg is\n",
    "    # deprecated/removed in recent PyTorch, so LR changes are logged manually.\n",
    "    scheduler = optim.lr_scheduler.ReduceLROnPlateau(\n",
    "        optimizer, mode='min', factor=0.5, patience=5\n",
    "    )\n",
    "    \n",
    "    best_loss = float('inf')\n",
    "    \n",
    "    for epoch in range(epochs):\n",
    "        epoch_loss = 0\n",
    "        model.train()\n",
    "        \n",
    "        # tqdm renders a per-epoch progress bar\n",
    "        pbar = tqdm(dataloader, desc=f\"Epoch {epoch+1}/{epochs}\")\n",
    "        for inputs, targets in pbar:\n",
    "            inputs = inputs.to(device)\n",
    "            targets = targets.to(device)\n",
    "            \n",
    "            # Fresh hidden state per batch: samples are independent windows\n",
    "            hidden = model.init_hidden(inputs.size(0)).to(device)\n",
    "            \n",
    "            optimizer.zero_grad()\n",
    "            output, hidden = model(inputs, hidden)\n",
    "            loss = criterion(output, targets)\n",
    "            \n",
    "            loss.backward()\n",
    "            # Clip gradients to stabilize recurrent training\n",
    "            nn.utils.clip_grad_norm_(model.parameters(), max_norm=5)\n",
    "            optimizer.step()\n",
    "            \n",
    "            epoch_loss += loss.item()\n",
    "            pbar.set_postfix(loss=loss.item())\n",
    "        \n",
    "        avg_loss = epoch_loss / len(dataloader)\n",
    "        print(f\"Epoch [{epoch+1}/{epochs}], 平均损失: {avg_loss:.4f}\")\n",
    "        \n",
    "        # Step the scheduler and report any LR change (replaces verbose=True)\n",
    "        prev_lr = optimizer.param_groups[0]['lr']\n",
    "        scheduler.step(avg_loss)\n",
    "        new_lr = optimizer.param_groups[0]['lr']\n",
    "        if new_lr != prev_lr:\n",
    "            print(f\"学习率调整: {prev_lr:.6f} -> {new_lr:.6f}\")\n",
    "        \n",
    "        # Checkpoint whenever the average loss improves\n",
    "        if avg_loss < best_loss:\n",
    "            best_loss = avg_loss\n",
    "            torch.save({\n",
    "                'epoch': epoch,\n",
    "                'model_state_dict': model.state_dict(),\n",
    "                'optimizer_state_dict': optimizer.state_dict(),\n",
    "                'loss': avg_loss,\n",
    "            }, model_save_path)\n",
    "            print(f\"模型已保存到 {model_save_path}\")\n",
    "    \n",
    "    return model\n",
    "\n",
    "# 7. Improved text generation - longer continuations\n",
    "def generate_text(model, starting_text, word_to_idx, idx_to_word, \n",
    "                 num_words=200, temperature=0.8, top_k=10):\n",
    "    \"\"\"Continue `starting_text` with up to `num_words` sampled tokens.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    model : nn.Module\n",
    "        Model whose forward returns (logits, hidden).\n",
    "    starting_text : str\n",
    "        Prompt; tokenized with jieba and used to warm up the hidden state.\n",
    "    num_words : int\n",
    "        Maximum number of tokens to generate.\n",
    "    temperature : float\n",
    "        Softmax temperature; <1 sharpens, >1 flattens the distribution.\n",
    "    top_k : int\n",
    "        Sample only among the k most probable next tokens.\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "    str\n",
    "        The prompt concatenated with the generated continuation.\n",
    "    \"\"\"\n",
    "    model.eval()\n",
    "    device = next(model.parameters()).device\n",
    "    \n",
    "    # Tokenize the prompt; OOV tokens map to <UNK>\n",
    "    words = list(jieba.cut(starting_text))\n",
    "    unk_idx = word_to_idx.get('<UNK>', 0)\n",
    "    input_seq = [word_to_idx.get(word, unk_idx) for word in words]\n",
    "    # Bug fix: an empty prompt previously left `input_tensor` undefined and\n",
    "    # raised NameError. Seed with <SOS> (or <UNK>) instead.\n",
    "    if not input_seq:\n",
    "        input_seq = [word_to_idx.get('<SOS>', unk_idx)]\n",
    "    \n",
    "    hidden = model.init_hidden(1).to(device)\n",
    "    \n",
    "    predicted_words = []\n",
    "    \n",
    "    with torch.no_grad():\n",
    "        # Warm up the hidden state on all prompt tokens except the last.\n",
    "        # (Bug fix: the last token used to be fed twice - once here and once\n",
    "        # again as the first step of the sampling loop - skewing the state.)\n",
    "        for idx in input_seq[:-1]:\n",
    "            _, hidden = model(torch.tensor([[idx]]).to(device), hidden)\n",
    "        \n",
    "        current_idx = torch.tensor([[input_seq[-1]]]).to(device)\n",
    "        \n",
    "        for _ in range(num_words):\n",
    "            output, hidden = model(current_idx, hidden)\n",
    "            \n",
    "            # Temperature-scaled softmax over the vocabulary\n",
    "            output = output / temperature\n",
    "            prob = nn.functional.softmax(output, dim=1).cpu().data.numpy()[0]\n",
    "            \n",
    "            # Restrict sampling to the top-k tokens, renormalizing their mass\n",
    "            top_k_probs, top_k_indices = torch.topk(torch.tensor(prob), top_k)\n",
    "            top_k_probs = top_k_probs.numpy() / top_k_probs.numpy().sum()\n",
    "            \n",
    "            chosen_idx = int(np.random.choice(top_k_indices.numpy(), p=top_k_probs))\n",
    "            \n",
    "            word = idx_to_word[chosen_idx]\n",
    "            # Stop on the end-of-sequence marker without emitting the literal\n",
    "            # '<EOS>' string into the continuation (bug fix).\n",
    "            if word == '<EOS>':\n",
    "                break\n",
    "            predicted_words.append(word)\n",
    "            \n",
    "            current_idx = torch.tensor([[chosen_idx]]).to(device)\n",
    "    \n",
    "    return starting_text + ''.join(predicted_words)\n",
    "\n",
    "# 8. 加载已保存的模型\n",
    "def load_model(model, model_path, device=None):\n",
    "    if device is None:\n",
    "        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "    \n",
    "    if os.path.exists(model_path):\n",
    "        checkpoint = torch.load(model_path, map_location=device)\n",
    "        model.load_state_dict(checkpoint['model_state_dict'])\n",
    "        model.to(device)\n",
    "        print(f\"从 {model_path} 加载模型\")\n",
    "        return model\n",
    "    else:\n",
    "        print(f\"警告: 模型文件 {model_path} 不存在\")\n",
    "        return model\n",
    "\n",
    "# Main program: load corpus -> build vocab -> train (or load) -> model ready\n",
    "if __name__ == \"__main__\":\n",
    "    # Load and preprocess the corpus - more novel files can be added here\n",
    "    print(\"获取和处理文本...\")\n",
    "    novel_files = [\n",
    "        'doupo.txt',\n",
    "    ]\n",
    "    text = get_novel_text(novel_files)\n",
    "    text = preprocess_text(text)\n",
    "    \n",
    "    # Persist the preprocessed text for later reuse\n",
    "    with open('preprocessed_text.txt', 'w', encoding='utf-8') as f:\n",
    "        f.write(text)\n",
    "    \n",
    "    print(f\"文本长度: {len(text)} 字符\")\n",
    "\n",
    "    # Build the vocabulary (min_freq=3 here, stricter than the docs' default 5)\n",
    "    print(\"构建词汇表...\")\n",
    "    words, vocab, word_to_idx, idx_to_word, vocab_size = build_vocab(text, min_freq=3)\n",
    "\n",
    "    # Prepare the training data\n",
    "    print(\"准备训练数据...\")\n",
    "    seq_length = 50  # context window length, in tokens\n",
    "    batch_size = 256  # NOTE(review): markdown above says 128 - confirm intended\n",
    "    dataloader = prepare_training_data(words, word_to_idx, seq_length, batch_size)\n",
    "\n",
    "    # Define the model\n",
    "    print(\"初始化模型...\")\n",
    "    hidden_size = 512  # GRU hidden state width\n",
    "    embedding_dim = 256  # token embedding dimension\n",
    "    n_layers = 3  # number of stacked GRU layers\n",
    "    \n",
    "    model = EnhancedGRUModel(\n",
    "        vocab_size, \n",
    "        hidden_size, \n",
    "        embedding_dim, \n",
    "        n_layers,\n",
    "        bidirectional=False\n",
    "    )\n",
    "\n",
    "    # Train a new model, or load an existing checkpoint if one is present\n",
    "    model_path = 'novel_generator.pth'\n",
    "    if os.path.exists(model_path):\n",
    "        model = load_model(model, model_path)\n",
    "    else:\n",
    "        print(\"开始训练...\")\n",
    "        start_time = time.time()\n",
    "        model = train_model(\n",
    "            model, \n",
    "            dataloader, \n",
    "            epochs=10,  # NOTE(review): original comment said \"more epochs\" but value is 10\n",
    "            lr=0.001,\n",
    "            model_save_path=model_path\n",
    "        )\n",
    "        print(f\"训练完成，耗时: {time.time()-start_time:.2f}秒\")\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "id": "a42fdf0f-2fdc-441e-9827-5ea61e211747",
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "生成文本续写:\n",
      "输入: '正是因为我废物三年，才知道什么是最重要的'\n",
      "续写结果:\n",
      "正是因为我废物三年，才知道什么是最重要的前提最具班级火法见谅不眠不休方可方可方可区域着想。抓机书阅读网之下的时候之功处。不过却不够武动乾坤。 蔚蓝轻弹，萧炎，小医仙，小医仙的声音，紧紧的盯着两团小蛇，然后猛的落在面前，旋即猛的颤抖了一点，旋即，他的攻击，他的身体，竟然是能够带来的感觉武动乾坤。 眼睛之上，两道青色火焰从指尖，旋即顺着薰儿的身影，目光中，然后投向\n",
      "--------------------------------------------------------------------------------\n"
     ]
    }
   ],
   "source": [
    "# Generate sample continuations. (Bug fix: this cell previously began at\n",
    "# indentation level 1 - leftover from the `if __name__` block in the cell\n",
    "# above - and raised IndentationError when run on its own.)\n",
    "print(\"\\n生成文本续写:\")\n",
    "test_cases = [\n",
    "    \"正是因为我废物三年，才知道什么是最重要的\",\n",
    "]\n",
    "\n",
    "for test in test_cases:\n",
    "    print(f\"输入: '{test}'\")\n",
    "    generated = generate_text(\n",
    "        model,\n",
    "        test,\n",
    "        word_to_idx,\n",
    "        idx_to_word,\n",
    "        num_words=100,  # length of the generated continuation, in tokens\n",
    "        temperature=1,\n",
    "        top_k=50\n",
    "    )\n",
    "    print(f\"续写结果:\\n{generated}\")\n",
    "    print(\"-\" * 80)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "00342500-9a44-48c2-8cf4-52b83ff71b75",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
