{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5a86be22",
   "metadata": {},
   "outputs": [],
   "source": [
    "import math\n",
    "import os\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "from torch.utils.data import Dataset, DataLoader, random_split\n",
    "from tokenizers import Tokenizer\n",
    "from tqdm import tqdm\n",
    "\n",
    "# Environment setup\n",
    "# KMP_DUPLICATE_LIB_OK works around the duplicate-OpenMP-runtime abort\n",
    "# (common with MKL/conda setups); it masks the conflict rather than fixing it.\n",
    "os.environ[\"KMP_DUPLICATE_LIB_OK\"] = \"TRUE\"\n",
    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "\n",
    "# Hyperparameter configuration\n",
    "class Config:\n",
    "    # Data parameters\n",
    "    data_path = \"./processed_data.txt\"\n",
    "    max_length = 128  # maximum sequence length (also sizes the positional encoding)\n",
    "    batch_size = 32\n",
    "    train_ratio = 0.9  # fraction of the data used for training\n",
    "    \n",
    "    # Model parameters\n",
    "    d_model = 256\n",
    "    num_heads = 8\n",
    "    num_layers = 4\n",
    "    dim_feedforward = 1024\n",
    "    dropout = 0.1\n",
    "    \n",
    "    # Training parameters\n",
    "    lr = 5e-5\n",
    "    weight_decay = 0.01\n",
    "    epochs = 20\n",
    "    warmup_steps = 4000\n",
    "    label_smoothing = 0.1\n",
    "    grad_clip = 1.0  # gradient-clipping max norm\n",
    "    \n",
    "    # Generation parameters\n",
    "    temperature = 0.7\n",
    "    top_k = 40\n",
    "\n",
    "# Load the trained BPE tokenizer from disk\n",
    "tokenizer = Tokenizer.from_file(\"bpe_tokenizer.json\")\n",
    "vocab_size = tokenizer.get_vocab_size()\n",
    "# NOTE(review): token_to_id returns None for tokens absent from the vocab —\n",
    "# confirm <bos>/<eos>/<pad> were added when the tokenizer was trained.\n",
    "bos_id = tokenizer.token_to_id(\"<bos>\")\n",
    "eos_id = tokenizer.token_to_id(\"<eos>\")\n",
    "pad_id = tokenizer.token_to_id(\"<pad>\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2cfe7601",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Enhanced Transformer (encoder-decoder) chatbot model\n",
    "class EnhancedTransformer(nn.Module):\n",
    "    \"\"\"Seq2seq Transformer over BPE token ids.\n",
    "\n",
    "    forward() takes (batch, seq) id tensors for src and tgt and returns\n",
    "    (batch, seq, vocab_size) logits.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, config):\n",
    "        super().__init__()\n",
    "        self.config = config\n",
    "        \n",
    "        # Token embedding, scaled by sqrt(d_model) (Vaswani et al., 2017)\n",
    "        self.embedding = nn.Embedding(vocab_size, config.d_model)\n",
    "        self.emb_scale = math.sqrt(config.d_model)\n",
    "        \n",
    "        # Fixed sinusoidal positional encoding. Registered as a NON-persistent\n",
    "        # buffer so it follows model.to(device) automatically but is not\n",
    "        # written to the state_dict (existing checkpoints stay loadable).\n",
    "        self.register_buffer(\"positional_encoding\",\n",
    "                             self._init_positional_encoding(),\n",
    "                             persistent=False)\n",
    "        \n",
    "        # Transformer encoder (seq-first layout; batch_first is not set)\n",
    "        encoder_layer = nn.TransformerEncoderLayer(\n",
    "            d_model=config.d_model,\n",
    "            nhead=config.num_heads,\n",
    "            dim_feedforward=config.dim_feedforward,\n",
    "            dropout=config.dropout,\n",
    "            activation='gelu'\n",
    "        )\n",
    "        self.encoder = nn.TransformerEncoder(encoder_layer, config.num_layers)\n",
    "        \n",
    "        # Transformer decoder\n",
    "        decoder_layer = nn.TransformerDecoderLayer(\n",
    "            d_model=config.d_model,\n",
    "            nhead=config.num_heads,\n",
    "            dim_feedforward=config.dim_feedforward,\n",
    "            dropout=config.dropout,\n",
    "            activation='gelu'\n",
    "        )\n",
    "        self.decoder = nn.TransformerDecoder(decoder_layer, config.num_layers)\n",
    "        \n",
    "        # Two-layer output head projecting to the vocabulary\n",
    "        self.output_layer = nn.Sequential(\n",
    "            nn.Linear(config.d_model, config.dim_feedforward),\n",
    "            nn.GELU(),\n",
    "            nn.Linear(config.dim_feedforward, vocab_size))\n",
    "        \n",
    "        self.dropout = nn.Dropout(config.dropout)\n",
    "        \n",
    "        # Xavier init for every weight matrix\n",
    "        self._init_weights()\n",
    "\n",
    "    def _init_positional_encoding(self):\n",
    "        # Use the instance config (was Config.max_length — a hard-coded class\n",
    "        # reference that ignored any per-instance override of max_length).\n",
    "        max_length = self.config.max_length\n",
    "        position = torch.arange(max_length).unsqueeze(1)\n",
    "        div_term = torch.exp(torch.arange(0, self.config.d_model, 2) * \n",
    "                   (-math.log(10000.0) / self.config.d_model))\n",
    "        pe = torch.zeros(1, max_length, self.config.d_model)\n",
    "        pe[0, :, 0::2] = torch.sin(position * div_term)\n",
    "        pe[0, :, 1::2] = torch.cos(position * div_term)\n",
    "        return pe\n",
    "    \n",
    "    def _init_weights(self):\n",
    "        # Xavier-uniform for all matrices; vectors (biases) keep defaults\n",
    "        for p in self.parameters():\n",
    "            if p.dim() > 1:\n",
    "                nn.init.xavier_uniform_(p)\n",
    "\n",
    "    def forward(self, src, tgt):\n",
    "        # Embed, scale, and add positional encoding\n",
    "        src_emb = self.embedding(src) * self.emb_scale + self.positional_encoding[:, :src.size(1)]\n",
    "        tgt_emb = self.embedding(tgt) * self.emb_scale + self.positional_encoding[:, :tgt.size(1)]\n",
    "        \n",
    "        # Layers expect seq-first: (B, S, D) -> (S, B, D), then dropout\n",
    "        src_emb = self.dropout(src_emb.transpose(0, 1))\n",
    "        tgt_emb = self.dropout(tgt_emb.transpose(0, 1))\n",
    "        \n",
    "        # Padding masks plus the causal target mask. The target padding mask\n",
    "        # was missing before, so padded target positions could be attended\n",
    "        # to during batched training.\n",
    "        src_pad_mask = (src == pad_id)\n",
    "        tgt_pad_mask = (tgt == pad_id)\n",
    "        causal_mask = self._generate_square_subsequent_mask(tgt.size(1))\n",
    "        \n",
    "        # Encoder pass\n",
    "        memory = self.encoder(src_emb, src_key_padding_mask=src_pad_mask)\n",
    "        \n",
    "        # Decoder pass\n",
    "        output = self.decoder(\n",
    "            tgt_emb, memory,\n",
    "            tgt_mask=causal_mask,\n",
    "            tgt_key_padding_mask=tgt_pad_mask,\n",
    "            memory_key_padding_mask=src_pad_mask\n",
    "        )\n",
    "        \n",
    "        # Back to (B, S, D), then project to vocabulary logits\n",
    "        output = output.transpose(0, 1)\n",
    "        return self.output_layer(output)\n",
    "\n",
    "    def _generate_square_subsequent_mask(self, sz):\n",
    "        # Canonical additive causal mask: 0.0 on/below the diagonal, -inf\n",
    "        # above. (Previously allowed positions kept the value 1.0 instead of\n",
    "        # 0.0 — a uniform offset that softmax cancels, but non-standard.)\n",
    "        mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)\n",
    "        mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))\n",
    "        return mask.to(device)\n",
    "\n",
    "# Instantiate the model and move it to the selected device\n",
    "config = Config()\n",
    "model = EnhancedTransformer(config).to(device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a41bd28d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Generation\n",
    "# Load the trained weights ONCE here instead of re-reading best_model.pth\n",
    "# from disk on every call to generate_response (the original reloaded the\n",
    "# checkpoint for each user turn).\n",
    "# NOTE(review): torch.load unpickles the file — prefer weights_only=True\n",
    "# (torch>=1.13) if the checkpoint is not fully trusted.\n",
    "model.load_state_dict(torch.load(\"best_model.pth\", map_location=device))\n",
    "model.eval()\n",
    "\n",
    "def generate_response(model, input_text, max_length=50):\n",
    "    \"\"\"Sample a reply for input_text using temperature + top-k sampling.\n",
    "\n",
    "    Assumes `model` already has its weights loaded and is in eval mode.\n",
    "    \"\"\"\n",
    "    # Encode the prompt, truncating so <bos> and <eos> still fit\n",
    "    input_ids = [bos_id] + tokenizer.encode(input_text).ids[:Config.max_length-2] + [eos_id]\n",
    "    src = torch.tensor([input_ids], device=device)\n",
    "    \n",
    "    # Autoregressive decoding, one token per step\n",
    "    generated = [bos_id]\n",
    "    for _ in range(max_length):\n",
    "        tgt = torch.tensor([generated], device=device)\n",
    "        \n",
    "        with torch.no_grad():\n",
    "            output = model(src, tgt)\n",
    "            # Temperature-scaled logits of the last position, then top-k\n",
    "            logits = output[0, -1, :] / Config.temperature\n",
    "            topk = torch.topk(logits, Config.top_k)\n",
    "            probs = torch.softmax(topk.values, dim=-1)\n",
    "            next_token = topk.indices[torch.multinomial(probs, 1)].item()\n",
    "        \n",
    "        if next_token == eos_id:\n",
    "            break\n",
    "        generated.append(next_token)\n",
    "    \n",
    "    # Decode, dropping the special tokens\n",
    "    return tokenizer.decode([t for t in generated if t not in [bos_id, eos_id, pad_id]])\n",
    "\n",
    "# Interactive test loop\n",
    "while True:\n",
    "    try:\n",
    "        user_input = input(\"You: \")\n",
    "        if user_input.lower() in [\"exit\", \"quit\"]:\n",
    "            break\n",
    "        response = generate_response(model, user_input)\n",
    "        print(f\"Bot: {response}\")\n",
    "    except (KeyboardInterrupt, EOFError):  # Ctrl-C or closed stdin exits cleanly\n",
    "        break"
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
