{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "5a1368f0",
   "metadata": {},
   "source": [
    "# 作业5：RNN 生成模型"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "997082ed",
   "metadata": {},
   "source": [
    "以 `data/names.txt` 中的英文名作为训练集，利用 RNN 或 LSTM 等方法对字母序列数据进行建模，然后使用拟合的模型随机生成20个名字。本次作业为开放式，不指定各类超参数（如网络结构、学习率、迭代次数等），但需提供必要的输出和诊断结果支持你的选择（如模型是否收敛、效果评价等）。"
   ]
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-06-09T05:44:53.753665Z",
     "start_time": "2025-06-09T05:44:42.678964Z"
    }
   },
   "cell_type": "code",
   "source": [
    "import random, math, torch\n",
    "from torch import nn\n",
    "from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence\n",
    "from torch.utils.data import DataLoader\n",
    "\n",
    "# Read the training names (one per line), lower-cased; blank lines are skipped.\n",
    "with open(\"data/names.txt\", encoding=\"utf-8\") as f:\n",
    "    names = [line.strip().lower() for line in f if line.strip()]\n",
    "\n",
    "# Vocabulary: index 0 is PAD (doubles as ignore_index in the loss), then every\n",
    "# character that occurs in the data, then the end-of-name token.\n",
    "chars = sorted(set(\"\".join(names)))\n",
    "PAD, EOS = \"<PAD>\", \"<EOS>\"\n",
    "itos = [PAD] + chars + [EOS]\n",
    "stoi = {c: i for i, c in enumerate(itos)}\n",
    "vocab_size = len(itos)\n",
    "\n",
    "\n",
    "def encode(name):\n",
    "    \"\"\"Map a name to a 1-D LongTensor of character ids, terminated by EOS.\"\"\"\n",
    "    return torch.tensor([stoi[c] for c in name] + [stoi[EOS]])\n",
    "\n",
    "\n",
    "dataset = [encode(n) for n in names]\n",
    "\n",
    "\n",
    "def collate(bs):\n",
    "    \"\"\"Pad a batch of variable-length id sequences into (input, target, lengths).\n",
    "\n",
    "    The batch is sorted longest-first, as required by pack_padded_sequence with\n",
    "    its default enforce_sorted=True. Inputs drop the final position and targets\n",
    "    drop the first, so step t of the input predicts step t of the target.\n",
    "    \"\"\"\n",
    "    bs.sort(key=len, reverse=True)\n",
    "    padded = pad_sequence(bs, batch_first=True)\n",
    "    inputs, targets = padded[:, :-1], padded[:, 1:]\n",
    "    seq_lens = [seq.size(0) - 1 for seq in bs]\n",
    "    return inputs, targets, seq_lens\n",
    "\n",
    "\n",
    "loader = DataLoader(dataset, batch_size=256, shuffle=True, collate_fn=collate)\n",
    "\n",
    "\n",
    "class CharLSTM(nn.Module):\n",
    "    \"\"\"Character-level language model: embedding -> multi-layer LSTM -> linear head.\"\"\"\n",
    "\n",
    "    def __init__(self, V, emb=64, hid=128, layers=2):\n",
    "        super().__init__()\n",
    "        # padding_idx=0 pins the PAD embedding to the zero vector.\n",
    "        self.emb = nn.Embedding(V, emb, padding_idx=0)\n",
    "        self.lstm = nn.LSTM(emb, hid, layers, batch_first=True)\n",
    "        self.fc = nn.Linear(hid, V)\n",
    "\n",
    "    def forward(self, x, lens, h=None):\n",
    "        \"\"\"Return per-position vocabulary logits and the final LSTM state.\"\"\"\n",
    "        embedded = self.emb(x)\n",
    "        packed = pack_padded_sequence(embedded, lens, batch_first=True)\n",
    "        packed_out, h = self.lstm(packed, h)\n",
    "        out, _ = pad_packed_sequence(packed_out, batch_first=True)\n",
    "        return self.fc(out), h\n",
    "\n",
    "\n",
    "device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n",
    "\n",
    "# Seed every RNG in play so weight init, shuffling and sampling are reproducible.\n",
    "SEED = 42\n",
    "random.seed(SEED)\n",
    "torch.manual_seed(SEED)\n",
    "\n",
    "model = CharLSTM(vocab_size).to(device)\n",
    "opt = torch.optim.Adam(model.parameters(), lr=3e-3)\n",
    "loss_fn = nn.CrossEntropyLoss(ignore_index=0)  # index 0 = PAD, excluded from the loss\n",
    "\n",
    "MAX_EPOCHS = 200\n",
    "PATIENCE = 8       # epochs without a significant improvement before stopping\n",
    "min_delta = 0.005  # minimum CE drop that counts as an improvement\n",
    "PRINT_EVERY = 2\n",
    "SAMPLE_EVERY = 10\n",
    "\n",
    "best_ce = float(\"inf\")\n",
    "wait = 0\n",
    "\n",
    "\n",
    "def sample(start=None, max_len=20, temp=0.8):\n",
    "    \"\"\"Generate one name, sampling character by character from the model.\n",
    "\n",
    "    start: optional first character (a random data character when None).\n",
    "    max_len: maximum number of characters generated after the first one.\n",
    "    temp: softmax temperature; lower values give more conservative samples.\n",
    "    \"\"\"\n",
    "    model.eval()\n",
    "    with torch.no_grad():\n",
    "        if not start:\n",
    "            start = random.choice(chars)\n",
    "        idx = torch.tensor([[stoi[start]]], device=device)\n",
    "        h = None\n",
    "        out = [start]\n",
    "        for _ in range(max_len):\n",
    "            logit, h = model(idx, [1], h)\n",
    "            logit = logit[:, -1] / temp\n",
    "            # Never sample PAD (id 0): it is a batching artifact only, and\n",
    "            # emitting it would splice the literal \"<PAD>\" string into the name.\n",
    "            logit[:, 0] = float(\"-inf\")\n",
    "            nxt = torch.multinomial(torch.softmax(logit, 1), 1).item()\n",
    "            if nxt == stoi[EOS]:\n",
    "                break\n",
    "            out.append(itos[nxt])\n",
    "            idx = torch.tensor([[nxt]], device=device)\n",
    "    return \"\".join(out)\n",
    "\n",
    "\n",
    "for epoch in range(1, MAX_EPOCHS + 1):\n",
    "    model.train()\n",
    "    tot, tok = 0, 0\n",
    "    for x, y, lens in loader:\n",
    "        x, y = x.to(device), y.to(device)\n",
    "        logit, _ = model(x, lens)\n",
    "        loss = loss_fn(logit.reshape(-1, vocab_size), y.reshape(-1))\n",
    "        opt.zero_grad()\n",
    "        loss.backward()\n",
    "        opt.step()\n",
    "        # Re-weight the per-batch mean loss by its non-PAD token count so the\n",
    "        # epoch CE is an exact token-level average over the whole dataset.\n",
    "        n_tokens = (y != 0).sum().item()\n",
    "        tok += n_tokens\n",
    "        tot += loss.item() * n_tokens\n",
    "\n",
    "    ce = tot / tok\n",
    "    ppl = math.exp(ce)\n",
    "\n",
    "    # Early stopping: resetting the patience counter requires beating the best\n",
    "    # CE by min_delta, but best_ce itself tracks the true minimum so the final\n",
    "    # report is exact (previously best_ce was only updated on significant\n",
    "    # improvements and could report a stale \"best\").\n",
    "    improved = best_ce - ce > min_delta\n",
    "    if improved:\n",
    "        wait = 0\n",
    "    else:\n",
    "        wait += 1\n",
    "    best_ce = min(best_ce, ce)\n",
    "\n",
    "    if epoch % PRINT_EVERY == 0 or epoch == 1:\n",
    "        print(f\"Epoch {epoch:3d} ,CE {ce:.4f} ,Perp {ppl:5.2f} , {'*' if improved else ''}\")\n",
    "    if epoch % SAMPLE_EVERY == 0:\n",
    "        print(\" samples:\", \", \".join(sample(temp=0.8) for _ in range(3)))\n",
    "\n",
    "    if wait >= PATIENCE:\n",
    "        print(f\"Early-stop triggered at epoch {epoch}. Best CE={best_ce:.4f}\")\n",
    "        break\n",
    "\n",
    "print(\"\\nFinal generated names:\")\n",
    "for _ in range(20):\n",
    "    print(sample(temp=0.8))\n"
   ],
   "id": "b18a4b6cfb2b17e9",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch   1 ,CE 3.0108 ,Perp 20.30 , *\n",
      "Epoch   2 ,CE 2.6800 ,Perp 14.58 , *\n",
      "Epoch   4 ,CE 2.2110 ,Perp  9.12 , *\n",
      "Epoch   6 ,CE 2.0010 ,Perp  7.40 , *\n",
      "Epoch   8 ,CE 1.8869 ,Perp  6.60 , *\n",
      "Epoch  10 ,CE 1.7871 ,Perp  5.97 , *\n",
      " samples: kirk, unley, pinge\n",
      "Epoch  12 ,CE 1.7063 ,Perp  5.51 , *\n",
      "Epoch  14 ,CE 1.6339 ,Perp  5.12 , *\n",
      "Epoch  16 ,CE 1.5700 ,Perp  4.81 , *\n",
      "Epoch  18 ,CE 1.5020 ,Perp  4.49 , *\n",
      "Epoch  20 ,CE 1.4384 ,Perp  4.21 , *\n",
      " samples: peatley, dalling, oatrell\n",
      "Epoch  22 ,CE 1.3826 ,Perp  3.99 , *\n",
      "Epoch  24 ,CE 1.3235 ,Perp  3.76 , *\n",
      "Epoch  26 ,CE 1.2711 ,Perp  3.56 , *\n",
      "Epoch  28 ,CE 1.2257 ,Perp  3.41 , *\n",
      "Epoch  30 ,CE 1.1822 ,Perp  3.26 , *\n",
      " samples:  artley, mcgredy, yardley\n",
      "Epoch  32 ,CE 1.1435 ,Perp  3.14 , *\n",
      "Epoch  34 ,CE 1.1073 ,Perp  3.03 , *\n",
      "Epoch  36 ,CE 1.0819 ,Perp  2.95 , *\n",
      "Epoch  38 ,CE 1.0542 ,Perp  2.87 , *\n",
      "Epoch  40 ,CE 1.0323 ,Perp  2.81 , *\n",
      " samples: hatch, everson, jordan\n",
      "Epoch  42 ,CE 1.0154 ,Perp  2.76 , *\n",
      "Epoch  44 ,CE 0.9946 ,Perp  2.70 , *\n",
      "Epoch  46 ,CE 0.9805 ,Perp  2.67 , *\n",
      "Epoch  48 ,CE 0.9694 ,Perp  2.64 , *\n",
      "Epoch  50 ,CE 0.9555 ,Perp  2.60 , \n",
      " samples:  artley, heather, lowes\n",
      "Epoch  52 ,CE 0.9529 ,Perp  2.59 , \n",
      "Epoch  54 ,CE 0.9395 ,Perp  2.56 , \n",
      "Epoch  56 ,CE 0.9342 ,Perp  2.55 , *\n",
      "Epoch  58 ,CE 0.9267 ,Perp  2.53 , *\n",
      "Epoch  60 ,CE 0.9229 ,Perp  2.52 , \n",
      " samples: goodridge, yougge, wilkes\n",
      "Epoch  62 ,CE 0.9170 ,Perp  2.50 , \n",
      "Epoch  64 ,CE 0.9106 ,Perp  2.49 , \n",
      "Epoch  66 ,CE 0.9066 ,Perp  2.48 , \n",
      "Epoch  68 ,CE 0.9032 ,Perp  2.47 , \n",
      "Epoch  70 ,CE 0.8986 ,Perp  2.46 , *\n",
      " samples: drickland, munro,  arner\n",
      "Epoch  72 ,CE 0.8945 ,Perp  2.45 , \n",
      "Epoch  74 ,CE 0.8937 ,Perp  2.44 , \n",
      "Epoch  76 ,CE 0.8911 ,Perp  2.44 , *\n",
      "Epoch  78 ,CE 0.8877 ,Perp  2.43 , \n",
      "Epoch  80 ,CE 0.8861 ,Perp  2.43 , *\n",
      " samples: newson, oriley, downes\n",
      "Epoch  82 ,CE 0.8842 ,Perp  2.42 , \n",
      "Epoch  84 ,CE 0.8817 ,Perp  2.41 , \n",
      "Epoch  86 ,CE 0.8821 ,Perp  2.42 , \n",
      "Epoch  88 ,CE 0.8801 ,Perp  2.41 , \n",
      "Epoch  90 ,CE 0.8788 ,Perp  2.41 , \n",
      " samples: mooney,  atterson, stark\n",
      "Epoch  92 ,CE 0.8760 ,Perp  2.40 , \n",
      "Epoch  94 ,CE 0.8745 ,Perp  2.40 , *\n",
      "Epoch  96 ,CE 0.8755 ,Perp  2.40 , \n",
      "Epoch  98 ,CE 0.8739 ,Perp  2.40 , \n",
      "Epoch 100 ,CE 0.8726 ,Perp  2.39 , \n",
      " samples: kettlewell,  atterson, ireland\n",
      "Epoch 102 ,CE 0.8699 ,Perp  2.39 , \n",
      "Early-stop triggered at epoch 102. Best CE=0.8745\n",
      "\n",
      "Final generated names:\n",
      "casey\n",
      "vine\n",
      "buckley\n",
      "fitch\n",
      "xallett\n",
      "poon\n",
      "oflynn\n",
      "swales\n",
      "owings\n",
      "underwood\n",
      "acton\n",
      "hardy\n",
      "bing\n",
      "izzard\n",
      "english\n",
      "zaoui\n",
      "martinez\n",
      "zaoui\n",
      "bryant\n",
      "zaoui\n"
     ]
    }
   ],
   "execution_count": 3
  },
  {
   "cell_type": "markdown",
   "id": "85b11e45",
   "metadata": {},
   "source": [
    "提示：可以参照 `lec12-rnn-generation.zip` 中的代码，但注意英文名不需要像中文那样构建字典，因为可以直接使用26个字母作为字典。"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
