{
 "cells": [
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": [
    "# 一定程度上的并行\n",
    "\n",
    "来自视频 [徒手实现深度循环神经网络--大语言模型的雏形](https://www.bilibili.com/video/BV1uS421o7ts)"
   ],
   "id": "873817dd4987b014"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-01T06:48:48.580182Z",
     "start_time": "2025-09-01T06:48:35.360823Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Imports: PyTorch core, data loading, and plotting\n",
    "# (removed `from Tools.scripts.combinerefs import combine` — an accidental\n",
    "# IDE auto-import of CPython's private Tools/scripts module; `combine` was\n",
    "# never used and the import fails outside a CPython source checkout)\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.optim as optim\n",
    "from torch.utils.data import DataLoader\n",
    "from datasets import load_dataset\n",
    "import matplotlib.pyplot as plt\n",
    "%matplotlib inline\n",
    "\n",
    "\n",
    "# Fix the seed so weight init and sampling are reproducible\n",
    "torch.manual_seed(12046)"
   ],
   "id": "7373d72a70d030b4",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<torch._C.Generator at 0x17afb66a6d0>"
      ]
     },
     "execution_count": 1,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 1
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-01T06:48:48.711105Z",
     "start_time": "2025-09-01T06:48:48.620648Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Hyperparameters\n",
    "learning_rate = 1e-3  # Adam step size\n",
    "eval_iters = 10       # batches averaged per loss estimate\n",
    "batch_size = 1000\n",
    "sequence_len = 64     # characters per training window\n",
    "# Run on the GPU when one is available, otherwise fall back to CPU\n",
    "device = 'cuda' if torch.cuda.is_available() else 'cpu'"
   ],
   "id": "c1a1a12159c67fea",
   "outputs": [],
   "execution_count": 2
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-01T06:48:50.875293Z",
     "start_time": "2025-09-01T06:48:49.102321Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Load the local gzipped-JSONL python corpus, then keep only samples whose\n",
    "# repo field mentions apache/spark (shrinks the corpus to a manageable size)\n",
    "datasets = load_dataset('json', data_files='./datasets/python/final/jsonl/train/*.jsonl.gz')\n",
    "datasets = datasets['train'].filter(lambda x: 'apache/spark' in x['repo'])"
   ],
   "id": "f5acbc07ef2d3820",
   "outputs": [],
   "execution_count": 3
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-01T06:48:50.925663Z",
     "start_time": "2025-09-01T06:48:50.881870Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class CharTokenizer:\n",
    "    \"\"\"Character-level tokenizer with a reserved end-of-text token.\"\"\"\n",
    "\n",
    "    def __init__(self, data, end_ind=0):\n",
    "        # data: list[str]; end_ind: id reserved for the end-of-text token\n",
    "        vocab = sorted(set(''.join(data)))\n",
    "        # ids 1..len(vocab) for real characters; end_ind for '<|e|>'\n",
    "        self.char2ind = {ch: idx + 1 for idx, ch in enumerate(vocab)}\n",
    "        self.char2ind['<|e|>'] = end_ind\n",
    "        self.ind2char = {idx: ch for ch, idx in self.char2ind.items()}\n",
    "        self.end_ind = end_ind\n",
    "\n",
    "    def encode(self, x):\n",
    "        # x: str -> list[int]; raises KeyError on characters unseen at build time\n",
    "        return [self.char2ind[ch] for ch in x]\n",
    "\n",
    "    def decode(self, x):\n",
    "        # x: int -> str, or iterable of ints -> list[str]\n",
    "        if isinstance(x, int):\n",
    "            return self.ind2char[x]\n",
    "        return [self.ind2char[idx] for idx in x]\n",
    "\n",
    "tokenizer = CharTokenizer(datasets['original_string'])\n",
    "test_str = 'def f(x):'\n",
    "re = tokenizer.encode(test_str)\n",
    "print(re)\n",
    "''.join(tokenizer.decode(range(len(tokenizer.char2ind))))"
   ],
   "id": "4ab586bd5000a760",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[70, 71, 72, 2, 72, 10, 90, 11, 28]\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "'<|e|>\\n !\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~ö'"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 4
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-01T08:26:46.907956Z",
     "start_time": "2025-09-01T08:26:46.889329Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class RNN(nn.Module):\n",
    "    \"\"\"Minimal Elman-style RNN that processes the whole batch at each time step.\"\"\"\n",
    "\n",
    "    def __init__(self, input_size, hidden_size):\n",
    "        super().__init__()\n",
    "        self.input_size = input_size\n",
    "        self.hidden_size = hidden_size\n",
    "        # Single affine map applied to the concatenated [input, hidden] vector\n",
    "        self.i2h = nn.Linear(input_size + hidden_size, hidden_size)\n",
    "\n",
    "\n",
    "    def forward(self, input, hidden=None):\n",
    "        # input: (B, T, C) where T is the sequence length\n",
    "        # hidden: (B, H) where H is the hidden size; zero-initialized when None\n",
    "        # returns: (B, T, H) — the hidden state at every time step\n",
    "\n",
    "        B, T, C = input.shape\n",
    "        re = []\n",
    "\n",
    "        if hidden is None:\n",
    "            hidden = self.init_hidden(B, input.device)\n",
    "\n",
    "        # Loop over time only; each step handles all B sequences in parallel\n",
    "        for i in range(T):\n",
    "            # All sequences in the batch must share the same length T\n",
    "            combined = torch.concat((input[:, i, :], hidden), dim=-1)  # slice out time step i and append the hidden state: (B, C + H)\n",
    "            hidden = F.relu(self.i2h(combined))  # (B, H)\n",
    "            re.append(hidden)\n",
    "\n",
    "        return torch.stack(re, dim=1)  # (B, T, H)\n",
    "\n",
    "    def init_hidden(self, B, device):\n",
    "        # Zero initial hidden state for a batch of size B on the given device\n",
    "        return torch.zeros((B, self.hidden_size)).to(device)\n",
    "\n"
   ],
   "id": "9e37f7d6990b9649",
   "outputs": [],
   "execution_count": 5
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-01T08:29:44.033767Z",
     "start_time": "2025-09-01T08:29:44.027570Z"
    }
   },
   "cell_type": "code",
   "source": [
    "### torch.stack demo: stacking along dim=1 pairs up corresponding rows of a and b\n",
    "a = torch.zeros(3, 4)\n",
    "b = a + 1\n",
    "c = torch.stack([a, b], dim=1)\n",
    "\n",
    "print(a)\n",
    "print(b)\n",
    "print(c)"
   ],
   "id": "28d4e1e5a10a091d",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "tensor([[0., 0., 0., 0.],\n",
      "        [0., 0., 0., 0.],\n",
      "        [0., 0., 0., 0.]])\n",
      "tensor([[1., 1., 1., 1.],\n",
      "        [1., 1., 1., 1.],\n",
      "        [1., 1., 1., 1.]])\n",
      "tensor([[[0., 0., 0., 0.],\n",
      "         [1., 1., 1., 1.]],\n",
      "\n",
      "        [[0., 0., 0., 0.],\n",
      "         [1., 1., 1., 1.]],\n",
      "\n",
      "        [[0., 0., 0., 0.],\n",
      "         [1., 1., 1., 1.]]])\n"
     ]
    }
   ],
   "execution_count": 11
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-01T08:30:20.392501Z",
     "start_time": "2025-09-01T08:30:20.344236Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Smoke test: batch 5, sequence length 2, feature size 3 -> hidden size 4\n",
    "r = RNN(3, 4)\n",
    "x = torch.randn(5, 2, 3)\n",
    "r(x).shape"
   ],
   "id": "eecdfa1c3233fab3",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([5, 2, 4])"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 12
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-01T09:17:31.965105Z",
     "start_time": "2025-09-01T09:17:31.960077Z"
    }
   },
   "cell_type": "code",
   "source": [
    "### Data padding: inputs must be padded/truncated to a fixed length upstream\n",
    "### Layer norm: https://pytorch.org/docs/stable/generated/torch.nn.LayerNorm.html\n",
    "class CharRNNBatch(nn.Module):\n",
    "    \"\"\"Two-layer character RNN language model: embed -> RNN+LN -> RNN+LN -> logits.\"\"\"\n",
    "\n",
    "    def __init__(self, vs, emb_size=256, hidden_size=128, dropout=0.4):\n",
    "        # vs: vocabulary size; the defaults match the original hard-coded values,\n",
    "        # so existing callers (CharRNNBatch(vs)) are unchanged\n",
    "        super().__init__()\n",
    "        self.emb = nn.Embedding(vs, emb_size)\n",
    "        self.rnn1 = RNN(emb_size, hidden_size)\n",
    "        self.ln1 = nn.LayerNorm(hidden_size)\n",
    "        self.rnn2 = RNN(hidden_size, hidden_size)\n",
    "        self.ln2 = nn.LayerNorm(hidden_size)\n",
    "        self.lm = nn.Linear(hidden_size, vs)\n",
    "        self.dp = nn.Dropout(dropout)\n",
    "\n",
    "    def forward(self, x):\n",
    "        # x: (B, T) integer token ids; returns logits of shape (B, T, vs)\n",
    "        # (no initial hidden-state input implemented yet;\n",
    "        #  unused local `B = x.shape[0]` from the original removed)\n",
    "        embeddings = self.emb(x)  # (B, T, emb_size)\n",
    "        h = F.relu(self.ln1(self.rnn1(embeddings)))  # (B, T, hidden_size)\n",
    "        h = self.dp(h)\n",
    "        h = F.relu(self.ln2(self.rnn2(h)))  # (B, T, hidden_size)\n",
    "        h = self.dp(h)\n",
    "        out = self.lm(h)\n",
    "        return out\n"
   ],
   "id": "8b477fd3e69622c6",
   "outputs": [],
   "execution_count": 20
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-01T09:17:34.073608Z",
     "start_time": "2025-09-01T09:17:34.066362Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Build the model with the tokenizer's vocabulary size and move it to the device\n",
    "c_model = CharRNNBatch(len(tokenizer.char2ind)).to(device)\n",
    "c_model"
   ],
   "id": "402df74378356e32",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "CharRNNBatch(\n",
       "  (emb): Embedding(98, 256)\n",
       "  (rnn1): RNN(\n",
       "    (i2h): Linear(in_features=384, out_features=128, bias=True)\n",
       "  )\n",
       "  (ln1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "  (rnn2): RNN(\n",
       "    (i2h): Linear(in_features=256, out_features=128, bias=True)\n",
       "  )\n",
       "  (ln2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "  (lm): Linear(in_features=128, out_features=98, bias=True)\n",
       "  (dp): Dropout(p=0.4, inplace=False)\n",
       ")"
      ]
     },
     "execution_count": 21,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 21
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-01T09:17:35.344411Z",
     "start_time": "2025-09-01T09:17:35.339719Z"
    }
   },
   "cell_type": "code",
   "source": [
    "@torch.no_grad()\n",
    "def generate(model, context, tokenizer, max_new_tokens=300):\n",
    "    \"\"\"Sample up to max_new_tokens token ids, stopping early at the end token.\n",
    "\n",
    "    context: (1, T) tensor of prompt token ids.\n",
    "    Returns the prompt ids followed by the generated ids as list[int].\n",
    "    \"\"\"\n",
    "    out = context.tolist()[0]\n",
    "    model.eval()\n",
    "    for _ in range(max_new_tokens):\n",
    "        # Optionally truncate the context to the training window so generation\n",
    "        # matches the sequence length the model was trained on:\n",
    "        #logits = model(context[:, -sequence_len:])\n",
    "        logits = model(context)            # (1, T, vocab_size)\n",
    "        probs = F.softmax(logits[:, -1, :], dim=-1)  # (1, vocab_size)\n",
    "        # Sample the next token from the predicted distribution\n",
    "        ix = torch.multinomial(probs, num_samples=1)  # (1, 1)\n",
    "        # Append the sampled token to the running context\n",
    "        context = torch.concat((context, ix), dim=-1)\n",
    "        out.append(ix.item())\n",
    "        if out[-1] == tokenizer.end_ind:\n",
    "            break\n",
    "    model.train()\n",
    "    return out"
   ],
   "id": "fe0c5b22090afedd",
   "outputs": [],
   "execution_count": 22
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-01T09:17:44.235204Z",
     "start_time": "2025-09-01T09:17:37.011150Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Sample from the model before training: output should look like random characters\n",
    "context = torch.tensor(tokenizer.encode('def'), device=device).unsqueeze(0)\n",
    "print(''.join(tokenizer.decode(generate(c_model, context, tokenizer))))"
   ],
   "id": "fb248571cef15b7",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "def*ZO(5/o|(\"YP{BoE ?|uw=3:1'L$?Q9NN[-K|=CK|AM:iKca\"|+Q3-<sA*gWS$ö0NG!q9T3\"y~m5-a)'W~]\\rm&B\"%{r\n",
      "c\"i,k^DAx1zk@}@*N\n",
      "L\"jIT^~JuciGPi&.Qp!)_a_GB_*zC!la#,p=84WVJk%ycbyJ{sXK$>cYtd\"c!E&/zG^K~>A8'N]^~Di\"~]\"/N/5N!^-iVo6ZMa`ösTM'>#)C,n4JNP\n",
      "pZ\\[DKWFp`\n",
      "\"omF^2drBZT08]byU%+bk7pc^&GG%0E$bm{og$O5+:0_JT49/EpB\\roW&\\JnJuk\n"
     ]
    }
   ],
   "execution_count": 23
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-01T09:18:32.409291Z",
     "start_time": "2025-09-01T09:18:32.404028Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def process(data, tokenizer, sequence_len=sequence_len):\n",
    "    \"\"\"Turn a batch of source strings into fixed-length (input, label) windows.\n",
    "\n",
    "    data: datasets batch dict with an 'original_string' list[str] column.\n",
    "    Returns {'inputs': ..., 'labels': ...} where labels are inputs shifted by one.\n",
    "    \"\"\"\n",
    "    text = data['original_string']\n",
    "    inputs, labels = [], []\n",
    "    for t in text:\n",
    "        enc = tokenizer.encode(t)\n",
    "        enc += [tokenizer.end_ind]\n",
    "        # Fix for the previously noted bug: texts shorter than sequence_len + 1\n",
    "        # were silently dropped; pad them with the end token so every text\n",
    "        # yields at least one full window\n",
    "        if len(enc) < sequence_len + 1:\n",
    "            enc += [tokenizer.end_ind] * (sequence_len + 1 - len(enc))\n",
    "        for i in range(len(enc) - sequence_len):\n",
    "            inputs.append(enc[i: i + sequence_len])\n",
    "            labels.append(enc[i + 1: i + 1 + sequence_len])\n",
    "    return {'inputs': inputs, 'labels': labels}"
   ],
   "id": "6b8b73d25ee14de0",
   "outputs": [],
   "execution_count": 25
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-01T09:18:52.821724Z",
     "start_time": "2025-09-01T09:18:52.736970Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 将数据分为训练集和测试集\n",
    "tokenized = datasets.train_test_split(test_size=0.1, seed=1024, shuffle=True)\n",
    "\n",
    "f = lambda x: process(x, tokenizer)\n",
    "tokenized = tokenized.map(f, batched=True, remove_columns=datasets.column_names)\n",
    "tokenized.set_format(type='torch', device=device)"
   ],
   "id": "ee8e65e3f4dded06",
   "outputs": [],
   "execution_count": 27
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-01T09:19:23.802848Z",
     "start_time": "2025-09-01T09:19:23.798288Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# Shuffled mini-batch loaders for training and evaluation\n",
    "train_loader = DataLoader(tokenized['train'], batch_size=batch_size, shuffle=True)\n",
    "test_loader = DataLoader(tokenized['test'], batch_size=batch_size, shuffle=True)"
   ],
   "id": "ffcd28308a35a1aa",
   "outputs": [],
   "execution_count": 28
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-01T09:19:32.359372Z",
     "start_time": "2025-09-01T09:19:32.297634Z"
    }
   },
   "cell_type": "code",
   "source": "# Peek at one batch to sanity-check shapes and device placement\nnext(iter(train_loader))",
   "id": "e7bfc680dbc77b6c",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'inputs': tensor([[ 2,  2, 76,  ..., 73,  2, 31],\n",
       "         [71,  2, 81,  ..., 81,  2, 85],\n",
       "         [67, 86, 31,  ..., 77, 16, 85],\n",
       "         ...,\n",
       "         [10, 10, 85,  ..., 85, 71, 78],\n",
       "         [10, 61, 19,  ..., 65, 84, 71],\n",
       "         [20, 18, 19,  ..., 74, 71, 84]], device='cuda:0'),\n",
       " 'labels': tensor([[ 2, 76, 69,  ...,  2, 31,  2],\n",
       "         [ 2, 81, 72,  ...,  2, 85, 86],\n",
       "         [86, 31, 48,  ..., 16, 85, 83],\n",
       "         ...,\n",
       "         [10, 85, 71,  ..., 71, 78, 72],\n",
       "         [61, 19, 14,  ..., 84, 71, 79],\n",
       "         [18, 19, 23,  ..., 71, 84,  1]], device='cuda:0')}"
      ]
     },
     "execution_count": 30,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 30
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-01T09:20:08.632506Z",
     "start_time": "2025-09-01T09:20:07.299098Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def estimate_loss(model):\n",
    "    \"\"\"Average cross-entropy of the model on both splits, measured in eval mode.\"\"\"\n",
    "    model.eval()  # disable dropout while measuring\n",
    "    result = {'train': _loss(model, train_loader), 'test': _loss(model, test_loader)}\n",
    "    model.train()  # restore training mode\n",
    "    return result\n",
    "\n",
    "@torch.no_grad()\n",
    "def _loss(model, data_loader):\n",
    "    \"\"\"\n",
    "    Mean cross-entropy over eval_iters batches drawn from data_loader.\n",
    "    \"\"\"\n",
    "    losses = []\n",
    "    batches = iter(data_loader)\n",
    "    # Estimate the loss from several randomly shuffled batches\n",
    "    for _ in range(eval_iters):\n",
    "        batch = next(batches, None)\n",
    "        if batch is None:\n",
    "            # Loader exhausted: start a fresh pass\n",
    "            batches = iter(data_loader)\n",
    "            batch = next(batches, None)\n",
    "        inputs, labels = batch['inputs'], batch['labels']  # (B, T)\n",
    "        logits = model(inputs)                             # (B, T, vs)\n",
    "        # cross_entropy expects the class dimension second, hence the transpose\n",
    "        losses.append(F.cross_entropy(logits.transpose(-2, -1), labels).item())\n",
    "    return torch.tensor(losses).mean().item()\n",
    "\n",
    "estimate_loss(c_model)"
   ],
   "id": "5e0a7d316e3a4906",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'train': 4.72510290145874, 'test': 4.7315497398376465}"
      ]
     },
     "execution_count": 31,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 31
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-01T09:21:29.746078Z",
     "start_time": "2025-09-01T09:21:29.739651Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def train_model(model, optimizer, epochs=10):\n",
    "    \"\"\"Run the training loop and return the per-batch training losses.\"\"\"\n",
    "    # Per-batch training losses, kept for plotting the loss curve later\n",
    "    lossi = []\n",
    "    for epoch in range(epochs):\n",
    "        for i, data in enumerate(train_loader, 0):\n",
    "            inputs, labels = data['inputs'], data['labels']  # (B, T)\n",
    "            optimizer.zero_grad()\n",
    "            logits = model(inputs)                           # (B, T, vs)\n",
    "            # cross_entropy expects the class dimension second, hence the transpose\n",
    "            loss = F.cross_entropy(logits.transpose(-2, -1), labels)\n",
    "            lossi.append(loss.item())\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "        # Evaluate on both splits at the end of every epoch and report\n",
    "        stats = estimate_loss(model)\n",
    "        train_loss = f'train loss {stats[\"train\"]:.4f}'\n",
    "        test_loss = f'test loss {stats[\"test\"]:.4f}'\n",
    "        print(f'epoch {epoch:>2}: {train_loss}, {test_loss}')\n",
    "    return lossi"
   ],
   "id": "ecb8dcb4cf695587",
   "outputs": [],
   "execution_count": 32
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-01T09:47:00.679484Z",
     "start_time": "2025-09-01T09:21:39.475034Z"
    }
   },
   "cell_type": "code",
   "source": [
    "### Compare with PyTorch's built-in RNN: https://pytorch.org/docs/stable/generated/torch.nn.RNN.html\n",
    "l = train_model(c_model, optim.Adam(c_model.parameters(), lr=learning_rate))"
   ],
   "id": "f1b77413e6338d8",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch  0: train loss 1.4161, test loss 1.5274\n",
      "epoch  1: train loss 1.3095, test loss 1.4331\n",
      "epoch  2: train loss 1.2703, test loss 1.4028\n",
      "epoch  3: train loss 1.2404, test loss 1.3754\n",
      "epoch  4: train loss 1.2382, test loss 1.3631\n",
      "epoch  5: train loss 1.2157, test loss 1.3591\n",
      "epoch  6: train loss 1.2088, test loss 1.3505\n",
      "epoch  7: train loss 1.2071, test loss 1.3348\n",
      "epoch  8: train loss 1.1915, test loss 1.3374\n",
      "epoch  9: train loss 1.1929, test loss 1.3267\n"
     ]
    }
   ],
   "execution_count": 33
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "# Sample again after training: output should now resemble Python code\n",
    "context = torch.tensor(tokenizer.encode('def'), device=device).unsqueeze(0)\n",
    "print(''.join(tokenizer.decode(generate(c_model, context, tokenizer))))"
   ],
   "id": "6c4babf0d9ea972"
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": "plt.plot(torch.tensor(l).view(-1, 10).mean(dim=-1))",
   "id": "ec41d1cf713c0ec3"
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
