{
 "cells": [
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": [
    "# 徒手实现 GPT\n",
    "\n",
    "来自视频 [徒手实现GPT：其实真的很简单](https://www.bilibili.com/video/BV1Hz421q7ks)"
   ],
   "id": "9bc4a94a3a6aa101"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T13:14:56.959138Z",
     "start_time": "2025-09-03T13:14:52.420286Z"
    }
   },
   "cell_type": "code",
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.optim as optim\n",
    "from torch.utils.data import DataLoader\n",
    "from datasets import load_dataset\n",
    "import matplotlib.pyplot as plt\n",
    "%matplotlib inline\n",
    "\n",
    "\n",
    "# 如果使用CPU，需要非常长的时间，建议减少模型规模来加快速度（比如n_layer）"
   ],
   "id": "d35202a559c2daa2",
   "outputs": [],
   "execution_count": 1
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T13:14:56.989439Z",
     "start_time": "2025-09-03T13:14:56.963659Z"
    }
   },
   "cell_type": "code",
   "source": [
    "# 一些超参数\n",
    "emb_size = 128\n",
    "head_size = 8\n",
    "n_layer = 12\n",
    "sequence_len = 64\n",
    "learning_rate = 1e-3\n",
    "eval_iters = 20\n",
    "batch_size = 500\n",
    "# 如果有GPU，该脚本将使用GPU进行计算\n",
    "device = 'cuda' if torch.cuda.is_available() else 'cpu'"
   ],
   "id": "66e5884729a3f3cd",
   "outputs": [],
   "execution_count": 2
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T13:14:58.669977Z",
     "start_time": "2025-09-03T13:14:57.135153Z"
    }
   },
   "cell_type": "code",
   "source": [
    "datasets = load_dataset('json', data_files='./datasets/python/final/jsonl/train/*.jsonl.gz')\n",
    "datasets = datasets['train'].filter(lambda x: 'apache/spark' in x['repo'])\n",
    "\n",
    "\n",
    "class CharTokenizer:\n",
    "\n",
    "    def __init__(self, data, end_ind=0):\n",
    "        # data: list[str]\n",
    "        # 得到所有的字符\n",
    "        chars = sorted(list(set(''.join(data))))\n",
    "        self.char2ind = {s: i + 1 for i, s in enumerate(chars)}\n",
    "        self.char2ind['<|e|>'] = end_ind\n",
    "        self.ind2char = {v: k for k, v in self.char2ind.items()}\n",
    "        self.end_ind = end_ind\n",
    "\n",
    "    def encode(self, x):\n",
    "        # x: str\n",
    "        return [self.char2ind[i] for i in x]\n",
    "\n",
    "    def decode(self, x):\n",
    "        # x: int or list[x]\n",
    "        if isinstance(x, int):\n",
    "            return self.ind2char[x]\n",
    "        return [self.ind2char[i] for i in x]\n",
    "\n",
    "\n",
    "tokenizer = CharTokenizer(datasets['original_string'])\n",
    "test_str = 'def f(x):'\n",
    "re = tokenizer.encode(test_str)\n",
    "print(re)\n",
    "''.join(tokenizer.decode(range(len(tokenizer.char2ind))))"
   ],
   "id": "d5dfe8dd82cfd061",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[70, 71, 72, 2, 72, 10, 90, 11, 28]\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "'<|e|>\\n !\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~ö'"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 3
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T13:14:58.764371Z",
     "start_time": "2025-09-03T13:14:58.678663Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def process(data, tokenizer, sequence_len=sequence_len):\n",
    "    text = data['original_string']\n",
    "    # text is list[str]\n",
    "    inputs, labels = [], []\n",
    "    for t in text:\n",
    "        enc = tokenizer.encode(t)\n",
    "        enc += [tokenizer.end_ind]\n",
    "        # 有bug，无法处理长度过小的数据\n",
    "        for i in range(len(enc) - sequence_len):\n",
    "            inputs.append(enc[i: i + sequence_len])\n",
    "            labels.append(enc[i + 1: i + 1 + sequence_len])\n",
    "    return {'inputs': inputs, 'labels': labels}\n",
    "\n",
    "\n",
    "# 将数据分为训练集和测试集\n",
    "tokenized = datasets.train_test_split(test_size=0.1, seed=1024, shuffle=True)\n",
    "\n",
    "f = lambda x: process(x, tokenizer)\n",
    "tokenized = tokenized.map(f, batched=True, remove_columns=datasets.column_names)\n",
    "tokenized.set_format(type='torch', device=device)\n"
   ],
   "id": "bddb81f4e33f93bc",
   "outputs": [],
   "execution_count": 4
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T13:14:59.044492Z",
     "start_time": "2025-09-03T13:14:58.772965Z"
    }
   },
   "cell_type": "code",
   "source": [
    "train_loader = DataLoader(tokenized['train'], batch_size=batch_size, shuffle=True)\n",
    "test_loader = DataLoader(tokenized['test'], batch_size=batch_size, shuffle=True)\n",
    "next(iter(train_loader))"
   ],
   "id": "854037b1b88f0876",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'inputs': tensor([[67, 78, 78,  ..., 78, 87, 79],\n",
       "         [84, 77, 53,  ...,  2,  2,  2],\n",
       "         [11, 94, 54,  ...,  2,  2,  2],\n",
       "         ...,\n",
       "         [82, 65, 68,  ..., 71, 82, 86],\n",
       "         [ 2, 85, 71,  ...,  2,  2,  2],\n",
       "         [ 2,  2,  2,  ...,  2, 67,  2]], device='cuda:0'),\n",
       " 'labels': tensor([[78, 78, 81,  ..., 87, 79, 80],\n",
       "         [77, 53, 71,  ...,  2,  2,  2],\n",
       "         [94, 54, 84,  ...,  2,  2, 19],\n",
       "         ...,\n",
       "         [65, 68, 91,  ..., 82, 86, 75],\n",
       "         [85, 71, 78,  ...,  2,  2, 72],\n",
       "         [ 2,  2,  2,  ..., 67,  2, 70]], device='cuda:0')}"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 5
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T13:14:59.057182Z",
     "start_time": "2025-09-03T13:14:59.051407Z"
    }
   },
   "cell_type": "code",
   "source": [
    "@torch.no_grad()\n",
    "def generate(model, context, tokenizer, max_new_tokens=300):\n",
    "    # context: (1, T)\n",
    "    #out = []\n",
    "    out = context.tolist()[0]\n",
    "    model.eval()\n",
    "    for _ in range(max_new_tokens):\n",
    "        # 由于注意力机制的长度限制，截断背景\n",
    "        logits = model(context[:, -sequence_len:])\n",
    "        probs = F.softmax(logits[:, -1, :], dim=-1)  # (1, 98)\n",
    "        # 随机生成文本\n",
    "        ix = torch.multinomial(probs, num_samples=1)  # (1, 1)\n",
    "        # 更新背景\n",
    "        context = torch.concat((context, ix), dim=-1)\n",
    "        out.append(ix.item())\n",
    "        if out[-1] == tokenizer.end_ind:\n",
    "            break\n",
    "    model.train()\n",
    "    return out"
   ],
   "id": "dc1ed9faa3671734",
   "outputs": [],
   "execution_count": 6
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T13:14:59.080881Z",
     "start_time": "2025-09-03T13:14:59.075647Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def estimate_loss(model):\n",
    "    re = {}\n",
    "    # 将模型切换至评估模式\n",
    "    model.eval()\n",
    "    re['train'] = _loss(model, train_loader)\n",
    "    re['test'] = _loss(model, test_loader)\n",
    "    # 将模型切换至训练模式\n",
    "    model.train()\n",
    "    return re\n",
    "\n",
    "\n",
    "@torch.no_grad()\n",
    "def _loss(model, data_loader):\n",
    "    \"\"\"\n",
    "    计算模型在不同数据集下面的评估指标\n",
    "    \"\"\"\n",
    "    loss = []\n",
    "    data_iter = iter(data_loader)\n",
    "    # 随机使用多个批量数据来预估模型效果\n",
    "    for k in range(eval_iters):\n",
    "        data = next(data_iter, None)\n",
    "        if data is None:\n",
    "            data_iter = iter(data_loader)\n",
    "            data = next(data_iter, None)\n",
    "        inputs, labels = data['inputs'], data['labels']  # (B, T)\n",
    "        logits = model(inputs)  # (B, T, vs)\n",
    "        # 请参考官方文档\n",
    "        loss.append(F.cross_entropy(logits.transpose(-2, -1), labels).item())\n",
    "    return torch.tensor(loss).mean().item()"
   ],
   "id": "652840ae40db500e",
   "outputs": [],
   "execution_count": 7
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T13:14:59.098535Z",
     "start_time": "2025-09-03T13:14:59.093642Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def train_model(model, optimizer, epochs=10):\n",
    "    # 记录模型在训练集上的模型损失\n",
    "    lossi = []\n",
    "    for epoch in range(epochs):\n",
    "        for i, data in enumerate(train_loader, 0):\n",
    "            inputs, labels = data['inputs'], data['labels']  # (B, T)\n",
    "            optimizer.zero_grad()\n",
    "            logits = model(inputs)  # (B, T, vs)\n",
    "            loss = F.cross_entropy(logits.transpose(-2, -1), labels)\n",
    "            lossi.append(loss.item())\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "        # 评估模型，并输出结果\n",
    "        stats = estimate_loss(model)\n",
    "        train_loss = f'train loss {stats[\"train\"]:.4f}'\n",
    "        test_loss = f'test loss {stats[\"test\"]:.4f}'\n",
    "        print(f'epoch {epoch:>2}: {train_loss}, {test_loss}')\n",
    "    return lossi"
   ],
   "id": "e395cd2812581d63",
   "outputs": [],
   "execution_count": 8
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T13:14:59.111676Z",
     "start_time": "2025-09-03T13:14:59.107113Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def attention(query, key, value, dropout, mask=None):\n",
    "    # query, key, value: (B, T, H)\n",
    "    # mask:                 (T, T)\n",
    "    # output:            (B, T, H)\n",
    "    B, T, H = query.shape\n",
    "    scores = query @ key.transpose(-2, -1) / H ** 0.5\n",
    "    if mask is not None:\n",
    "        scores = scores.masked_fill(mask == 0, float('-inf'))\n",
    "    w_att = F.softmax(scores, dim=-1)  # (B, T, T)\n",
    "    out = w_att @ value  # (B, T, H)\n",
    "    return out"
   ],
   "id": "d965590e3127faed",
   "outputs": [],
   "execution_count": 9
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T13:14:59.125248Z",
     "start_time": "2025-09-03T13:14:59.120725Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class MaskedAttention(nn.Module):\n",
    "    # 单向自注意力\n",
    "\n",
    "    def __init__(self, emb_size, head_size):\n",
    "        # emb_size: C, head_size: H\n",
    "        super().__init__()\n",
    "        self.key = nn.Linear(emb_size, head_size, bias=False)\n",
    "        self.query = nn.Linear(emb_size, head_size, bias=False)\n",
    "        self.value = nn.Linear(emb_size, head_size, bias=False)\n",
    "        # 定义下三角矩阵\n",
    "        self.register_buffer('tril', torch.tril(torch.ones(sequence_len, sequence_len)))\n",
    "        self.dp = nn.Dropout(0.4)\n",
    "\n",
    "    def forward(self, x):\n",
    "        # x:   (B, T, C)\n",
    "        # out: (B, T, H)\n",
    "        B, T, C = x.shape\n",
    "        k = self.key(x)  # (B, T, H)\n",
    "        q = self.query(x)  # (B, T, H)\n",
    "        v = self.value(x)  # (B, T, H)\n",
    "        mask = self.tril[:T, :T]\n",
    "        out = attention(q, k, v, self.dp, mask)\n",
    "        return out"
   ],
   "id": "f690cbdfa632ee2e",
   "outputs": [],
   "execution_count": 10
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": [
    "![](./images/常见的GPT2.png)\n",
    "\n",
    "上图中的精确图示与原图略有差异，差异的点在于第一个块的归于化层的位置，这是因为在 GPT2 的实现中归于化层在多头注意力之前，而不是严格遵循 Transformer 的架构的，这我们致敬 GPT2 ，但是本质上这对输出的内容没有显著的影响。"
   ],
   "id": "a3df3fdb14d9c2db"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T13:14:59.137052Z",
     "start_time": "2025-09-03T13:14:59.132499Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class MaskedMultiHeadAttention(nn.Module):\n",
    "\n",
    "    def __init__(self, emb_size, head_size):\n",
    "        super().__init__()\n",
    "\n",
    "        # 计算单头注意力个数，貌似多头注意力的出现原因之一是为了可以使残差链接可以正常进行\n",
    "        n_head = emb_size // head_size\n",
    "        heads = [MaskedAttention(emb_size, head_size) for _ in range(n_head)]\n",
    "        self.heads = nn.ModuleList(heads)  # 存储多个子模块，把管理交给 torch 实现\n",
    "        self.proj = nn.Linear(emb_size, emb_size)\n",
    "        self.dp = nn.Dropout(0.4)\n",
    "\n",
    "    def forward(self, x):\n",
    "        # x: (B, T, C)\n",
    "        # out: (B, T, C)\n",
    "        out = torch.concat([h(x) for h in self.heads], dim=-1)  # (B, T, C)\n",
    "        out = self.dp(self.proj(out))\n",
    "        return out"
   ],
   "id": "390c1174e081ce65",
   "outputs": [],
   "execution_count": 11
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T13:14:59.156340Z",
     "start_time": "2025-09-03T13:14:59.152966Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class FeedForward(nn.Module):\n",
    "    # 前馈神经网络（其实就是多层感知器）\n",
    "\n",
    "    def __init__(self, emb_size):\n",
    "        super().__init__()\n",
    "\n",
    "        self.ln1 = nn.Linear(emb_size, 4 * emb_size)\n",
    "        self.ln2 = nn.Linear(4 * emb_size, emb_size)\n",
    "        self.dp = nn.Dropout(0.4)\n",
    "\n",
    "    def forward(self, x):\n",
    "        # x: (B, T, C)\n",
    "        out = F.gelu(self.ln1(x))  # (B, T, C)\n",
    "        out = self.dp(self.ln2(out))  # (B, T, C)\n",
    "        return out"
   ],
   "id": "f89256cd643df29e",
   "outputs": [],
   "execution_count": 12
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T13:14:59.170377Z",
     "start_time": "2025-09-03T13:14:59.165865Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class Block(nn.Module):\n",
    "\n",
    "    def __init__(self, emb_size, head_size):\n",
    "        super().__init__()\n",
    "        self.l1 = nn.LayerNorm(emb_size)\n",
    "        self.mha = MaskedMultiHeadAttention(emb_size, head_size)\n",
    "        self.l2 = nn.LayerNorm(emb_size)\n",
    "        self.ff = FeedForward(emb_size)\n",
    "\n",
    "    def forward(self, x):\n",
    "        # x: (B, T, C)\n",
    "        # out: (B, T, C)\n",
    "        x = x + self.mha(x)  # 不要使用 += 会打乱计算图！！！\n",
    "        x = x + self.ff(self.l2(x))\n",
    "        return x"
   ],
   "id": "80d12d06dcc0b865",
   "outputs": [],
   "execution_count": 13
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T13:14:59.181674Z",
     "start_time": "2025-09-03T13:14:59.176499Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class CharGPT(nn.Module):\n",
    "\n",
    "    def __init__(self, vs):\n",
    "        super().__init__()\n",
    "\n",
    "        self.token_emb = nn.Embedding(vs, emb_size)\n",
    "        self.pos_emb = nn.Embedding(sequence_len, emb_size)\n",
    "        block = [Block(emb_size, head_size) for _ in range(n_layer)]\n",
    "        self.blocks = nn.Sequential(*block)\n",
    "        self.l = nn.LayerNorm(emb_size)\n",
    "        self.lm = nn.Linear(emb_size, vs)\n",
    "\n",
    "    def forward(self, x):\n",
    "        # x: (B, T)\n",
    "        # logits: (B, T, vs)\n",
    "        B, T = x.shape\n",
    "        pos = torch.arange(0, T, device=x.device, dtype=torch.long)  # 生成整数\n",
    "        token_embeddings = self.token_emb(x)  # (B, T, C)\n",
    "        pos_embeddings = self.pos_emb(pos)  # (B, T, C)\n",
    "        h = token_embeddings + pos_embeddings  # (B, T, C)\n",
    "        h = self.blocks(h)  # (B, T, C)\n",
    "        logits = self.lm(self.l(h))  # (B, T, vs)\n",
    "        return logits"
   ],
   "id": "e2d88c3817b7568b",
   "outputs": [],
   "execution_count": 14
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T13:14:59.507174Z",
     "start_time": "2025-09-03T13:14:59.191438Z"
    }
   },
   "cell_type": "code",
   "source": [
    "c_model = CharGPT(len(tokenizer.char2ind)).to(device)\n",
    "c_model, sum(p.numel() for p in c_model.parameters())"
   ],
   "id": "b8930be04e168a32",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(CharGPT(\n",
       "   (token_emb): Embedding(98, 128)\n",
       "   (pos_emb): Embedding(64, 128)\n",
       "   (blocks): Sequential(\n",
       "     (0): Block(\n",
       "       (l1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "       (mha): MaskedMultiHeadAttention(\n",
       "         (heads): ModuleList(\n",
       "           (0-15): 16 x MaskedAttention(\n",
       "             (key): Linear(in_features=128, out_features=8, bias=False)\n",
       "             (query): Linear(in_features=128, out_features=8, bias=False)\n",
       "             (value): Linear(in_features=128, out_features=8, bias=False)\n",
       "             (dp): Dropout(p=0.4, inplace=False)\n",
       "           )\n",
       "         )\n",
       "         (proj): Linear(in_features=128, out_features=128, bias=True)\n",
       "         (dp): Dropout(p=0.4, inplace=False)\n",
       "       )\n",
       "       (l2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "       (ff): FeedForward(\n",
       "         (ln1): Linear(in_features=128, out_features=512, bias=True)\n",
       "         (ln2): Linear(in_features=512, out_features=128, bias=True)\n",
       "         (dp): Dropout(p=0.4, inplace=False)\n",
       "       )\n",
       "     )\n",
       "     (1): Block(\n",
       "       (l1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "       (mha): MaskedMultiHeadAttention(\n",
       "         (heads): ModuleList(\n",
       "           (0-15): 16 x MaskedAttention(\n",
       "             (key): Linear(in_features=128, out_features=8, bias=False)\n",
       "             (query): Linear(in_features=128, out_features=8, bias=False)\n",
       "             (value): Linear(in_features=128, out_features=8, bias=False)\n",
       "             (dp): Dropout(p=0.4, inplace=False)\n",
       "           )\n",
       "         )\n",
       "         (proj): Linear(in_features=128, out_features=128, bias=True)\n",
       "         (dp): Dropout(p=0.4, inplace=False)\n",
       "       )\n",
       "       (l2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "       (ff): FeedForward(\n",
       "         (ln1): Linear(in_features=128, out_features=512, bias=True)\n",
       "         (ln2): Linear(in_features=512, out_features=128, bias=True)\n",
       "         (dp): Dropout(p=0.4, inplace=False)\n",
       "       )\n",
       "     )\n",
       "     (2): Block(\n",
       "       (l1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "       (mha): MaskedMultiHeadAttention(\n",
       "         (heads): ModuleList(\n",
       "           (0-15): 16 x MaskedAttention(\n",
       "             (key): Linear(in_features=128, out_features=8, bias=False)\n",
       "             (query): Linear(in_features=128, out_features=8, bias=False)\n",
       "             (value): Linear(in_features=128, out_features=8, bias=False)\n",
       "             (dp): Dropout(p=0.4, inplace=False)\n",
       "           )\n",
       "         )\n",
       "         (proj): Linear(in_features=128, out_features=128, bias=True)\n",
       "         (dp): Dropout(p=0.4, inplace=False)\n",
       "       )\n",
       "       (l2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "       (ff): FeedForward(\n",
       "         (ln1): Linear(in_features=128, out_features=512, bias=True)\n",
       "         (ln2): Linear(in_features=512, out_features=128, bias=True)\n",
       "         (dp): Dropout(p=0.4, inplace=False)\n",
       "       )\n",
       "     )\n",
       "     (3): Block(\n",
       "       (l1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "       (mha): MaskedMultiHeadAttention(\n",
       "         (heads): ModuleList(\n",
       "           (0-15): 16 x MaskedAttention(\n",
       "             (key): Linear(in_features=128, out_features=8, bias=False)\n",
       "             (query): Linear(in_features=128, out_features=8, bias=False)\n",
       "             (value): Linear(in_features=128, out_features=8, bias=False)\n",
       "             (dp): Dropout(p=0.4, inplace=False)\n",
       "           )\n",
       "         )\n",
       "         (proj): Linear(in_features=128, out_features=128, bias=True)\n",
       "         (dp): Dropout(p=0.4, inplace=False)\n",
       "       )\n",
       "       (l2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "       (ff): FeedForward(\n",
       "         (ln1): Linear(in_features=128, out_features=512, bias=True)\n",
       "         (ln2): Linear(in_features=512, out_features=128, bias=True)\n",
       "         (dp): Dropout(p=0.4, inplace=False)\n",
       "       )\n",
       "     )\n",
       "     (4): Block(\n",
       "       (l1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "       (mha): MaskedMultiHeadAttention(\n",
       "         (heads): ModuleList(\n",
       "           (0-15): 16 x MaskedAttention(\n",
       "             (key): Linear(in_features=128, out_features=8, bias=False)\n",
       "             (query): Linear(in_features=128, out_features=8, bias=False)\n",
       "             (value): Linear(in_features=128, out_features=8, bias=False)\n",
       "             (dp): Dropout(p=0.4, inplace=False)\n",
       "           )\n",
       "         )\n",
       "         (proj): Linear(in_features=128, out_features=128, bias=True)\n",
       "         (dp): Dropout(p=0.4, inplace=False)\n",
       "       )\n",
       "       (l2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "       (ff): FeedForward(\n",
       "         (ln1): Linear(in_features=128, out_features=512, bias=True)\n",
       "         (ln2): Linear(in_features=512, out_features=128, bias=True)\n",
       "         (dp): Dropout(p=0.4, inplace=False)\n",
       "       )\n",
       "     )\n",
       "     (5): Block(\n",
       "       (l1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "       (mha): MaskedMultiHeadAttention(\n",
       "         (heads): ModuleList(\n",
       "           (0-15): 16 x MaskedAttention(\n",
       "             (key): Linear(in_features=128, out_features=8, bias=False)\n",
       "             (query): Linear(in_features=128, out_features=8, bias=False)\n",
       "             (value): Linear(in_features=128, out_features=8, bias=False)\n",
       "             (dp): Dropout(p=0.4, inplace=False)\n",
       "           )\n",
       "         )\n",
       "         (proj): Linear(in_features=128, out_features=128, bias=True)\n",
       "         (dp): Dropout(p=0.4, inplace=False)\n",
       "       )\n",
       "       (l2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "       (ff): FeedForward(\n",
       "         (ln1): Linear(in_features=128, out_features=512, bias=True)\n",
       "         (ln2): Linear(in_features=512, out_features=128, bias=True)\n",
       "         (dp): Dropout(p=0.4, inplace=False)\n",
       "       )\n",
       "     )\n",
       "     (6): Block(\n",
       "       (l1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "       (mha): MaskedMultiHeadAttention(\n",
       "         (heads): ModuleList(\n",
       "           (0-15): 16 x MaskedAttention(\n",
       "             (key): Linear(in_features=128, out_features=8, bias=False)\n",
       "             (query): Linear(in_features=128, out_features=8, bias=False)\n",
       "             (value): Linear(in_features=128, out_features=8, bias=False)\n",
       "             (dp): Dropout(p=0.4, inplace=False)\n",
       "           )\n",
       "         )\n",
       "         (proj): Linear(in_features=128, out_features=128, bias=True)\n",
       "         (dp): Dropout(p=0.4, inplace=False)\n",
       "       )\n",
       "       (l2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "       (ff): FeedForward(\n",
       "         (ln1): Linear(in_features=128, out_features=512, bias=True)\n",
       "         (ln2): Linear(in_features=512, out_features=128, bias=True)\n",
       "         (dp): Dropout(p=0.4, inplace=False)\n",
       "       )\n",
       "     )\n",
       "     (7): Block(\n",
       "       (l1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "       (mha): MaskedMultiHeadAttention(\n",
       "         (heads): ModuleList(\n",
       "           (0-15): 16 x MaskedAttention(\n",
       "             (key): Linear(in_features=128, out_features=8, bias=False)\n",
       "             (query): Linear(in_features=128, out_features=8, bias=False)\n",
       "             (value): Linear(in_features=128, out_features=8, bias=False)\n",
       "             (dp): Dropout(p=0.4, inplace=False)\n",
       "           )\n",
       "         )\n",
       "         (proj): Linear(in_features=128, out_features=128, bias=True)\n",
       "         (dp): Dropout(p=0.4, inplace=False)\n",
       "       )\n",
       "       (l2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "       (ff): FeedForward(\n",
       "         (ln1): Linear(in_features=128, out_features=512, bias=True)\n",
       "         (ln2): Linear(in_features=512, out_features=128, bias=True)\n",
       "         (dp): Dropout(p=0.4, inplace=False)\n",
       "       )\n",
       "     )\n",
       "     (8): Block(\n",
       "       (l1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "       (mha): MaskedMultiHeadAttention(\n",
       "         (heads): ModuleList(\n",
       "           (0-15): 16 x MaskedAttention(\n",
       "             (key): Linear(in_features=128, out_features=8, bias=False)\n",
       "             (query): Linear(in_features=128, out_features=8, bias=False)\n",
       "             (value): Linear(in_features=128, out_features=8, bias=False)\n",
       "             (dp): Dropout(p=0.4, inplace=False)\n",
       "           )\n",
       "         )\n",
       "         (proj): Linear(in_features=128, out_features=128, bias=True)\n",
       "         (dp): Dropout(p=0.4, inplace=False)\n",
       "       )\n",
       "       (l2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "       (ff): FeedForward(\n",
       "         (ln1): Linear(in_features=128, out_features=512, bias=True)\n",
       "         (ln2): Linear(in_features=512, out_features=128, bias=True)\n",
       "         (dp): Dropout(p=0.4, inplace=False)\n",
       "       )\n",
       "     )\n",
       "     (9): Block(\n",
       "       (l1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "       (mha): MaskedMultiHeadAttention(\n",
       "         (heads): ModuleList(\n",
       "           (0-15): 16 x MaskedAttention(\n",
       "             (key): Linear(in_features=128, out_features=8, bias=False)\n",
       "             (query): Linear(in_features=128, out_features=8, bias=False)\n",
       "             (value): Linear(in_features=128, out_features=8, bias=False)\n",
       "             (dp): Dropout(p=0.4, inplace=False)\n",
       "           )\n",
       "         )\n",
       "         (proj): Linear(in_features=128, out_features=128, bias=True)\n",
       "         (dp): Dropout(p=0.4, inplace=False)\n",
       "       )\n",
       "       (l2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "       (ff): FeedForward(\n",
       "         (ln1): Linear(in_features=128, out_features=512, bias=True)\n",
       "         (ln2): Linear(in_features=512, out_features=128, bias=True)\n",
       "         (dp): Dropout(p=0.4, inplace=False)\n",
       "       )\n",
       "     )\n",
       "     (10): Block(\n",
       "       (l1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "       (mha): MaskedMultiHeadAttention(\n",
       "         (heads): ModuleList(\n",
       "           (0-15): 16 x MaskedAttention(\n",
       "             (key): Linear(in_features=128, out_features=8, bias=False)\n",
       "             (query): Linear(in_features=128, out_features=8, bias=False)\n",
       "             (value): Linear(in_features=128, out_features=8, bias=False)\n",
       "             (dp): Dropout(p=0.4, inplace=False)\n",
       "           )\n",
       "         )\n",
       "         (proj): Linear(in_features=128, out_features=128, bias=True)\n",
       "         (dp): Dropout(p=0.4, inplace=False)\n",
       "       )\n",
       "       (l2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "       (ff): FeedForward(\n",
       "         (ln1): Linear(in_features=128, out_features=512, bias=True)\n",
       "         (ln2): Linear(in_features=512, out_features=128, bias=True)\n",
       "         (dp): Dropout(p=0.4, inplace=False)\n",
       "       )\n",
       "     )\n",
       "     (11): Block(\n",
       "       (l1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "       (mha): MaskedMultiHeadAttention(\n",
       "         (heads): ModuleList(\n",
       "           (0-15): 16 x MaskedAttention(\n",
       "             (key): Linear(in_features=128, out_features=8, bias=False)\n",
       "             (query): Linear(in_features=128, out_features=8, bias=False)\n",
       "             (value): Linear(in_features=128, out_features=8, bias=False)\n",
       "             (dp): Dropout(p=0.4, inplace=False)\n",
       "           )\n",
       "         )\n",
       "         (proj): Linear(in_features=128, out_features=128, bias=True)\n",
       "         (dp): Dropout(p=0.4, inplace=False)\n",
       "       )\n",
       "       (l2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "       (ff): FeedForward(\n",
       "         (ln1): Linear(in_features=128, out_features=512, bias=True)\n",
       "         (ln2): Linear(in_features=512, out_features=128, bias=True)\n",
       "         (dp): Dropout(p=0.4, inplace=False)\n",
       "       )\n",
       "     )\n",
       "   )\n",
       "   (l): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n",
       "   (lm): Linear(in_features=128, out_features=98, bias=True)\n",
       " ),\n",
       " 2408290)"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 15
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T13:15:02.576796Z",
     "start_time": "2025-09-03T13:14:59.583537Z"
    }
   },
   "cell_type": "code",
   "source": [
    "context = torch.tensor(tokenizer.encode('def'), device=device).unsqueeze(0)\n",
    "print(''.join(tokenizer.decode(generate(c_model, context, tokenizer))))"
   ],
   "id": "6b11481f805b32a9",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "def&;*7uzYG\n",
      "M&D~Y84PT.Oa -~k%ci4|PFug!;8 jMG\n",
      "eUgl<|e|>\n"
     ]
    }
   ],
   "execution_count": 16
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T13:15:11.166422Z",
     "start_time": "2025-09-03T13:15:02.600771Z"
    }
   },
   "cell_type": "code",
   "source": "estimate_loss(c_model)",
   "id": "742302973c9e8a24",
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'train': 4.559240818023682, 'test': 4.554561614990234}"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 17
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-09-03T13:55:55.000760Z",
     "start_time": "2025-09-03T13:15:11.176289Z"
    }
   },
   "cell_type": "code",
   "source": "l = train_model(c_model, optim.AdamW(c_model.parameters(), lr=learning_rate))",
   "id": "7ac08744207acdab",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch  0: train loss 0.8103, test loss 1.0632\n",
      "epoch  1: train loss 0.6371, test loss 1.0069\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001B[31m---------------------------------------------------------------------------\u001B[39m",
      "\u001B[31mKeyboardInterrupt\u001B[39m                         Traceback (most recent call last)",
      "\u001B[36mCell\u001B[39m\u001B[36m \u001B[39m\u001B[32mIn[18]\u001B[39m\u001B[32m, line 1\u001B[39m\n\u001B[32m----> \u001B[39m\u001B[32m1\u001B[39m l = \u001B[43mtrain_model\u001B[49m\u001B[43m(\u001B[49m\u001B[43mc_model\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43moptim\u001B[49m\u001B[43m.\u001B[49m\u001B[43mAdamW\u001B[49m\u001B[43m(\u001B[49m\u001B[43mc_model\u001B[49m\u001B[43m.\u001B[49m\u001B[43mparameters\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mlr\u001B[49m\u001B[43m=\u001B[49m\u001B[43mlearning_rate\u001B[49m\u001B[43m)\u001B[49m\u001B[43m)\u001B[49m\n",
      "\u001B[36mCell\u001B[39m\u001B[36m \u001B[39m\u001B[32mIn[8]\u001B[39m\u001B[32m, line 5\u001B[39m, in \u001B[36mtrain_model\u001B[39m\u001B[34m(model, optimizer, epochs)\u001B[39m\n\u001B[32m      3\u001B[39m lossi = []\n\u001B[32m      4\u001B[39m \u001B[38;5;28;01mfor\u001B[39;00m epoch \u001B[38;5;129;01min\u001B[39;00m \u001B[38;5;28mrange\u001B[39m(epochs):\n\u001B[32m----> \u001B[39m\u001B[32m5\u001B[39m \u001B[43m    \u001B[49m\u001B[38;5;28;43;01mfor\u001B[39;49;00m\u001B[43m \u001B[49m\u001B[43mi\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mdata\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;129;43;01min\u001B[39;49;00m\u001B[43m \u001B[49m\u001B[38;5;28;43menumerate\u001B[39;49m\u001B[43m(\u001B[49m\u001B[43mtrain_loader\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[32;43m0\u001B[39;49m\u001B[43m)\u001B[49m\u001B[43m:\u001B[49m\n\u001B[32m      6\u001B[39m \u001B[43m        \u001B[49m\u001B[43minputs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mlabels\u001B[49m\u001B[43m \u001B[49m\u001B[43m=\u001B[49m\u001B[43m \u001B[49m\u001B[43mdata\u001B[49m\u001B[43m[\u001B[49m\u001B[33;43m'\u001B[39;49m\u001B[33;43minputs\u001B[39;49m\u001B[33;43m'\u001B[39;49m\u001B[43m]\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mdata\u001B[49m\u001B[43m[\u001B[49m\u001B[33;43m'\u001B[39;49m\u001B[33;43mlabels\u001B[39;49m\u001B[33;43m'\u001B[39;49m\u001B[43m]\u001B[49m\u001B[43m  \u001B[49m\u001B[38;5;66;43;03m# (B, T)\u001B[39;49;00m\n\u001B[32m      7\u001B[39m \u001B[43m        \u001B[49m\u001B[43moptimizer\u001B[49m\u001B[43m.\u001B[49m\u001B[43mzero_grad\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\n",
      "\u001B[36mFile \u001B[39m\u001B[32mE:\\Programming\\Python\\DeepLearning\\venv\\Lib\\site-packages\\torch\\utils\\data\\dataloader.py:733\u001B[39m, in \u001B[36m_BaseDataLoaderIter.__next__\u001B[39m\u001B[34m(self)\u001B[39m\n\u001B[32m    730\u001B[39m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28mself\u001B[39m._sampler_iter \u001B[38;5;129;01mis\u001B[39;00m \u001B[38;5;28;01mNone\u001B[39;00m:\n\u001B[32m    731\u001B[39m     \u001B[38;5;66;03m# TODO(https://github.com/pytorch/pytorch/issues/76750)\u001B[39;00m\n\u001B[32m    732\u001B[39m     \u001B[38;5;28mself\u001B[39m._reset()  \u001B[38;5;66;03m# type: ignore[call-arg]\u001B[39;00m\n\u001B[32m--> \u001B[39m\u001B[32m733\u001B[39m data = \u001B[38;5;28;43mself\u001B[39;49m\u001B[43m.\u001B[49m\u001B[43m_next_data\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\n\u001B[32m    734\u001B[39m \u001B[38;5;28mself\u001B[39m._num_yielded += \u001B[32m1\u001B[39m\n\u001B[32m    735\u001B[39m \u001B[38;5;28;01mif\u001B[39;00m (\n\u001B[32m    736\u001B[39m     \u001B[38;5;28mself\u001B[39m._dataset_kind == _DatasetKind.Iterable\n\u001B[32m    737\u001B[39m     \u001B[38;5;129;01mand\u001B[39;00m \u001B[38;5;28mself\u001B[39m._IterableDataset_len_called \u001B[38;5;129;01mis\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m \u001B[38;5;28;01mNone\u001B[39;00m\n\u001B[32m    738\u001B[39m     \u001B[38;5;129;01mand\u001B[39;00m \u001B[38;5;28mself\u001B[39m._num_yielded > \u001B[38;5;28mself\u001B[39m._IterableDataset_len_called\n\u001B[32m    739\u001B[39m ):\n",
      "\u001B[36mFile \u001B[39m\u001B[32mE:\\Programming\\Python\\DeepLearning\\venv\\Lib\\site-packages\\torch\\utils\\data\\dataloader.py:789\u001B[39m, in \u001B[36m_SingleProcessDataLoaderIter._next_data\u001B[39m\u001B[34m(self)\u001B[39m\n\u001B[32m    787\u001B[39m \u001B[38;5;28;01mdef\u001B[39;00m\u001B[38;5;250m \u001B[39m\u001B[34m_next_data\u001B[39m(\u001B[38;5;28mself\u001B[39m):\n\u001B[32m    788\u001B[39m     index = \u001B[38;5;28mself\u001B[39m._next_index()  \u001B[38;5;66;03m# may raise StopIteration\u001B[39;00m\n\u001B[32m--> \u001B[39m\u001B[32m789\u001B[39m     data = \u001B[38;5;28;43mself\u001B[39;49m\u001B[43m.\u001B[49m\u001B[43m_dataset_fetcher\u001B[49m\u001B[43m.\u001B[49m\u001B[43mfetch\u001B[49m\u001B[43m(\u001B[49m\u001B[43mindex\u001B[49m\u001B[43m)\u001B[49m  \u001B[38;5;66;03m# may raise StopIteration\u001B[39;00m\n\u001B[32m    790\u001B[39m     \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28mself\u001B[39m._pin_memory:\n\u001B[32m    791\u001B[39m         data = _utils.pin_memory.pin_memory(data, \u001B[38;5;28mself\u001B[39m._pin_memory_device)\n",
      "\u001B[36mFile \u001B[39m\u001B[32mE:\\Programming\\Python\\DeepLearning\\venv\\Lib\\site-packages\\torch\\utils\\data\\_utils\\fetch.py:50\u001B[39m, in \u001B[36m_MapDatasetFetcher.fetch\u001B[39m\u001B[34m(self, possibly_batched_index)\u001B[39m\n\u001B[32m     48\u001B[39m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28mself\u001B[39m.auto_collation:\n\u001B[32m     49\u001B[39m     \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28mhasattr\u001B[39m(\u001B[38;5;28mself\u001B[39m.dataset, \u001B[33m\"\u001B[39m\u001B[33m__getitems__\u001B[39m\u001B[33m\"\u001B[39m) \u001B[38;5;129;01mand\u001B[39;00m \u001B[38;5;28mself\u001B[39m.dataset.__getitems__:\n\u001B[32m---> \u001B[39m\u001B[32m50\u001B[39m         data = \u001B[38;5;28;43mself\u001B[39;49m\u001B[43m.\u001B[49m\u001B[43mdataset\u001B[49m\u001B[43m.\u001B[49m\u001B[43m__getitems__\u001B[49m\u001B[43m(\u001B[49m\u001B[43mpossibly_batched_index\u001B[49m\u001B[43m)\u001B[49m\n\u001B[32m     51\u001B[39m     \u001B[38;5;28;01melse\u001B[39;00m:\n\u001B[32m     52\u001B[39m         data = [\u001B[38;5;28mself\u001B[39m.dataset[idx] \u001B[38;5;28;01mfor\u001B[39;00m idx \u001B[38;5;129;01min\u001B[39;00m possibly_batched_index]\n",
      "\u001B[36mFile \u001B[39m\u001B[32mE:\\Programming\\Python\\DeepLearning\\venv\\Lib\\site-packages\\datasets\\arrow_dataset.py:2863\u001B[39m, in \u001B[36mDataset.__getitems__\u001B[39m\u001B[34m(self, keys)\u001B[39m\n\u001B[32m   2861\u001B[39m \u001B[38;5;28;01mdef\u001B[39;00m\u001B[38;5;250m \u001B[39m\u001B[34m__getitems__\u001B[39m(\u001B[38;5;28mself\u001B[39m, keys: \u001B[38;5;28mlist\u001B[39m) -> \u001B[38;5;28mlist\u001B[39m:\n\u001B[32m   2862\u001B[39m \u001B[38;5;250m    \u001B[39m\u001B[33;03m\"\"\"Can be used to get a batch using a list of integers indices.\"\"\"\u001B[39;00m\n\u001B[32m-> \u001B[39m\u001B[32m2863\u001B[39m     batch = \u001B[38;5;28;43mself\u001B[39;49m\u001B[43m.\u001B[49m\u001B[34;43m__getitem__\u001B[39;49m\u001B[43m(\u001B[49m\u001B[43mkeys\u001B[49m\u001B[43m)\u001B[49m\n\u001B[32m   2864\u001B[39m     n_examples = \u001B[38;5;28mlen\u001B[39m(batch[\u001B[38;5;28mnext\u001B[39m(\u001B[38;5;28miter\u001B[39m(batch))])\n\u001B[32m   2865\u001B[39m     \u001B[38;5;28;01mreturn\u001B[39;00m [{col: array[i] \u001B[38;5;28;01mfor\u001B[39;00m col, array \u001B[38;5;129;01min\u001B[39;00m batch.items()} \u001B[38;5;28;01mfor\u001B[39;00m i \u001B[38;5;129;01min\u001B[39;00m \u001B[38;5;28mrange\u001B[39m(n_examples)]\n",
      "\u001B[36mFile \u001B[39m\u001B[32mE:\\Programming\\Python\\DeepLearning\\venv\\Lib\\site-packages\\datasets\\arrow_dataset.py:2859\u001B[39m, in \u001B[36mDataset.__getitem__\u001B[39m\u001B[34m(self, key)\u001B[39m\n\u001B[32m   2857\u001B[39m     \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28mself\u001B[39m._format_type \u001B[38;5;129;01mis\u001B[39;00m \u001B[38;5;28;01mNone\u001B[39;00m \u001B[38;5;129;01mor\u001B[39;00m \u001B[38;5;28mself\u001B[39m._format_type \u001B[38;5;129;01mnot\u001B[39;00m \u001B[38;5;129;01min\u001B[39;00m (\u001B[33m\"\u001B[39m\u001B[33marrow\u001B[39m\u001B[33m\"\u001B[39m, \u001B[33m\"\u001B[39m\u001B[33mpandas\u001B[39m\u001B[33m\"\u001B[39m, \u001B[33m\"\u001B[39m\u001B[33mpolars\u001B[39m\u001B[33m\"\u001B[39m):\n\u001B[32m   2858\u001B[39m         \u001B[38;5;28;01mreturn\u001B[39;00m Column(\u001B[38;5;28mself\u001B[39m, key)\n\u001B[32m-> \u001B[39m\u001B[32m2859\u001B[39m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28;43mself\u001B[39;49m\u001B[43m.\u001B[49m\u001B[43m_getitem\u001B[49m\u001B[43m(\u001B[49m\u001B[43mkey\u001B[49m\u001B[43m)\u001B[49m\n",
      "\u001B[36mFile \u001B[39m\u001B[32mE:\\Programming\\Python\\DeepLearning\\venv\\Lib\\site-packages\\datasets\\arrow_dataset.py:2841\u001B[39m, in \u001B[36mDataset._getitem\u001B[39m\u001B[34m(self, key, **kwargs)\u001B[39m\n\u001B[32m   2839\u001B[39m formatter = get_formatter(format_type, features=\u001B[38;5;28mself\u001B[39m._info.features, **format_kwargs)\n\u001B[32m   2840\u001B[39m pa_subtable = query_table(\u001B[38;5;28mself\u001B[39m._data, key, indices=\u001B[38;5;28mself\u001B[39m._indices)\n\u001B[32m-> \u001B[39m\u001B[32m2841\u001B[39m formatted_output = \u001B[43mformat_table\u001B[49m\u001B[43m(\u001B[49m\n\u001B[32m   2842\u001B[39m \u001B[43m    \u001B[49m\u001B[43mpa_subtable\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mkey\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mformatter\u001B[49m\u001B[43m=\u001B[49m\u001B[43mformatter\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mformat_columns\u001B[49m\u001B[43m=\u001B[49m\u001B[43mformat_columns\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43moutput_all_columns\u001B[49m\u001B[43m=\u001B[49m\u001B[43moutput_all_columns\u001B[49m\n\u001B[32m   2843\u001B[39m \u001B[43m\u001B[49m\u001B[43m)\u001B[49m\n\u001B[32m   2844\u001B[39m \u001B[38;5;28;01mreturn\u001B[39;00m formatted_output\n",
      "\u001B[36mFile \u001B[39m\u001B[32mE:\\Programming\\Python\\DeepLearning\\venv\\Lib\\site-packages\\datasets\\formatting\\formatting.py:657\u001B[39m, in \u001B[36mformat_table\u001B[39m\u001B[34m(table, key, formatter, format_columns, output_all_columns)\u001B[39m\n\u001B[32m    655\u001B[39m python_formatter = PythonFormatter(features=formatter.features)\n\u001B[32m    656\u001B[39m \u001B[38;5;28;01mif\u001B[39;00m format_columns \u001B[38;5;129;01mis\u001B[39;00m \u001B[38;5;28;01mNone\u001B[39;00m:\n\u001B[32m--> \u001B[39m\u001B[32m657\u001B[39m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43mformatter\u001B[49m\u001B[43m(\u001B[49m\u001B[43mpa_table\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mquery_type\u001B[49m\u001B[43m=\u001B[49m\u001B[43mquery_type\u001B[49m\u001B[43m)\u001B[49m\n\u001B[32m    658\u001B[39m \u001B[38;5;28;01melif\u001B[39;00m query_type == \u001B[33m\"\u001B[39m\u001B[33mcolumn\u001B[39m\u001B[33m\"\u001B[39m:\n\u001B[32m    659\u001B[39m     \u001B[38;5;28;01mif\u001B[39;00m key \u001B[38;5;129;01min\u001B[39;00m format_columns:\n",
      "\u001B[36mFile \u001B[39m\u001B[32mE:\\Programming\\Python\\DeepLearning\\venv\\Lib\\site-packages\\datasets\\formatting\\formatting.py:414\u001B[39m, in \u001B[36mFormatter.__call__\u001B[39m\u001B[34m(self, pa_table, query_type)\u001B[39m\n\u001B[32m    412\u001B[39m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28mself\u001B[39m.format_column(pa_table)\n\u001B[32m    413\u001B[39m \u001B[38;5;28;01melif\u001B[39;00m query_type == \u001B[33m\"\u001B[39m\u001B[33mbatch\u001B[39m\u001B[33m\"\u001B[39m:\n\u001B[32m--> \u001B[39m\u001B[32m414\u001B[39m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28;43mself\u001B[39;49m\u001B[43m.\u001B[49m\u001B[43mformat_batch\u001B[49m\u001B[43m(\u001B[49m\u001B[43mpa_table\u001B[49m\u001B[43m)\u001B[49m\n",
      "\u001B[36mFile \u001B[39m\u001B[32mE:\\Programming\\Python\\DeepLearning\\venv\\Lib\\site-packages\\datasets\\formatting\\torch_formatter.py:124\u001B[39m, in \u001B[36mTorchFormatter.format_batch\u001B[39m\u001B[34m(self, pa_table)\u001B[39m\n\u001B[32m    122\u001B[39m batch = \u001B[38;5;28mself\u001B[39m.numpy_arrow_extractor().extract_batch(pa_table)\n\u001B[32m    123\u001B[39m batch = \u001B[38;5;28mself\u001B[39m.python_features_decoder.decode_batch(batch)\n\u001B[32m--> \u001B[39m\u001B[32m124\u001B[39m batch = \u001B[38;5;28;43mself\u001B[39;49m\u001B[43m.\u001B[49m\u001B[43mrecursive_tensorize\u001B[49m\u001B[43m(\u001B[49m\u001B[43mbatch\u001B[49m\u001B[43m)\u001B[49m\n\u001B[32m    125\u001B[39m \u001B[38;5;28;01mfor\u001B[39;00m column_name \u001B[38;5;129;01min\u001B[39;00m batch:\n\u001B[32m    126\u001B[39m     batch[column_name] = \u001B[38;5;28mself\u001B[39m._consolidate(batch[column_name])\n",
      "\u001B[36mFile \u001B[39m\u001B[32mE:\\Programming\\Python\\DeepLearning\\venv\\Lib\\site-packages\\datasets\\formatting\\torch_formatter.py:107\u001B[39m, in \u001B[36mTorchFormatter.recursive_tensorize\u001B[39m\u001B[34m(self, data_struct)\u001B[39m\n\u001B[32m    106\u001B[39m \u001B[38;5;28;01mdef\u001B[39;00m\u001B[38;5;250m \u001B[39m\u001B[34mrecursive_tensorize\u001B[39m(\u001B[38;5;28mself\u001B[39m, data_struct: \u001B[38;5;28mdict\u001B[39m):\n\u001B[32m--> \u001B[39m\u001B[32m107\u001B[39m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43mmap_nested\u001B[49m\u001B[43m(\u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[43m.\u001B[49m\u001B[43m_recursive_tensorize\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mdata_struct\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mmap_list\u001B[49m\u001B[43m=\u001B[49m\u001B[38;5;28;43;01mFalse\u001B[39;49;00m\u001B[43m)\u001B[49m\n",
      "\u001B[36mFile \u001B[39m\u001B[32mE:\\Programming\\Python\\DeepLearning\\venv\\Lib\\site-packages\\datasets\\utils\\py_utils.py:520\u001B[39m, in \u001B[36mmap_nested\u001B[39m\u001B[34m(function, data_struct, dict_only, map_list, map_tuple, map_numpy, num_proc, parallel_min_length, batched, batch_size, types, disable_tqdm, desc)\u001B[39m\n\u001B[32m    518\u001B[39m         batch_size = \u001B[38;5;28mmax\u001B[39m(\u001B[38;5;28mlen\u001B[39m(iterable) // num_proc + \u001B[38;5;28mint\u001B[39m(\u001B[38;5;28mlen\u001B[39m(iterable) % num_proc > \u001B[32m0\u001B[39m), \u001B[32m1\u001B[39m)\n\u001B[32m    519\u001B[39m     iterable = \u001B[38;5;28mlist\u001B[39m(iter_batched(iterable, batch_size))\n\u001B[32m--> \u001B[39m\u001B[32m520\u001B[39m mapped = \u001B[43m[\u001B[49m\n\u001B[32m    521\u001B[39m \u001B[43m    \u001B[49m\u001B[43m_single_map_nested\u001B[49m\u001B[43m(\u001B[49m\u001B[43m(\u001B[49m\u001B[43mfunction\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mobj\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mbatched\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mbatch_size\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mtypes\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43;01mNone\u001B[39;49;00m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43;01mTrue\u001B[39;49;00m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43;01mNone\u001B[39;49;00m\u001B[43m)\u001B[49m\u001B[43m)\u001B[49m\n\u001B[32m    522\u001B[39m \u001B[43m    \u001B[49m\u001B[38;5;28;43;01mfor\u001B[39;49;00m\u001B[43m \u001B[49m\u001B[43mobj\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;129;43;01min\u001B[39;49;00m\u001B[43m \u001B[49m\u001B[43mhf_tqdm\u001B[49m\u001B[43m(\u001B[49m\u001B[43miterable\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mdisable\u001B[49m\u001B[43m=\u001B[49m\u001B[43mdisable_tqdm\u001B[49m\u001B[43m,\u001B[49m\u001B[43m 
\u001B[49m\u001B[43mdesc\u001B[49m\u001B[43m=\u001B[49m\u001B[43mdesc\u001B[49m\u001B[43m)\u001B[49m\n\u001B[32m    523\u001B[39m \u001B[43m\u001B[49m\u001B[43m]\u001B[49m\n\u001B[32m    524\u001B[39m \u001B[38;5;28;01mif\u001B[39;00m batched:\n\u001B[32m    525\u001B[39m     mapped = [mapped_item \u001B[38;5;28;01mfor\u001B[39;00m mapped_batch \u001B[38;5;129;01min\u001B[39;00m mapped \u001B[38;5;28;01mfor\u001B[39;00m mapped_item \u001B[38;5;129;01min\u001B[39;00m mapped_batch]\n",
      "\u001B[36mFile \u001B[39m\u001B[32mE:\\Programming\\Python\\DeepLearning\\venv\\Lib\\site-packages\\datasets\\utils\\py_utils.py:521\u001B[39m, in \u001B[36m<listcomp>\u001B[39m\u001B[34m(.0)\u001B[39m\n\u001B[32m    518\u001B[39m         batch_size = \u001B[38;5;28mmax\u001B[39m(\u001B[38;5;28mlen\u001B[39m(iterable) // num_proc + \u001B[38;5;28mint\u001B[39m(\u001B[38;5;28mlen\u001B[39m(iterable) % num_proc > \u001B[32m0\u001B[39m), \u001B[32m1\u001B[39m)\n\u001B[32m    519\u001B[39m     iterable = \u001B[38;5;28mlist\u001B[39m(iter_batched(iterable, batch_size))\n\u001B[32m    520\u001B[39m mapped = [\n\u001B[32m--> \u001B[39m\u001B[32m521\u001B[39m     \u001B[43m_single_map_nested\u001B[49m\u001B[43m(\u001B[49m\u001B[43m(\u001B[49m\u001B[43mfunction\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mobj\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mbatched\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mbatch_size\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mtypes\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43;01mNone\u001B[39;49;00m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43;01mTrue\u001B[39;49;00m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43;01mNone\u001B[39;49;00m\u001B[43m)\u001B[49m\u001B[43m)\u001B[49m\n\u001B[32m    522\u001B[39m     \u001B[38;5;28;01mfor\u001B[39;00m obj \u001B[38;5;129;01min\u001B[39;00m hf_tqdm(iterable, disable=disable_tqdm, desc=desc)\n\u001B[32m    523\u001B[39m ]\n\u001B[32m    524\u001B[39m \u001B[38;5;28;01mif\u001B[39;00m batched:\n\u001B[32m    525\u001B[39m     mapped = [mapped_item \u001B[38;5;28;01mfor\u001B[39;00m mapped_batch \u001B[38;5;129;01min\u001B[39;00m mapped \u001B[38;5;28;01mfor\u001B[39;00m mapped_item \u001B[38;5;129;01min\u001B[39;00m mapped_batch]\n",
      "\u001B[36mFile \u001B[39m\u001B[32mE:\\Programming\\Python\\DeepLearning\\venv\\Lib\\site-packages\\datasets\\utils\\py_utils.py:382\u001B[39m, in \u001B[36m_single_map_nested\u001B[39m\u001B[34m(args)\u001B[39m\n\u001B[32m    380\u001B[39m         \u001B[38;5;28;01mreturn\u001B[39;00m function([data_struct])[\u001B[32m0\u001B[39m]\n\u001B[32m    381\u001B[39m     \u001B[38;5;28;01melse\u001B[39;00m:\n\u001B[32m--> \u001B[39m\u001B[32m382\u001B[39m         \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43mfunction\u001B[49m\u001B[43m(\u001B[49m\u001B[43mdata_struct\u001B[49m\u001B[43m)\u001B[49m\n\u001B[32m    383\u001B[39m \u001B[38;5;28;01mif\u001B[39;00m (\n\u001B[32m    384\u001B[39m     batched\n\u001B[32m    385\u001B[39m     \u001B[38;5;129;01mand\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m \u001B[38;5;28misinstance\u001B[39m(data_struct, \u001B[38;5;28mdict\u001B[39m)\n\u001B[32m    386\u001B[39m     \u001B[38;5;129;01mand\u001B[39;00m \u001B[38;5;28misinstance\u001B[39m(data_struct, types)\n\u001B[32m    387\u001B[39m     \u001B[38;5;129;01mand\u001B[39;00m \u001B[38;5;28mall\u001B[39m(\u001B[38;5;129;01mnot\u001B[39;00m \u001B[38;5;28misinstance\u001B[39m(v, (\u001B[38;5;28mdict\u001B[39m, types)) \u001B[38;5;28;01mfor\u001B[39;00m v \u001B[38;5;129;01min\u001B[39;00m data_struct)\n\u001B[32m    388\u001B[39m ):\n\u001B[32m    389\u001B[39m     \u001B[38;5;28;01mreturn\u001B[39;00m [mapped_item \u001B[38;5;28;01mfor\u001B[39;00m batch \u001B[38;5;129;01min\u001B[39;00m iter_batched(data_struct, batch_size) \u001B[38;5;28;01mfor\u001B[39;00m mapped_item \u001B[38;5;129;01min\u001B[39;00m function(batch)]\n",
      "\u001B[36mFile \u001B[39m\u001B[32mE:\\Programming\\Python\\DeepLearning\\venv\\Lib\\site-packages\\datasets\\formatting\\torch_formatter.py:104\u001B[39m, in \u001B[36mTorchFormatter._recursive_tensorize\u001B[39m\u001B[34m(self, data_struct)\u001B[39m\n\u001B[32m    102\u001B[39m \u001B[38;5;28;01melif\u001B[39;00m \u001B[38;5;28misinstance\u001B[39m(data_struct, (\u001B[38;5;28mlist\u001B[39m, \u001B[38;5;28mtuple\u001B[39m)):\n\u001B[32m    103\u001B[39m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28mself\u001B[39m._consolidate([\u001B[38;5;28mself\u001B[39m.recursive_tensorize(substruct) \u001B[38;5;28;01mfor\u001B[39;00m substruct \u001B[38;5;129;01min\u001B[39;00m data_struct])\n\u001B[32m--> \u001B[39m\u001B[32m104\u001B[39m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[38;5;28;43mself\u001B[39;49m\u001B[43m.\u001B[49m\u001B[43m_tensorize\u001B[49m\u001B[43m(\u001B[49m\u001B[43mdata_struct\u001B[49m\u001B[43m)\u001B[49m\n",
      "\u001B[36mFile \u001B[39m\u001B[32mE:\\Programming\\Python\\DeepLearning\\venv\\Lib\\site-packages\\datasets\\formatting\\torch_formatter.py:90\u001B[39m, in \u001B[36mTorchFormatter._tensorize\u001B[39m\u001B[34m(self, value)\u001B[39m\n\u001B[32m     87\u001B[39m     \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28misinstance\u001B[39m(value, (VideoDecoder, AudioDecoder)):\n\u001B[32m     88\u001B[39m         \u001B[38;5;28;01mreturn\u001B[39;00m value  \u001B[38;5;66;03m# TODO(QL): set output to jax arrays ?\u001B[39;00m\n\u001B[32m---> \u001B[39m\u001B[32m90\u001B[39m \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43mtorch\u001B[49m\u001B[43m.\u001B[49m\u001B[43mtensor\u001B[49m\u001B[43m(\u001B[49m\u001B[43mvalue\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43m*\u001B[49m\u001B[43m*\u001B[49m\u001B[43m{\u001B[49m\u001B[43m*\u001B[49m\u001B[43m*\u001B[49m\u001B[43mdefault_dtype\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43m*\u001B[49m\u001B[43m*\u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[43m.\u001B[49m\u001B[43mtorch_tensor_kwargs\u001B[49m\u001B[43m}\u001B[49m\u001B[43m)\u001B[49m\n",
      "\u001B[31mKeyboardInterrupt\u001B[39m: "
     ]
    }
   ],
   "execution_count": 18
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
 "language_info": {
  "codemirror_mode": {
   "name": "ipython",
   "version": 3
  },
  "file_extension": ".py",
  "mimetype": "text/x-python",
  "name": "python",
  "nbconvert_exporter": "python",
  "pygments_lexer": "ipython3",
  "version": "3"
 }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
