{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": [],
"gpuType": "T4",
"authorship_tag": "ABX9TyNJTibhP/0M1eYUw/zipgqC",
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
},
"accelerator": "GPU"
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
""
]
},
{
"cell_type": "code",
"source": [
"from google.colab import drive\n",
"drive.mount('/content/drive')"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "6XXCT7w0YSV6",
"outputId": "7b15e501-987f-4a3b-e6a6-5cc6757c0082"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Mounted at /content/drive\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"# raw text corpus used to feed the BPE tokenizer below\n",
"with open('/content/drive/MyDrive/new_training_data.txt', 'r', encoding='utf-8') as file:\n",
"    captions = file.read()\n",
"\n",
"# len() of a str counts characters, not words — report it honestly\n",
"print(len(captions)/1e6, 'million characters')"
],
"metadata": {
"id": "xUND_jGmYUqM",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "4d9cdffa-8b3f-4967-933f-b5fef6256ca6"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"219.382798 million words\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"# %pip (rather than !pip) guarantees the install targets this kernel's environment\n",
"%pip install tiktoken"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "65WnHhhvOiXL",
"outputId": "36664169-aaa7-4a2a-e860-9ac0db1eea5b"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Collecting tiktoken\n",
" Downloading tiktoken-0.6.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (1.8 MB)\n",
"\u001b[?25l \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0.0/1.8 MB\u001b[0m \u001b[31m?\u001b[0m eta \u001b[36m-:--:--\u001b[0m\r\u001b[2K \u001b[91m━━━━━━━━\u001b[0m\u001b[90m╺\u001b[0m\u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0.4/1.8 MB\u001b[0m \u001b[31m11.0 MB/s\u001b[0m eta \u001b[36m0:00:01\u001b[0m\r\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.8/1.8 MB\u001b[0m \u001b[31m33.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hRequirement already satisfied: regex>=2022.1.18 in /usr/local/lib/python3.10/dist-packages (from tiktoken) (2023.12.25)\n",
"Requirement already satisfied: requests>=2.26.0 in /usr/local/lib/python3.10/dist-packages (from tiktoken) (2.31.0)\n",
"Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests>=2.26.0->tiktoken) (3.3.2)\n",
"Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests>=2.26.0->tiktoken) (3.6)\n",
"Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests>=2.26.0->tiktoken) (2.0.7)\n",
"Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests>=2.26.0->tiktoken) (2024.2.2)\n",
"Installing collected packages: tiktoken\n",
"Successfully installed tiktoken-0.6.0\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"import timeit\n",
"start_time = timeit.default_timer()"
],
"metadata": {
"id": "OHalNluSZeZD"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"import tiktoken\n",
"# text-davinci-003 maps to the p50k_base encoding, so a single lookup suffices;\n",
"# the previous version fetched the same encoding twice and discarded the first.\n",
"tokenizer = tiktoken.encoding_for_model(\"text-davinci-003\")\n",
"\n",
"input_data = tokenizer.encode(captions)\n",
"end_time = timeit.default_timer()\n",
"total_time = (end_time - start_time) / 60\n",
"\n",
"print(\"total tokens\", len(input_data)/1e6, 'million')\n",
"# tiktoken encoders are pretrained — this measures encoding time, not training\n",
"print(f\"time taken to tokenize the data {total_time}mins\")"
],
"metadata": {
"id": "TtddSd8AYXTX",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "1aa24aee-edf7-4ea8-ab80-afea0beb40cb"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"total tokens 47.994108 million\n",
"time taken to train the tokenizer 0.7022969253666669mins\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"n = int(0.9*len(input_data)) # first 90% will be train, rest val\n",
"train_data = input_data[:n]\n",
"val_data = input_data[n:]\n",
"# note: no literal quotes around \\n — the old string printed stray ' characters\n",
"print(f\"train data {len(train_data) / 1e6} million\\nvalidation data {len(val_data) / 1e6} million\")"
],
"metadata": {
"id": "oIKFNoIjYkhi",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "c76669ed-17f6-45c5-84ff-c8d8387873d3"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"train data 43.194697 million'\n",
"'validation data 4.799411 million\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"# Trimming data for a multiple of 4\n",
"# train_remainder = len(train_data) % 4\n",
"# val_remainder = len(val_data) % 4\n",
"# train_data = train_data[:-train_remainder] if train_remainder != 0 else train_data\n",
"# val_data = val_data[:-val_remainder] if val_remainder != 0 else val_data\n",
"\n",
"# print(len(train_data), len(val_data))"
],
"metadata": {
"id": "s8tNsyEhYyxz",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "075f902c-45a5-495c-c3a7-21407f40db5b"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"18692412 2076932\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"import torch\n",
"\n",
"# Convert to tensors\n",
"train_data = torch.tensor(train_data, dtype=torch.long)\n",
"val_data = torch.tensor(val_data, dtype=torch.long)\n",
"\n",
"print(f\"train data = {train_data[:10]}, \\nval data = {val_data[:10]}\")"
],
"metadata": {
"id": "qevMG1AtY5Mi",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "77a4f3be-0c10-4e57-ef4e-0a4de9ea9918"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"train data = tensor([ 5195, 547, 262, 13666, 319, 1468, 640, 88, 198, 65]), \n",
"val data = tensor([ 13, 11361, 286, 23748, 13, 9078, 11, 1623, 1122, 11])\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"vocab_size = tokenizer.n_vocab\n",
"print(vocab_size)"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "ayXltgrIafF4",
"outputId": "74084e34-9a4e-4e34-9488-f8bfa930d5ea"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"50281\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"import torch\n",
"import torch.nn as nn\n",
"from torch.nn import functional as F\n",
"\n",
"# hyperparameters\n",
"batch_size = 32 # independent sequences process in parallel\n",
"block_size = 128 # maximum context length for predictions\n",
"max_iters = 5000\n",
"eval_interval = 100\n",
"learning_rate = 3e-4\n",
"device = 'cuda' if torch.cuda.is_available() else 'cpu'\n",
"eval_iters = 200\n",
"n_embd = 384\n",
"n_head = 8\n",
"n_layer = 8\n",
"dropout = 0.2\n",
"norm_eps = 1e-05\n",
"# ------------\n",
"\n",
"torch.manual_seed(1400)\n",
"\n",
"# data loading\n",
"def get_batch(split):\n",
" # generate a small batch of data of inputs x and targets y\n",
" data = train_data if split == 'train' else val_data\n",
" ix = torch.randint(len(data) - block_size, (batch_size,))\n",
" x = torch.stack([data[i:i+block_size] for i in ix])\n",
" y = torch.stack([data[i+1:i+block_size+1] for i in ix])\n",
" x, y = x.to(device), y.to(device)\n",
" return x, y\n",
"\n",
"@torch.no_grad()\n",
"def estimate_loss():\n",
" out = {}\n",
" model.eval()\n",
" for split in ['train', 'val']:\n",
" losses = torch.zeros(eval_iters)\n",
" for k in range(eval_iters):\n",
" X, Y = get_batch(split)\n",
" logits, loss = model(X, Y)\n",
" losses[k] = loss.item()\n",
" out[split] = losses.mean()\n",
" model.train()\n",
" return out\n",
"\n",
"class Head(nn.Module):\n",
" \"\"\" one head of self-attention \"\"\"\n",
"\n",
" def __init__(self, head_size):\n",
" super().__init__()\n",
" self.key = nn.Linear(n_embd, head_size, bias=False)\n",
" self.query = nn.Linear(n_embd, head_size, bias=False)\n",
" self.value = nn.Linear(n_embd, head_size, bias=False)\n",
" self.register_buffer('tril', torch.tril(torch.ones(block_size, block_size)))\n",
"\n",
" self.dropout = nn.Dropout(dropout)\n",
"\n",
" def forward(self, x):\n",
" # input of size (batch, time-step, channels)\n",
" # output of size (batch, time-step, head size)\n",
" B,T,C = x.shape\n",
" k = self.key(x) # (B,T,hs)\n",
" q = self.query(x) # (B,T,hs)\n",
" # compute attention scores (\"affinities\")\n",
" wei = q @ k.transpose(-2,-1) * k.shape[-1]**-0.5 # (B, T, hs) @ (B, hs, T) -> (B, T, T)\n",
" wei = wei.masked_fill(self.tril[:T, :T] == 0, float('-inf')) # (B, T, T)\n",
" wei = F.softmax(wei, dim=-1) # (B, T, T)\n",
" wei = self.dropout(wei)\n",
" # perform the weighted aggregation of the values\n",
" v = self.value(x) # (B,T,hs)\n",
" out = wei @ v # (B, T, T) @ (B, T, hs) -> (B, T, hs)\n",
" return out\n",
"\n",
"class MultiHeadAttention(nn.Module):\n",
" \"\"\" multiple heads of self-attention in parallel \"\"\"\n",
"\n",
" def __init__(self, num_heads, head_size):\n",
" super().__init__()\n",
" self.heads = nn.ModuleList([Head(head_size) for _ in range(num_heads)])\n",
" self.proj = nn.Linear(head_size * num_heads, n_embd)\n",
" self.dropout = nn.Dropout(dropout)\n",
"\n",
" def forward(self, x):\n",
" out = torch.cat([h(x) for h in self.heads], dim=-1)\n",
" out = self.dropout(self.proj(out))\n",
" return out\n",
"\n",
"class FeedFoward(nn.Module):\n",
" \"\"\" a simple linear layer followed by a non-linearity \"\"\"\n",
"\n",
" def __init__(self, n_embd):\n",
" super().__init__()\n",
" self.net = nn.Sequential(\n",
" nn.Linear(n_embd, 4 * n_embd),\n",
" nn.ReLU(),\n",
" nn.Linear(4 * n_embd, n_embd),\n",
" nn.Dropout(dropout),\n",
" )\n",
"\n",
" def forward(self, x):\n",
" return self.net(x)\n",
"class Block(nn.Module):\n",
" \"\"\" Transformer block: communication followed by computation \"\"\"\n",
"\n",
" def __init__(self, n_embd, n_head):\n",
" # n_embd: embedding dimension, n_head: the number of heads we'd like\n",
" super().__init__()\n",
" head_size = n_embd // n_head\n",
" self.sa = MultiHeadAttention(n_head, head_size)\n",
" self.ffwd = FeedFoward(n_embd)\n",
" self.ln1 = nn.LayerNorm(n_embd, eps=norm_eps)\n",
" self.ln2 = nn.LayerNorm(n_embd, eps=norm_eps)\n",
"\n",
" def forward(self, x):\n",
" x = x + self.sa(self.ln1(x))\n",
" x = x + self.ffwd(self.ln2(x))\n",
" return x\n",
"\n",
"class GPTLanguageModel(nn.Module):\n",
"\n",
" def __init__(self):\n",
" super().__init__()\n",
" # each token directly reads off the logits for the next token from a lookup table\n",
" self.token_embedding_table = nn.Embedding(vocab_size, n_embd)\n",
" self.position_embedding_table = nn.Embedding(block_size, n_embd)\n",
" self.blocks = nn.Sequential(*[Block(n_embd, n_head=n_head) for _ in range(n_layer)])\n",
" self.ln_f = nn.LayerNorm(n_embd, eps=norm_eps) # final layer norm\n",
" self.lm_head = nn.Linear(n_embd, vocab_size)\n",
" self.apply(self._init_weights)\n",
"\n",
" def _init_weights(self, module):\n",
" if isinstance(module, nn.Linear):\n",
" torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)\n",
" if module.bias is not None:\n",
" torch.nn.init.zeros_(module.bias.data)\n",
" elif isinstance(module, nn.Embedding):\n",
" torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)\n",
"\n",
" def forward(self, idx, targets=None):\n",
" B, T = idx.shape\n",
"\n",
" # idx and targets are both (B,T) tensor of integers\n",
" tok_emb = self.token_embedding_table(idx) # (B,T,C)\n",
" pos_emb = self.position_embedding_table(torch.arange(T, device=device)) # (T,C)\n",
" x = tok_emb + pos_emb # (B,T,C)\n",
" x = self.blocks(x) # (B,T,C)\n",
" x = self.ln_f(x) # (B,T,C)\n",
" logits = self.lm_head(x) # (B,T,vocab_size)\n",
"\n",
" if targets is None:\n",
" loss = None\n",
" else:\n",
" B, T, C = logits.shape\n",
" logits = logits.view(B*T, C)\n",
" targets = targets.view(B*T)\n",
" loss = F.cross_entropy(logits, targets)\n",
"\n",
" return logits, loss\n",
"\n",
" def generate(self, idx, max_new_tokens):\n",
" # idx is (B, T) array of indices in the current context\n",
" for _ in range(max_new_tokens):\n",
" # crop idx to the last block_size tokens\n",
" idx_cond = idx[:, -block_size:]\n",
" # get the predictions\n",
" logits, loss = self(idx_cond)\n",
" # focus only on the last time step\n",
" logits = logits[:, -1, :] # becomes (B, C)\n",
" # apply softmax to get probabilities\n",
" probs = F.softmax(logits, dim=-1) # (B, C)\n",
" # sample from the distribution\n",
" idx_next = torch.multinomial(probs, num_samples=1) # (B, 1)\n",
" # append sampled index to the running sequence\n",
" idx = torch.cat((idx, idx_next), dim=1) # (B, T+1)\n",
" return idx\n",
"\n",
"model = GPTLanguageModel()\n",
"checkpoint_path = '/content/drive/MyDrive/52.9_transformer_model.pth'\n",
"# map_location lets a GPU-saved checkpoint load on a CPU-only runtime;\n",
"# the parameters are moved to `device` by model.to(device) just below\n",
"checkpoint = torch.load(checkpoint_path, map_location=device)\n",
"model.load_state_dict(checkpoint)\n",
"m = model.to(device)\n",
"\n",
"# print the number of parameters in the model\n",
"n_param = sum(p.numel() for p in m.parameters())/1e6\n",
"print(n_param, 'million')\n",
"\n",
"# create a PyTorch optimizer\n",
"optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)\n",
"steps = []\n",
"train_losses = []\n",
"val_losses = []\n",
"\n",
"for iter in range(max_iters):\n",
"\n",
" # every once in a while evaluate the loss on train and val sets\n",
" if iter % eval_interval == 0 or iter == max_iters - 1:\n",
" losses = estimate_loss()\n",
" print(f\"step {iter}: train loss {losses['train']:.4f}, val loss {losses['val']:.4f}\")\n",
"\n",
" # Store step and loss values for visualization\n",
" steps.append(iter)\n",
" train_losses.append(losses['train'])\n",
" val_losses.append(losses['val'])\n",
"\n",
" # sample a batch of data\n",
" xb, yb = get_batch('train')\n",
"\n",
" # evaluate the loss\n",
" logits, loss = model(xb, yb)\n",
" optimizer.zero_grad(set_to_none=True)\n",
" loss.backward()\n",
" optimizer.step()"
],
"metadata": {
"id": "zLfgRvabDkxs",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "36e05382-e436-4fd6-bb7a-4f491e3e9994"
},
"execution_count": null,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"52.902505 million\n",
"step 0: train loss 5.2925, val loss 5.2780\n",
"step 100: train loss 5.1310, val loss 4.9329\n",
"step 200: train loss 5.0658, val loss 4.8052\n",
"step 300: train loss 5.0129, val loss 4.7458\n",
"step 400: train loss 4.9957, val loss 4.6957\n",
"step 500: train loss 4.9448, val loss 4.6594\n",
"step 600: train loss 4.9200, val loss 4.6308\n",
"step 700: train loss 4.8983, val loss 4.6060\n",
"step 800: train loss 4.8582, val loss 4.5643\n",
"step 900: train loss 4.8539, val loss 4.5691\n",
"step 1000: train loss 4.8376, val loss 4.5232\n",
"step 1100: train loss 4.8215, val loss 4.5027\n",
"step 1200: train loss 4.8017, val loss 4.4783\n",
"step 1300: train loss 4.7649, val loss 4.4671\n",
"step 1400: train loss 4.7626, val loss 4.4521\n",
"step 1500: train loss 4.7515, val loss 4.4271\n",
"step 1600: train loss 4.7322, val loss 4.4010\n",
"step 1700: train loss 4.7119, val loss 4.4046\n",
"step 1800: train loss 4.7023, val loss 4.3749\n",
"step 1900: train loss 4.6986, val loss 4.3732\n",
"step 2000: train loss 4.6837, val loss 4.3539\n",
"step 2100: train loss 4.6763, val loss 4.3663\n",
"step 2200: train loss 4.6550, val loss 4.3402\n",
"step 2300: train loss 4.6396, val loss 4.3261\n",
"step 2400: train loss 4.6420, val loss 4.3203\n",
"step 2500: train loss 4.6337, val loss 4.3279\n",
"step 2600: train loss 4.6219, val loss 4.3131\n",
"step 2700: train loss 4.6139, val loss 4.3056\n",
"step 2800: train loss 4.5988, val loss 4.2781\n",
"step 2900: train loss 4.5987, val loss 4.2741\n",
"step 3000: train loss 4.5980, val loss 4.2652\n",
"step 3100: train loss 4.5729, val loss 4.2660\n",
"step 3200: train loss 4.5688, val loss 4.2703\n",
"step 3300: train loss 4.5536, val loss 4.2384\n",
"step 3400: train loss 4.5469, val loss 4.2426\n",
"step 3500: train loss 4.5486, val loss 4.2287\n",
"step 3600: train loss 4.5317, val loss 4.2261\n",
"step 3700: train loss 4.5175, val loss 4.2164\n",
"step 3800: train loss 4.5175, val loss 4.2231\n",
"step 3900: train loss 4.5129, val loss 4.2031\n",
"step 4000: train loss 4.5059, val loss 4.2053\n",
"step 4100: train loss 4.5076, val loss 4.1966\n",
"step 4200: train loss 4.4828, val loss 4.2038\n",
"step 4300: train loss 4.4967, val loss 4.2048\n",
"step 4400: train loss 4.4886, val loss 4.1737\n",
"step 4500: train loss 4.4836, val loss 4.1745\n",
"step 4600: train loss 4.4664, val loss 4.1736\n",
"step 4700: train loss 4.4461, val loss 4.1667\n",
"step 4800: train loss 4.4594, val loss 4.1599\n",
"step 4900: train loss 4.4391, val loss 4.1523\n",
"step 4999: train loss 4.4484, val loss 4.1480\n"
]
}
]
},
{
"cell_type": "code",
"source": [
"# save the trained model\n",
"torch.save(model.state_dict(), f\"{n_param:.1f}_model.pth\")"
],
"metadata": {
"id": "G1kHzG4M9IyF"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"torch.save(model, f\"{n_param:.0f}million_model.pth\")"
],
"metadata": {
"id": "wNngWs_gjvEe"
},
"execution_count": null,
"outputs": []
},
{
"cell_type": "code",
"source": [
"import matplotlib.pyplot as plt\n",
"\n",
"plt.figure(figsize=(10, 6))\n",
"plt.plot(steps, train_losses, label='Train Loss')\n",
"plt.plot(steps, val_losses, label='Validation Loss')\n",
"plt.title('Loss Over Steps')\n",
"plt.xlabel('Steps')\n",
"plt.ylabel('Loss')\n",
"plt.legend()\n",
"\n",
"plt.show()"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 564
},
"id": "dhXmIXq1dOZj",
"outputId": "e791d2fb-881f-4144-a5e4-f7f87a2865a5"
},
"execution_count": null,
"outputs": [
{
"output_type": "display_data",
"data": {
"text/plain": [
"