{
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "view-in-github",
        "colab_type": "text"
      },
      "source": [
        "<a href=\"https://colab.research.google.com/github/aburkov/theLMbook/blob/main/instruct_GPT2.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "<div style=\"display: flex; justify-content: center;\">\n",
        "    <div style=\"background-color: #f4f6f7; padding: 15px; width: 80%;\">\n",
        "        <table style=\"width: 100%\">\n",
        "            <tr>\n",
        "                <td style=\"vertical-align: middle;\">\n",
        "                    <span style=\"font-size: 14px;\">\n",
        "                        A notebook for <a href=\"https://www.thelmbook.com\" target=\"_blank\" rel=\"noopener\">The Hundred-Page Language Models Book</a> by Andriy Burkov<br><br>\n",
        "                        Code repository: <a href=\"https://github.com/aburkov/theLMbook\" target=\"_blank\" rel=\"noopener\">https://github.com/aburkov/theLMbook</a>\n",
        "                    </span>\n",
        "                </td>\n",
        "                <td style=\"vertical-align: middle;\">\n",
        "                    <a href=\"https://www.thelmbook.com\" target=\"_blank\" rel=\"noopener\">\n",
        "                        <img src=\"https://thelmbook.com/img/book.png\" width=\"80px\" alt=\"The Hundred-Page Language Models Book\">\n",
        "                    </a>\n",
        "                </td>\n",
        "            </tr>\n",
        "        </table>\n",
        "    </div>\n",
        "</div>"
      ],
      "metadata": {
        "id": "Wu6Fr-_WuSMZ"
      }
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "yy0zjL_2ouOU",
        "outputId": "fa6f8e9f-283b-4362-d1e1-c6a47fd7d74b"
      },
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Using device: cuda\n",
            "\n",
            "Dataset size: 510\n",
            "Training samples: 459\n",
            "Test samples: 51\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stderr",
          "text": [
            "Epoch 1/4: 100%|██████████| 29/29 [00:03<00:00,  7.61it/s, Loss=1.62]\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Epoch 1 - Average loss: 1.6233\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stderr",
          "text": [
            "Epoch 2/4: 100%|██████████| 29/29 [00:03<00:00,  7.80it/s, Loss=1.02]\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Epoch 2 - Average loss: 1.0221\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stderr",
          "text": [
            "Epoch 3/4: 100%|██████████| 29/29 [00:03<00:00,  7.83it/s, Loss=0.688]\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Epoch 3 - Average loss: 0.6885\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stderr",
          "text": [
            "Epoch 4/4: 100%|██████████| 29/29 [00:03<00:00,  7.74it/s, Loss=0.422]\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Epoch 4 - Average loss: 0.4222\n",
            "\n",
            "Testing finetuned model:\n",
            "Using device: cuda\n",
            "\n",
            "Input: Who is the President of the United States?\n",
            "Full generated text: George W. Bush\n",
            "<|im_end|>\n",
            "Cleaned response: George W. Bush\n"
          ]
        }
      ],
      "source": [
        "# Import required libraries\n",
        "import json            # For parsing JSON data\n",
        "import random          # For setting seeds and shuffling data\n",
        "import requests        # For downloading dataset from URL\n",
        "import torch           # Main PyTorch library\n",
        "from torch.utils.data import Dataset, DataLoader  # For dataset handling\n",
        "from transformers import AutoTokenizer, AutoModelForCausalLM, StoppingCriteria  # HuggingFace components\n",
        "from tqdm import tqdm   # Progress bar utilities\n",
        "import re               # For text normalization\n",
        "\n",
        "def set_seed(seed):\n",
        "    \"\"\"\n",
        "    Sets random seeds for reproducibility across different libraries.\n",
        "\n",
        "    Args:\n",
        "        seed (int): Seed value for random number generation\n",
        "    \"\"\"\n",
        "    # Set Python's built-in random seed\n",
        "    random.seed(seed)\n",
        "    # Set PyTorch's CPU random seed\n",
        "    torch.manual_seed(seed)\n",
        "    # Set seed for all available GPUs\n",
        "    torch.cuda.manual_seed_all(seed)\n",
        "    # Request cuDNN to use deterministic algorithms\n",
        "    torch.backends.cudnn.deterministic = True\n",
        "    # Disable cuDNN's auto-tuner for consistent behavior\n",
        "    torch.backends.cudnn.benchmark = False\n",
        "\n",
        "def build_prompt(instruction, solution=None):\n",
        "    \"\"\"\n",
        "    Creates a chat-formatted prompt with system, user, and assistant messages.\n",
        "\n",
        "    Args:\n",
        "        instruction (str): User's instruction/question\n",
        "        solution (str, optional): Expected response for training\n",
        "\n",
        "    Returns:\n",
        "        str: Formatted prompt string\n",
        "    \"\"\"\n",
        "    # Add solution with end token if provided\n",
        "    wrapped_solution = \"\"\n",
        "    if solution:\n",
        "        wrapped_solution = f\"\\n{solution}\\n<|im_end|>\"\n",
        "\n",
        "    # Build chat format with system, user, and assistant messages\n",
        "    return f\"\"\"<|im_start|>system\n",
        "You are a helpful assistant.\n",
        "<|im_end|>\n",
        "<|im_start|>user\n",
        "{instruction}\n",
        "<|im_end|>\n",
        "<|im_start|>assistant\"\"\" + wrapped_solution\n",
        "\n",
        "def encode_text(tokenizer, text, return_tensor=False):\n",
        "    \"\"\"\n",
        "    Encodes text using the provided tokenizer.\n",
        "\n",
        "    Args:\n",
        "        tokenizer: Hugging Face tokenizer\n",
        "        text (str): Text to encode\n",
        "        return_tensor (bool): Whether to return PyTorch tensor\n",
        "\n",
        "    Returns:\n",
        "        List or tensor of token IDs\n",
        "    \"\"\"\n",
        "    # If tensor output is requested, encode with PyTorch tensors\n",
        "    if return_tensor:\n",
        "        return tokenizer.encode(\n",
        "            text, add_special_tokens=False, return_tensors=\"pt\"\n",
        "        )\n",
        "    # Otherwise return list of token IDs\n",
        "    else:\n",
        "        return tokenizer.encode(text, add_special_tokens=False)\n",
        "\n",
        "class EndTokenStoppingCriteria(StoppingCriteria):\n",
        "    \"\"\"\n",
        "    Custom stopping criteria for text generation.\n",
        "    Stops when a specific end token sequence is generated.\n",
        "\n",
        "    Args:\n",
        "        end_tokens (list): Token IDs that signal generation should stop\n",
        "        device: Device where the model is running\n",
        "    \"\"\"\n",
        "    def __init__(self, end_tokens, device):\n",
        "        # Keep the reference sequence on the model's device so the\n",
        "        # per-step comparison avoids host/device transfers\n",
        "        self.end_tokens = torch.tensor(end_tokens).to(device)\n",
        "\n",
        "    def __call__(self, input_ids, scores, **kwargs):\n",
        "        \"\"\"\n",
        "        Checks if generation should stop for each sequence.\n",
        "\n",
        "        Accepts **kwargs because newer transformers releases pass extra\n",
        "        keyword arguments to stopping criteria; without it generation\n",
        "        would fail with a TypeError.\n",
        "\n",
        "        Args:\n",
        "            input_ids: Current generated token IDs\n",
        "            scores: Token probabilities\n",
        "\n",
        "        Returns:\n",
        "            tensor: Boolean tensor indicating which sequences should stop\n",
        "        \"\"\"\n",
        "        should_stop = []\n",
        "\n",
        "        # A sequence stops once its trailing tokens equal the end tokens\n",
        "        for sequence in input_ids:\n",
        "            if len(sequence) >= len(self.end_tokens):\n",
        "                # Compare last tokens with end tokens\n",
        "                last_tokens = sequence[-len(self.end_tokens):]\n",
        "                should_stop.append(torch.all(last_tokens == self.end_tokens))\n",
        "            else:\n",
        "                should_stop.append(False)\n",
        "\n",
        "        return torch.tensor(should_stop, device=input_ids.device)\n",
        "\n",
        "class PromptCompletionDataset(Dataset):\n",
        "    \"\"\"\n",
        "    PyTorch Dataset for instruction-completion pairs.\n",
        "    Handles the conversion of text data into model-ready format.\n",
        "\n",
        "    Args:\n",
        "        data (list): List of dictionaries containing instructions and solutions\n",
        "        tokenizer: Hugging Face tokenizer\n",
        "    \"\"\"\n",
        "    def __init__(self, data, tokenizer):\n",
        "        self.data = data\n",
        "        self.tokenizer = tokenizer\n",
        "\n",
        "    def __len__(self):\n",
        "        # Return total number of examples\n",
        "        return len(self.data)\n",
        "\n",
        "    def __getitem__(self, idx):\n",
        "        \"\"\"\n",
        "        Returns a single training example.\n",
        "\n",
        "        Args:\n",
        "            idx (int): Index of the example to fetch\n",
        "\n",
        "        Returns:\n",
        "            dict: Contains input_ids, labels, prompt, and expected completion\n",
        "        \"\"\"\n",
        "        # Get example from dataset\n",
        "        item = self.data[idx]\n",
        "        # Build full prompt with instruction\n",
        "        prompt = build_prompt(item[\"instruction\"])\n",
        "        # Format completion with end token\n",
        "        completion = f\"\"\"{item[\"solution\"]}\\n<|im_end|>\"\"\"\n",
        "\n",
        "        # Convert text to token IDs\n",
        "        encoded_prompt = encode_text(self.tokenizer, prompt)\n",
        "        encoded_completion = encode_text(self.tokenizer, completion)\n",
        "        eos_token = [self.tokenizer.eos_token_id]\n",
        "\n",
        "        # Combine for full input sequence\n",
        "        input_ids = encoded_prompt + encoded_completion + eos_token\n",
        "        # Create labels: -100 for prompt (ignored in loss)\n",
        "        labels = [-100] * len(encoded_prompt) + encoded_completion + eos_token\n",
        "\n",
        "        return {\n",
        "            \"input_ids\": input_ids,\n",
        "            \"labels\": labels,\n",
        "            \"prompt\": prompt,\n",
        "            \"expected_completion\": completion\n",
        "        }\n",
        "\n",
        "def collate_fn(batch):\n",
        "    \"\"\"\n",
        "    Collates batch of examples into training-ready format.\n",
        "    Handles padding and conversion to tensors.\n",
        "\n",
        "    Args:\n",
        "        batch: List of examples from Dataset\n",
        "\n",
        "    Returns:\n",
        "        tuple: (input_ids, attention_mask, labels, prompts, expected_completions)\n",
        "    \"\"\"\n",
        "    # Find longest sequence for padding\n",
        "    max_length = max(len(item[\"input_ids\"]) for item in batch)\n",
        "\n",
        "    # Pad input sequences\n",
        "    input_ids = [\n",
        "        item[\"input_ids\"] +\n",
        "        [tokenizer.pad_token_id] * (max_length - len(item[\"input_ids\"]))\n",
        "        for item in batch\n",
        "    ]\n",
        "    # Pad label sequences\n",
        "    labels = [\n",
        "        item[\"labels\"] +\n",
        "        [-100] * (max_length - len(item[\"labels\"]))\n",
        "        for item in batch\n",
        "    ]\n",
        "    # Create attention masks\n",
        "    attention_mask = [\n",
        "        [1] * len(item[\"input_ids\"]) +\n",
        "        [0] * (max_length - len(item[\"input_ids\"]))\n",
        "        for item in batch\n",
        "    ]\n",
        "    prompts = [item[\"prompt\"] for item in batch]\n",
        "    expected_completions = [item[\"expected_completion\"] for item in batch]\n",
        "\n",
        "    return (\n",
        "        torch.tensor(input_ids),\n",
        "        torch.tensor(attention_mask),\n",
        "        torch.tensor(labels),\n",
        "        prompts,\n",
        "        expected_completions\n",
        "    )\n",
        "\n",
        "def normalize_text(text):\n",
        "    \"\"\"\n",
        "    Normalizes text for consistent comparison.\n",
        "\n",
        "    Args:\n",
        "        text (str): Input text\n",
        "\n",
        "    Returns:\n",
        "        str: Normalized text\n",
        "    \"\"\"\n",
        "    # Remove leading/trailing whitespace and convert to lowercase\n",
        "    text = text.strip().lower()\n",
        "    # Replace multiple whitespace characters with single space\n",
        "    text = re.sub(r'\\s+', ' ', text)\n",
        "    return text\n",
        "\n",
        "def generate_text(model, tokenizer, prompt, max_new_tokens=100):\n",
        "    \"\"\"\n",
        "    Generates a completion for a prompt, stopping at <|im_end|>.\n",
        "\n",
        "    Args:\n",
        "        model: Fine-tuned model\n",
        "        tokenizer: Associated tokenizer\n",
        "        prompt (str): Input prompt\n",
        "        max_new_tokens (int): Maximum number of tokens to generate\n",
        "\n",
        "    Returns:\n",
        "        str: Generated completion\n",
        "    \"\"\"\n",
        "    # Tokenize the prompt and place it on the model's device\n",
        "    encoded = tokenizer(prompt, return_tensors=\"pt\").to(model.device)\n",
        "\n",
        "    # Stop generation as soon as the end-of-turn marker is produced\n",
        "    stop_ids = tokenizer.encode(\"<|im_end|>\", add_special_tokens=False)\n",
        "    criteria = [EndTokenStoppingCriteria(stop_ids, model.device)]\n",
        "\n",
        "    sequences = model.generate(\n",
        "        input_ids=encoded[\"input_ids\"],\n",
        "        attention_mask=encoded[\"attention_mask\"],\n",
        "        max_new_tokens=max_new_tokens,\n",
        "        pad_token_id=tokenizer.pad_token_id,\n",
        "        stopping_criteria=criteria\n",
        "    )\n",
        "\n",
        "    # Keep only the newly generated tokens (everything after the prompt)\n",
        "    prompt_length = encoded[\"input_ids\"].shape[1]\n",
        "    generated_ids = sequences[0][prompt_length:]\n",
        "    return tokenizer.decode(generated_ids).strip()\n",
        "\n",
        "def test_model(model_path, test_input):\n",
        "    \"\"\"\n",
        "    Loads a saved model and prints its response to a single instruction.\n",
        "\n",
        "    Args:\n",
        "        model_path (str): Path to saved model\n",
        "        test_input (str): Instruction to test\n",
        "    \"\"\"\n",
        "    # Pick GPU when available, otherwise fall back to CPU\n",
        "    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
        "    print(f\"Using device: {device}\")\n",
        "\n",
        "    # Load the fine-tuned weights and matching tokenizer from disk\n",
        "    model = AutoModelForCausalLM.from_pretrained(model_path).to(device)\n",
        "    tokenizer = AutoTokenizer.from_pretrained(model_path)\n",
        "    # GPT-2 ships without a pad token, so reuse EOS\n",
        "    tokenizer.pad_token = tokenizer.eos_token\n",
        "\n",
        "    # Build the chat prompt and generate the model's reply\n",
        "    prompt = build_prompt(test_input)\n",
        "    generated_text = generate_text(model, tokenizer, prompt)\n",
        "\n",
        "    cleaned = generated_text.replace(\"<|im_end|>\", \"\").strip()\n",
        "    print(f\"\\nInput: {test_input}\")\n",
        "    print(f\"Full generated text: {generated_text}\")\n",
        "    print(f\"Cleaned response: {cleaned}\")\n",
        "\n",
        "def download_and_prepare_data(data_url, tokenizer, batch_size, test_ratio=0.1):\n",
        "    \"\"\"\n",
        "    Downloads and prepares dataset for training.\n",
        "\n",
        "    Args:\n",
        "        data_url (str): URL of the dataset (JSON Lines format: one\n",
        "            {\\\"instruction\\\": ..., \\\"solution\\\": ...} object per line)\n",
        "        tokenizer: Tokenizer for text processing\n",
        "        batch_size (int): Batch size for DataLoader\n",
        "        test_ratio (float): Proportion of data for testing\n",
        "\n",
        "    Returns:\n",
        "        tuple: (train_loader, test_loader)\n",
        "\n",
        "    Raises:\n",
        "        requests.HTTPError: If the dataset download fails\n",
        "    \"\"\"\n",
        "    # Download dataset; fail loudly on HTTP errors instead of letting\n",
        "    # json.loads choke on an error page further down\n",
        "    response = requests.get(data_url)\n",
        "    response.raise_for_status()\n",
        "\n",
        "    dataset = []\n",
        "    # Parse each line as an instruction-solution pair\n",
        "    for line in response.text.splitlines():\n",
        "        if line.strip():  # Skip empty lines\n",
        "            entry = json.loads(line)\n",
        "            dataset.append({\n",
        "                \"instruction\": entry[\"instruction\"],\n",
        "                \"solution\": entry[\"solution\"]\n",
        "            })\n",
        "\n",
        "    # Shuffle before splitting so train/test share the same distribution\n",
        "    random.shuffle(dataset)\n",
        "    split_index = int(len(dataset) * (1 - test_ratio))\n",
        "    train_data = dataset[:split_index]\n",
        "    test_data = dataset[split_index:]\n",
        "\n",
        "    # Print dataset statistics\n",
        "    print(f\"\\nDataset size: {len(dataset)}\")\n",
        "    print(f\"Training samples: {len(train_data)}\")\n",
        "    print(f\"Test samples: {len(test_data)}\")\n",
        "\n",
        "    # Create datasets\n",
        "    train_dataset = PromptCompletionDataset(train_data, tokenizer)\n",
        "    test_dataset = PromptCompletionDataset(test_data, tokenizer)\n",
        "\n",
        "    # Only the training loader shuffles; evaluation order stays fixed\n",
        "    train_loader = DataLoader(\n",
        "        train_dataset,\n",
        "        batch_size=batch_size,\n",
        "        shuffle=True,\n",
        "        collate_fn=collate_fn\n",
        "    )\n",
        "    test_loader = DataLoader(\n",
        "        test_dataset,\n",
        "        batch_size=batch_size,\n",
        "        shuffle=False,\n",
        "        collate_fn=collate_fn\n",
        "    )\n",
        "\n",
        "    return train_loader, test_loader\n",
        "\n",
        "def get_hyperparameters():\n",
        "    \"\"\"\n",
        "    Returns training hyperparameters.\n",
        "\n",
        "    Returns:\n",
        "        tuple: (num_epochs, batch_size, learning_rate)\n",
        "    \"\"\"\n",
        "    # Fewer epochs for instruction tuning as it's more data-efficient\n",
        "    num_epochs = 4\n",
        "    # Standard batch size that works well with most GPU memory\n",
        "    batch_size = 16\n",
        "    # Standard learning rate for fine-tuning transformers\n",
        "    learning_rate = 5e-5\n",
        "\n",
        "    return num_epochs, batch_size, learning_rate\n",
        "\n",
        "# Main training script: fine-tunes GPT-2 on instruction data, then tests it\n",
        "if __name__ == \"__main__\":\n",
        "    # Set random seed for reproducibility\n",
        "    set_seed(42)\n",
        "\n",
        "    # Configure training parameters\n",
        "    data_url = \"https://www.thelmbook.com/data/instruct\"\n",
        "    model_name = \"openai-community/gpt2\"\n",
        "    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
        "    print(f\"Using device: {device}\")\n",
        "\n",
        "    # Initialize tokenizer; note that collate_fn reads this module-level\n",
        "    # `tokenizer` to look up the padding token ID\n",
        "    tokenizer = AutoTokenizer.from_pretrained(model_name)\n",
        "    # GPT-2 has no dedicated pad token, so reuse the EOS token\n",
        "    tokenizer.pad_token = tokenizer.eos_token\n",
        "\n",
        "    model = AutoModelForCausalLM.from_pretrained(model_name).to(device)\n",
        "\n",
        "    # Get hyperparameters and prepare data\n",
        "    num_epochs, batch_size, learning_rate = get_hyperparameters()\n",
        "    train_loader, test_loader = download_and_prepare_data(data_url, tokenizer, batch_size)\n",
        "\n",
        "    # Initialize optimizer (AdamW is standard for transformer fine-tuning)\n",
        "    optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)\n",
        "\n",
        "    # Training loop\n",
        "    for epoch in range(num_epochs):\n",
        "        total_loss = 0\n",
        "        num_batches = 0\n",
        "        progress_bar = tqdm(train_loader, desc=f\"Epoch {epoch+1}/{num_epochs}\")\n",
        "\n",
        "        # The prompt/completion strings returned by collate_fn are unused here\n",
        "        for input_ids, attention_mask, labels, _, _ in progress_bar:\n",
        "            # Move batch to device\n",
        "            input_ids = input_ids.to(device)\n",
        "            attention_mask = attention_mask.to(device)\n",
        "            labels = labels.to(device)\n",
        "\n",
        "            # Forward pass; the model computes cross-entropy loss internally\n",
        "            # when labels are supplied (-100 positions are ignored)\n",
        "            outputs = model(\n",
        "                input_ids=input_ids,\n",
        "                attention_mask=attention_mask,\n",
        "                labels=labels\n",
        "            )\n",
        "            loss = outputs.loss\n",
        "\n",
        "            # Backward pass and optimization (no gradient accumulation)\n",
        "            loss.backward()\n",
        "            optimizer.step()\n",
        "            optimizer.zero_grad()\n",
        "\n",
        "            # Update metrics\n",
        "            total_loss += loss.item()\n",
        "            num_batches += 1\n",
        "\n",
        "            # Shown loss is the running average over the epoch so far\n",
        "            progress_bar.set_postfix({\"Loss\": total_loss / num_batches})\n",
        "\n",
        "        # Display epoch metrics\n",
        "        avg_loss = total_loss / num_batches\n",
        "        print(f\"Epoch {epoch+1} - Average loss: {avg_loss:.4f}\")\n",
        "\n",
        "    # Save the fine-tuned model and tokenizer together for later loading\n",
        "    model.save_pretrained(\"./finetuned_model\")\n",
        "    tokenizer.save_pretrained(\"./finetuned_model\")\n",
        "\n",
        "    # Test the model on one held-out style question\n",
        "    print(\"\\nTesting finetuned model:\")\n",
        "    test_input = \"Who is the President of the United States?\"\n",
        "    test_model(\"./finetuned_model\", test_input)"
      ]
    }
  ],
  "metadata": {
    "colab": {
      "provenance": [],
      "gpuType": "A100",
      "authorship_tag": "ABX9TyMNhBkBP6E26XdVthfpawAc",
      "include_colab_link": true
    },
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    },
    "language_info": {
      "name": "python"
    },
    "accelerator": "GPU"
  },
  "nbformat": 4,
  "nbformat_minor": 0
}