{
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "view-in-github",
        "colab_type": "text"
      },
      "source": [
        "<a href=\"https://colab.research.google.com/github/aburkov/theLMbook/blob/main/count_language_model.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "<div style=\"display: flex; justify-content: center;\">\n",
        "    <div style=\"background-color: #f4f6f7; padding: 15px; width: 80%;\">\n",
        "        <table style=\"width: 100%\">\n",
        "            <tbody><tr>\n",
        "                <td style=\"vertical-align: middle;\">\n",
        "                    <span style=\"font-size: 14px;\">\n",
        "                        A notebook for <a rel=\"noopener\" href=\"https://www.thelmbook.com\">The Hundred-Page Language Models Book</a> by Andriy Burkov<br><br>\n",
        "                        Code repository: <a rel=\"noopener\" href=\"https://github.com/aburkov/theLMbook\">https://github.com/aburkov/theLMbook</a>\n",
        "                    </span>\n",
        "                </td>\n",
        "                <td style=\"vertical-align: middle;\">\n",
        "                    <a rel=\"noopener\" href=\"https://www.thelmbook.com\">\n",
        "                        <img data-canonical-src=\"https://thelmbook.com/img/book.png\" alt=\"The Hundred-Page Language Models Book\" width=\"80px\" src=\"https://camo.githubusercontent.com/f3418fcb7da7c0ac7557629af1c0ccdf35ccff5ebe788c9987a4874c1a103c84/68747470733a2f2f7468656c6d626f6f6b2e636f6d2f696d672f626f6f6b2e706e67\">\n",
        "                    </a>\n",
        "                </td>\n",
        "            </tr>\n",
        "        </tbody></table>\n",
        "    </div>\n",
        "</div>"
      ],
      "metadata": {
        "id": "IdYAR6C2_wUi"
      }
    },
    {
      "cell_type": "markdown",
      "source": [
        "# Count-based language model\n",
        "\n",
        "## Utility functions and classes\n",
        "\n",
        "In the cell below, we import the dependencies and define the utility functions and the model class:"
      ],
      "metadata": {
        "id": "4R2DpglXC7R4"
      }
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "yy0zjL_2ouOU"
      },
      "outputs": [],
      "source": [
        "# Import required libraries\n",
        "import re         # For regular expressions (text tokenization)\n",
        "import requests   # For downloading the corpus\n",
        "import gzip       # For decompressing the downloaded corpus\n",
        "import io         # For handling byte streams\n",
        "import math       # For mathematical operations (log, exp)\n",
        "import random     # For random number generation\n",
        "from collections import defaultdict  # For efficient dictionary operations\n",
        "import pickle, os # For saving and loading the model\n",
        "\n",
        "def set_seed(seed):\n",
        "    \"\"\"\n",
        "    Seeds Python's built-in `random` module for reproducibility.\n",
        "\n",
        "    Note: only `random` is seeded here; no other randomness source\n",
        "    (e.g. numpy) is used by this notebook.\n",
        "\n",
        "    Args:\n",
        "        seed (int): Seed value for the random number generator\n",
        "    \"\"\"\n",
        "    random.seed(seed)\n",
        "\n",
        "def download_corpus(url):\n",
        "    \"\"\"\n",
        "    Downloads and decompresses a gzipped corpus file from the given URL.\n",
        "\n",
        "    Args:\n",
        "        url (str): URL of the gzipped corpus file\n",
        "\n",
        "    Returns:\n",
        "        str: Decoded text content of the corpus\n",
        "\n",
        "    Raises:\n",
        "        HTTPError: If the download fails\n",
        "    \"\"\"\n",
        "    print(f\"Downloading corpus from {url}...\")\n",
        "    response = requests.get(url)\n",
        "    response.raise_for_status()  # Abort with an exception on a bad HTTP status\n",
        "\n",
        "    print(\"Decompressing and reading the corpus...\")\n",
        "    # Wrap the downloaded bytes in an in-memory stream so gzip can read them\n",
        "    compressed = io.BytesIO(response.content)\n",
        "    with gzip.GzipFile(fileobj=compressed) as archive:\n",
        "        corpus = archive.read().decode('utf-8')\n",
        "\n",
        "    print(f\"Corpus size: {len(corpus)} characters\")\n",
        "    return corpus\n",
        "\n",
        "class CountLanguageModel:\n",
        "    \"\"\"\n",
        "    Implements an n-gram language model using count-based probability estimation.\n",
        "    Supports variable context lengths up to n-grams.\n",
        "    \"\"\"\n",
        "    def __init__(self, n):\n",
        "        \"\"\"\n",
        "        Initialize the model with maximum n-gram length.\n",
        "\n",
        "        Args:\n",
        "            n (int): Maximum length of n-grams to use\n",
        "        \"\"\"\n",
        "        self.n = n  # Maximum n-gram length\n",
        "        # Element k holds (k+1)-gram counts as {context_tuple: {next_token: count}};\n",
        "        # for unigrams (k=0) the context is the empty tuple ()\n",
        "        self.ngram_counts = [{} for _ in range(n)]  # List of dictionaries for each n-gram length\n",
        "        self.total_unigrams = 0  # Total number of tokens in training data\n",
        "\n",
        "    def predict_next_token(self, context):\n",
        "        \"\"\"\n",
        "        Predicts the most likely next token given a context.\n",
        "        Uses backoff strategy: tries largest n-gram first, then backs off to smaller n-grams.\n",
        "\n",
        "        Args:\n",
        "            context (list): List of tokens providing context for prediction\n",
        "\n",
        "        Returns:\n",
        "            str: Most likely next token, or None if no prediction can be made\n",
        "        \"\"\"\n",
        "        for n in range(self.n, 1, -1):  # Start with largest n-gram, back off to smaller ones\n",
        "            if len(context) >= n - 1:\n",
        "                context_n = tuple(context[-(n - 1):])  # Get the relevant context for this n-gram\n",
        "                counts = self.ngram_counts[n - 1].get(context_n)\n",
        "                if counts:\n",
        "                    return max(counts.items(), key=lambda x: x[1])[0]  # Return most frequent token\n",
        "        # Backoff to unigram if no larger context matches\n",
        "        unigram_counts = self.ngram_counts[0].get(())\n",
        "        if unigram_counts:\n",
        "            return max(unigram_counts.items(), key=lambda x: x[1])[0]\n",
        "        return None\n",
        "\n",
        "    def get_probability(self, token, context):\n",
        "        \"\"\"\n",
        "        Estimates P(token | context) using the same backoff strategy as\n",
        "        predict_next_token.\n",
        "\n",
        "        Tries the largest n-gram whose context was seen in training; if the\n",
        "        token never followed that context, backs off to shorter contexts.\n",
        "        As a last resort, returns an add-one (Laplace) smoothed unigram\n",
        "        estimate, so the result is strictly positive for a trained model.\n",
        "\n",
        "        Args:\n",
        "            token (str): Candidate next token\n",
        "            context (list or tuple): Preceding tokens\n",
        "\n",
        "        Returns:\n",
        "            float: Estimated probability of `token` given `context`\n",
        "\n",
        "        Note:\n",
        "            Assumes train() has been called; on an untrained model the\n",
        "            unigram lookup below returns None and this method raises.\n",
        "        \"\"\"\n",
        "        for n in range(self.n, 1, -1):\n",
        "            if len(context) >= n - 1:\n",
        "                context_n = tuple(context[-(n - 1):])\n",
        "                counts = self.ngram_counts[n - 1].get(context_n)\n",
        "                if counts:\n",
        "                    total = sum(counts.values())\n",
        "                    count = counts.get(token, 0)\n",
        "                    # Only use this n-gram level if the token was actually seen\n",
        "                    # after this context; otherwise back off to a shorter context\n",
        "                    if count > 0:\n",
        "                        return count / total\n",
        "        # Unigram fallback with add-one smoothing over the vocabulary\n",
        "        unigram_counts = self.ngram_counts[0].get(())\n",
        "        count = unigram_counts.get(token, 0)\n",
        "        V = len(unigram_counts)\n",
        "        return (count + 1) / (self.total_unigrams + V)\n",
        "\n",
        "def train(model, tokens):\n",
        "    \"\"\"\n",
        "    Trains the language model by counting n-grams in the training data.\n",
        "\n",
        "    Args:\n",
        "        model (CountLanguageModel): Model to train\n",
        "        tokens (list): List of tokens from the training corpus\n",
        "    \"\"\"\n",
        "    # Count n-grams of every size from 1 up to model.n\n",
        "    for size in range(1, model.n + 1):\n",
        "        counts = model.ngram_counts[size - 1]\n",
        "        # Slide a window of `size` tokens across the corpus\n",
        "        for start in range(len(tokens) - size + 1):\n",
        "            # First size-1 tokens are the context; the last is the prediction target\n",
        "            context = tuple(tokens[start:start + size - 1])\n",
        "            next_token = tokens[start + size - 1]\n",
        "\n",
        "            # defaultdict(int) lets us increment unseen tokens without a check\n",
        "            if context not in counts:\n",
        "                counts[context] = defaultdict(int)\n",
        "            counts[context][next_token] += 1\n",
        "\n",
        "    # Remember corpus length for unigram probability calculations\n",
        "    model.total_unigrams = len(tokens)\n",
        "\n",
        "def generate_text(model, context, num_tokens):\n",
        "    \"\"\"\n",
        "    Generates text by repeatedly sampling from the model.\n",
        "\n",
        "    Args:\n",
        "        model (CountLanguageModel): Trained language model\n",
        "        context (list): Initial context tokens\n",
        "        num_tokens (int): Number of tokens to generate\n",
        "\n",
        "    Returns:\n",
        "        str: Generated text including initial context\n",
        "    \"\"\"\n",
        "    # Start with the provided context\n",
        "    generated = list(context)\n",
        "\n",
        "    # Generate new tokens until we reach the desired length\n",
        "    while len(generated) - len(context) < num_tokens:\n",
        "        # Use the last n-1 tokens as context for prediction\n",
        "        next_token = model.predict_next_token(generated[-(model.n - 1):])\n",
        "        # An untrained (or empty) model cannot predict anything; stop here\n",
        "        # rather than appending None forever and crashing in ' '.join below\n",
        "        if next_token is None:\n",
        "            break\n",
        "        generated.append(next_token)\n",
        "\n",
        "        # Stop if we've generated enough tokens AND found a period\n",
        "        # This helps ensure complete sentences\n",
        "        if len(generated) - len(context) >= num_tokens and next_token == '.':\n",
        "            break\n",
        "\n",
        "    # Join tokens with spaces to create readable text\n",
        "    return ' '.join(generated)\n",
        "\n",
        "def compute_perplexity(model, tokens, context_size):\n",
        "    \"\"\"\n",
        "    Computes perplexity of the model on given tokens.\n",
        "\n",
        "    Args:\n",
        "        model (CountLanguageModel): Trained language model\n",
        "        tokens (list): List of tokens to evaluate on\n",
        "        context_size (int): Maximum context size to consider\n",
        "\n",
        "    Returns:\n",
        "        float: Perplexity score (lower is better)\n",
        "    \"\"\"\n",
        "    # An empty evaluation set has no defined perplexity\n",
        "    if not tokens:\n",
        "        return float('inf')\n",
        "\n",
        "    # Accumulate log-probabilities (logs avoid underflow from tiny products)\n",
        "    log_likelihood = 0\n",
        "    for position, token in enumerate(tokens):\n",
        "        # Clip the context window at the start of the sequence\n",
        "        window_start = max(0, position - context_size)\n",
        "        context = tuple(tokens[window_start:position])\n",
        "\n",
        "        # Probability of this token given its context\n",
        "        probability = model.get_probability(token, context)\n",
        "        log_likelihood += math.log(probability)\n",
        "\n",
        "    # Perplexity is exp of the negative average log-likelihood;\n",
        "    # lower values indicate better model performance\n",
        "    average_log_likelihood = log_likelihood / len(tokens)\n",
        "    return math.exp(-average_log_likelihood)\n",
        "\n",
        "def tokenize(text):\n",
        "    \"\"\"\n",
        "    Tokenizes text into words and periods.\n",
        "\n",
        "    Args:\n",
        "        text (str): Input text to tokenize\n",
        "\n",
        "    Returns:\n",
        "        list: List of lowercase tokens matching words or periods\n",
        "    \"\"\"\n",
        "    # Alphanumeric runs become word tokens; '.' is kept as its own token\n",
        "    pattern = r\"\\b[a-zA-Z0-9]+\\b|[.]\"\n",
        "    return re.findall(pattern, text.lower())\n",
        "\n",
        "def download_and_prepare_data(data_url):\n",
        "    \"\"\"\n",
        "    Downloads and prepares training and test data.\n",
        "\n",
        "    Args:\n",
        "        data_url (str): URL of the corpus to download\n",
        "\n",
        "    Returns:\n",
        "        tuple: (training_tokens, test_tokens) split 90/10\n",
        "    \"\"\"\n",
        "    # Fetch the raw text and turn it into a flat token list\n",
        "    tokens = tokenize(download_corpus(data_url))\n",
        "\n",
        "    # First 90% of tokens train the model; the final 10% evaluate it\n",
        "    split_index = int(len(tokens) * 0.9)\n",
        "    return tokens[:split_index], tokens[split_index:]\n",
        "\n",
        "def save_model(model, model_name):\n",
        "    \"\"\"\n",
        "    Saves the trained language model to disk.\n",
        "\n",
        "    Args:\n",
        "        model (CountLanguageModel): Trained model to save\n",
        "        model_name (str): Name to use for the saved model file\n",
        "\n",
        "    Returns:\n",
        "        str: Path to the saved model file\n",
        "\n",
        "    Raises:\n",
        "        IOError: If there's an error writing to disk\n",
        "    \"\"\"\n",
        "    # Make sure the target directory exists before writing\n",
        "    os.makedirs('models', exist_ok=True)\n",
        "    model_path = os.path.join('models', f'{model_name}.pkl')\n",
        "\n",
        "    # Persist only the model's state, not the object itself, so the class\n",
        "    # definition can evolve without invalidating saved files\n",
        "    state = {\n",
        "        'n': model.n,\n",
        "        'ngram_counts': model.ngram_counts,\n",
        "        'total_unigrams': model.total_unigrams\n",
        "    }\n",
        "\n",
        "    try:\n",
        "        print(f\"Saving model to {model_path}...\")\n",
        "        with open(model_path, 'wb') as f:\n",
        "            pickle.dump(state, f)\n",
        "        print(\"Model saved successfully.\")\n",
        "        return model_path\n",
        "    except IOError as e:\n",
        "        print(f\"Error saving model: {e}\")\n",
        "        raise\n",
        "\n",
        "def load_model(model_name):\n",
        "    \"\"\"\n",
        "    Loads a trained language model from disk.\n",
        "\n",
        "    Args:\n",
        "        model_name (str): Name of the model to load\n",
        "\n",
        "    Returns:\n",
        "        CountLanguageModel: Loaded model instance\n",
        "\n",
        "    Raises:\n",
        "        FileNotFoundError: If the model file doesn't exist\n",
        "        IOError: If there's an error reading the file\n",
        "    \"\"\"\n",
        "    model_path = os.path.join('models', f'{model_name}.pkl')\n",
        "\n",
        "    try:\n",
        "        print(f\"Loading model from {model_path}...\")\n",
        "        # SECURITY: pickle.load can execute code embedded in the file; only\n",
        "        # load model files you created yourself or fully trust\n",
        "        with open(model_path, 'rb') as f:\n",
        "            state = pickle.load(f)\n",
        "    except FileNotFoundError:\n",
        "        print(f\"Model file not found: {model_path}\")\n",
        "        raise\n",
        "    except IOError as e:\n",
        "        print(f\"Error loading model: {e}\")\n",
        "        raise\n",
        "\n",
        "    # Rebuild the model and restore its saved state\n",
        "    model = CountLanguageModel(state['n'])\n",
        "    model.ngram_counts = state['ngram_counts']\n",
        "    model.total_unigrams = state['total_unigrams']\n",
        "\n",
        "    print(\"Model loaded successfully.\")\n",
        "    return model\n",
        "\n",
        "def get_hyperparameters():\n",
        "    \"\"\"\n",
        "    Returns model hyperparameters.\n",
        "\n",
        "    Returns:\n",
        "        int: Size of n-grams to use in the model\n",
        "    \"\"\"\n",
        "    # Maximum n-gram order; kept in one place so it is easy to tune\n",
        "    return 5"
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "## Training the model\n",
        "\n",
        "In the cell below, we load the data, train, and save the model:"
      ],
      "metadata": {
        "id": "ug7YhOF6Dczx"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "# Main model training block\n",
        "if __name__ == \"__main__\":\n",
        "    # Initialize random seeds for reproducibility\n",
        "    set_seed(42)\n",
        "    n = get_hyperparameters()\n",
        "    # Name for the saved model file; the testing cell below reads this\n",
        "    # variable too, so run this cell first in a fresh kernel\n",
        "    model_name = \"count_model\"\n",
        "\n",
        "    # Download and prepare the Brown corpus\n",
        "    data_url = \"https://www.thelmbook.com/data/brown\"\n",
        "    train_corpus, test_corpus = download_and_prepare_data(data_url)\n",
        "\n",
        "    # Train the model and evaluate its performance\n",
        "    print(\"\\nTraining the model...\")\n",
        "    model = CountLanguageModel(n)\n",
        "    train(model, train_corpus)\n",
        "    print(\"\\nModel training complete.\")\n",
        "\n",
        "    # Evaluate on the held-out 10% split; lower perplexity is better\n",
        "    perplexity = compute_perplexity(model, test_corpus, n)\n",
        "    print(f\"\\nPerplexity on test corpus: {perplexity:.2f}\")\n",
        "\n",
        "    save_model(model, model_name)"
      ],
      "metadata": {
        "id": "l1z9hxbJDmm0",
        "outputId": "f3a798a5-abcc-4a2c-c54d-54093c48bcf4",
        "colab": {
          "base_uri": "https://localhost:8080/"
        }
      },
      "execution_count": null,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Downloading corpus from https://www.thelmbook.com/data/brown...\n",
            "Decompressing and reading the corpus...\n",
            "Corpus size: 6185606 characters\n",
            "\n",
            "Training the model...\n",
            "\n",
            "Model training complete.\n",
            "\n",
            "Perplexity on test corpus: 299.06\n",
            "Saving model to models/count_model.pkl...\n",
            "Model saved successfully.\n"
          ]
        }
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "## Testing the model\n",
        "\n",
        "Below, we load the trained model and use it to generate text:"
      ],
      "metadata": {
        "id": "Glud7JWgDyMb"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "# Main model testing block\n",
        "if __name__ == \"__main__\":\n",
        "\n",
        "    # NOTE: relies on `model_name` defined in the training cell above and on\n",
        "    # the saved models/ directory; run the training cell first in a fresh kernel\n",
        "    model = load_model(model_name)\n",
        "\n",
        "    # Test the model with some example contexts\n",
        "    contexts = [\n",
        "        \"i will build a\",\n",
        "        \"the best place to\",\n",
        "        \"she was riding a\"\n",
        "    ]\n",
        "\n",
        "    # Generate completions for each context\n",
        "    for context in contexts:\n",
        "        tokens = tokenize(context)\n",
        "        next_token = model.predict_next_token(tokens)\n",
        "        print(f\"\\nContext: {context}\")\n",
        "        print(f\"Next token: {next_token}\")\n",
        "        print(f\"Generated text: {generate_text(model, tokens, 10)}\")"
      ],
      "metadata": {
        "id": "tesX-DH9D23a",
        "outputId": "886a9061-e2eb-478d-add1-62dee4052540",
        "colab": {
          "base_uri": "https://localhost:8080/"
        }
      },
      "execution_count": null,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Loading model from models/count_model.pkl...\n",
            "Model loaded successfully.\n",
            "\n",
            "Context: i will build a\n",
            "Next token: wall\n",
            "Generated text: i will build a wall to keep the people in and added so long\n",
            "\n",
            "Context: the best place to\n",
            "Next token: live\n",
            "Generated text: the best place to live in 30 per cent to get happiness for yourself\n",
            "\n",
            "Context: she was riding a\n",
            "Next token: horse\n",
            "Generated text: she was riding a horse and showing a dog are very similar your aids\n"
          ]
        }
      ]
    }
  ],
  "metadata": {
    "colab": {
      "provenance": [],
      "authorship_tag": "ABX9TyM1ddESY73Kx2xDPOENzz6k",
      "include_colab_link": true
    },
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}