{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "eb29ed75",
   "metadata": {},
   "source": [
    "# SFT_GRPO_5: Calculating loss in GRPO"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c3ee5093",
   "metadata": {},
   "source": [
    "Start by loading dependencies and BabyLLama, a small LLM to use in this lesson:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "88d4401c-b67a-4bf1-868c-53792501e4b6",
   "metadata": {
    "height": 200
   },
   "outputs": [],
   "source": [
     "from transformers import AutoModelForCausalLM, AutoTokenizer\n",
     "# NOTE(review): star import -- presumably supplies torch, F\n",
     "# (torch.nn.functional), np, and visualize_clipped_ratios used by later\n",
     "# cells; confirm against utils.py\n",
     "from utils import *\n",
     "\n",
     "# Initialize model and tokenizer\n",
     "model_str = 'babylm/babyllama-100m-2024'\n",
     "base_model = AutoModelForCausalLM.from_pretrained(model_str)\n",
     "tokenizer = AutoTokenizer.from_pretrained(model_str)\n",
     "\n",
     "# pad on the left so we can append new tokens on the right\n",
     "tokenizer.padding_side = \"left\"\n",
     "tokenizer.truncation_side = \"left\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d7deb458-16bf-46d5-880e-e2736285d47b",
   "metadata": {
    "height": 30
   },
   "outputs": [],
   "source": [
    "print(base_model)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "45480484-7268-4b73-9ba7-c1a1ad73433d",
   "metadata": {
    "height": 368
   },
   "outputs": [],
   "source": [
    "prompt = \"The quick brown fox jumped over the \"\n",
    "\n",
    "# Tokenize the prompt\n",
    "input_ids = tokenizer(prompt, return_tensors=\"pt\")\n",
    "print(input_ids)\n",
    "\n",
    "# Generate next 2 tokens\n",
    "with torch.no_grad():\n",
    "    outputs = base_model.generate(\n",
    "        **input_ids,\n",
    "        max_new_tokens=2,\n",
    "        pad_token_id=tokenizer.pad_token_id\n",
    "    )\n",
    "\n",
    "# Decode the generated text\n",
    "generated_text = tokenizer.decode(\n",
    "    outputs[0], skip_special_tokens=True\n",
    ")\n",
    "generated_portion = generated_text[len(prompt):]\n",
    "print(f\"Generated text: {prompt}\\033[94m{generated_portion}\\033[0m\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b8f79557",
   "metadata": {},
   "source": [
    "## Create reference and policy models \n",
    "\n",
     "The **reference model** is the base LLM, and remains unchanged throughout training. \n",
    "\n",
    "The **policy** model is the same model with a LoRA adapter applied - the weights of the LoRA adapter get updated throughout the RFT training process."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e8a2023e-01d3-478c-8546-6ad2d3a6def8",
   "metadata": {
    "height": 353
   },
   "outputs": [],
   "source": [
    "import copy\n",
    "from peft import LoraConfig, get_peft_model\n",
    "\n",
    "# Create a copy of the base model to use as the reference model\n",
    "ref_model = copy.deepcopy(base_model)\n",
    "\n",
    "# Initialize LoRA configuration\n",
    "lora_config = LoraConfig(\n",
    "    r=8,  # Rank of the update matrices\n",
    "    lora_alpha=32,  # Alpha scaling factor\n",
    "    # Which modules to apply LoRA to\n",
    "    target_modules=[\"q_proj\", \"v_proj\"],  \n",
    "    lora_dropout=0.1,\n",
    "    init_lora_weights=False,\n",
    "    bias=\"none\",\n",
    "    task_type=\"CAUSAL_LM\"\n",
    ")\n",
    "\n",
    "# Apply LoRA to model\n",
    "model = get_peft_model(base_model, lora_config)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "380a602b",
   "metadata": {},
   "source": [
    "Examine the policy model:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d10b537c-3693-4d77-ad29-5c21d759dd10",
   "metadata": {
    "height": 30
   },
   "outputs": [],
   "source": [
    "print(model)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "bc2bf818",
   "metadata": {},
   "source": [
    "## Calculating the policy loss ratio"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "10af0abd-ff56-4ce5-b818-e4d2d5de3980",
   "metadata": {
    "height": 555
   },
   "outputs": [],
   "source": [
     "def prepare_inputs(prompt, completion):\n",
     "    \"\"\"Tokenize a prompt/completion pair and build masks for the GRPO loss.\n",
     "\n",
     "    Returns (input_ids, attention_mask, completion_mask); completion_mask\n",
     "    is 1.0 at completion positions and 0.0 at prompt positions, so the\n",
     "    loss can be restricted to the generated tokens.\n",
     "    \"\"\"\n",
     "    # Tokenization\n",
     "    # NOTE(review): tokenizing the completion separately may prepend\n",
     "    # special tokens (e.g. BOS) depending on the tokenizer; consider\n",
     "    # add_special_tokens=False for the completion -- confirm for this\n",
     "    # tokenizer.\n",
     "    prompt_tokens = tokenizer(prompt, return_tensors=\"pt\")\n",
     "    completion_tokens = tokenizer(completion, return_tensors=\"pt\")\n",
     "\n",
     "    # Combined input: prompt followed by completion, shape (1, total_length)\n",
     "    input_ids = torch.cat(\n",
     "        [\n",
     "            prompt_tokens[\"input_ids\"],\n",
     "            completion_tokens[\"input_ids\"]\n",
     "        ], \n",
     "        dim=1\n",
     "    )\n",
     "    attention_mask = torch.cat(\n",
     "        [\n",
     "            prompt_tokens[\"attention_mask\"],\n",
     "            completion_tokens[\"attention_mask\"]\n",
     "        ],\n",
     "        dim=1\n",
     "    )\n",
     "\n",
     "    prompt_length = prompt_tokens[\"input_ids\"].shape[1]\n",
     "    completion_length = completion_tokens[\"input_ids\"].shape[1]\n",
     "    total_length = prompt_length + completion_length\n",
     "\n",
     "    # Create a mask to identify the tokens that \n",
     "    # were generated by the model in the full sequence\n",
     "    # (1-D; broadcasts against the (1, total_length) per-token losses)\n",
     "    completion_mask = torch.zeros(total_length, dtype=torch.float32)\n",
     "    completion_mask[prompt_length:] = 1.0\n",
     "\n",
     "    return input_ids, attention_mask, completion_mask"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3a1784e3-25db-4d46-a43c-cfe8258cca9c",
   "metadata": {
    "height": 249
   },
   "outputs": [],
   "source": [
     "def compute_log_probs(model, input_ids, attention_mask):\n",
     "    \"\"\"Log-probability assigned by `model` to each token of the sequence.\n",
     "\n",
     "    Returns a tensor with the same (batch, seq_len) shape as input_ids.\n",
     "    \"\"\"\n",
     "    outputs = model(input_ids, attention_mask=attention_mask)\n",
     "    \n",
     "    # Computing the log-probability of each token in the sequence\n",
     "    # outputs.logits is the logits for all tokens in the vocabulary for each position in the sequence\n",
     "    log_probs = F.log_softmax(outputs.logits, dim=-1)\n",
     "    \n",
     "    # Extract the log-probability for the actual token that \n",
     "    # was generated at each position in the sequence.\n",
     "    # NOTE(review): logits at position t parameterize the distribution of\n",
     "    # the *next* token (t+1); gathering input_ids[t] here skips the usual\n",
     "    # shift (logits[:, :-1] vs input_ids[:, 1:]). Policy and reference are\n",
     "    # scored identically so the ratio is still consistent, but confirm the\n",
     "    # missing shift is intentional for this lesson.\n",
     "    return log_probs.gather(\n",
     "        dim=-1, \n",
     "        index=input_ids.unsqueeze(-1)\n",
     "    ).squeeze(-1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "464e9ac0-e59d-4807-a2d7-dcbb9e59a5c1",
   "metadata": {
    "height": 487
   },
   "outputs": [],
   "source": [
    "def grpo_loss(model, ref_model, prompt, completion, advantage):\n",
    "    input_ids, attention_mask, completion_mask = prepare_inputs(\n",
    "        prompt, completion\n",
    "    )\n",
    "\n",
    "    # Model forward\n",
    "    token_log_probs = compute_log_probs(\n",
    "        model, input_ids, attention_mask\n",
    "    )\n",
    "    with torch.no_grad():\n",
    "        ref_token_log_probs = compute_log_probs(\n",
    "            ref_model, input_ids, attention_mask\n",
    "    )\n",
    "\n",
    "    # ratio = p_model / p_ref = exp(log(p_model) - log(p_ref))\n",
    "    ratio = torch.exp(token_log_probs - ref_token_log_probs)\n",
    "\n",
    "    # Scale the ratio by the advantage function\n",
    "    policy_loss = ratio * advantage\n",
    "\n",
    "    # We want to maximize reward, so we make the loss negative \n",
    "    # because optimizers minimize loss.\n",
    "    per_token_loss = -policy_loss\n",
    "\n",
    "    # Only compute loss over the output tokens\n",
    "    loss = (per_token_loss * completion_mask).sum() / completion_mask.sum()\n",
    "    return loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "153fa8ea-74a2-4794-964f-82a6c516fb59",
   "metadata": {
    "height": 30
   },
   "outputs": [],
   "source": [
    "grpo_loss(model, ref_model, prompt, \"fence and\", advantage=2.0)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ee71f374",
   "metadata": {},
   "source": [
     "During the first step of training, the reference and policy models are identical. So the loss comes from the advantage of the response:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b5d9e2d1-6f00-4c5d-a4c1-54577cd4ecac",
   "metadata": {
    "height": 96
   },
   "outputs": [],
   "source": [
    "# At step 1, the model and reference model are the same\n",
    "# So the loss is the advantage function because the ratio of \n",
    "# per-token log-probabilities is 1\n",
    "grpo_loss(ref_model, ref_model, prompt, \"fence and\", advantage=2.0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "93926344-69a3-4375-95b6-064116726716",
   "metadata": {
    "height": 268
   },
   "outputs": [],
   "source": [
    "completion = \"fence and\"\n",
    "\n",
    "input_ids, attention_mask, completion_mask = prepare_inputs(\n",
    "    prompt, completion\n",
    ")\n",
    "with torch.no_grad():\n",
    "    token_log_probs = compute_log_probs(\n",
    "        model, input_ids, attention_mask\n",
    "    )\n",
    "    ref_token_log_probs = compute_log_probs(\n",
    "        ref_model, input_ids, attention_mask\n",
    "    )\n",
    "\n",
    "ratio = torch.exp(token_log_probs - ref_token_log_probs)\n",
    "print(ratio)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "db453989",
   "metadata": {},
   "source": [
    "## Adding clipping to the policy loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "773e1a31-4e5b-430f-ad67-bd28e2363a3c",
   "metadata": {
    "height": 538
   },
   "outputs": [],
   "source": [
    "def grpo_loss_with_clip(model, ref_model, prompt, completion, advantage, epsilon=0.2):\n",
    "    input_ids, attention_mask, completion_mask = prepare_inputs(\n",
    "        prompt, completion\n",
    "    )\n",
    "\n",
    "    # Model forward\n",
    "    token_log_probs = compute_log_probs(\n",
    "        model, input_ids, attention_mask\n",
    "    )\n",
    "    with torch.no_grad():\n",
    "        ref_token_log_probs = compute_log_probs(\n",
    "            ref_model, input_ids, attention_mask\n",
    "    )\n",
    "\n",
    "    # ratio = p_model / p_ref = exp(log(p_model) - log(p_ref))\n",
    "    ratio = torch.exp(token_log_probs - ref_token_log_probs)\n",
    "\n",
    "    # Scale the ratio by the advantage function\n",
    "    unclipped = ratio * advantage\n",
    "    clipped = torch.clamp(ratio, 1 - epsilon, 1 + epsilon) * advantage\n",
    "\n",
    "    policy_loss = torch.min(unclipped, clipped)\n",
    "\n",
    "    # We want to maximize reward, so we make the loss negative \n",
    "    # because optimizers minimize loss.\n",
    "    per_token_loss = -policy_loss\n",
    "\n",
    "    # Only compute loss over the output tokens\n",
    "    loss = (per_token_loss * completion_mask).sum() / completion_mask.sum()\n",
    "    return loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8c5956be-ea40-4ae4-b12e-5c14bfeb3a37",
   "metadata": {
    "height": 149
   },
   "outputs": [],
   "source": [
    "grpo_loss_with_clip(\n",
    "    model,\n",
    "    ref_model,\n",
    "    prompt,\n",
    "    \"fence and\",\n",
    "    advantage=2.0,\n",
    "    epsilon=0.2\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "05732407",
   "metadata": {},
   "source": [
    "Clipping has no effect during the first step of training - the loss is still the advantage:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7b1907b5-035d-469e-a434-778dd049473f",
   "metadata": {
    "height": 181
   },
   "outputs": [],
   "source": [
    "# If we pass the reference model as also the model we're training, the ratio will be 1, \n",
    "# so your loss will be the advantage.\n",
    "grpo_loss_with_clip(\n",
    "    ref_model,\n",
    "    ref_model,\n",
    "    prompt,\n",
    "    \"fence and\",\n",
    "    advantage=2.0\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a132f44f",
   "metadata": {},
   "source": [
    "Check how many of the output tokens were clipped:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e757827c-f44e-4881-8321-be2b42c92171",
   "metadata": {
    "height": 368
   },
   "outputs": [],
   "source": [
     "import pandas as pd\n",
     "\n",
     "completion = \"fence and\"\n",
     "\n",
     "input_ids, attention_mask, _ = prepare_inputs(prompt, completion)\n",
     "with torch.no_grad():\n",
     "    token_log_probs = compute_log_probs(\n",
     "        model, input_ids, attention_mask\n",
     "    )\n",
     "    ref_token_log_probs = compute_log_probs(\n",
     "        ref_model, input_ids, attention_mask\n",
     "    )\n",
     "\n",
     "with torch.no_grad():\n",
     "    epsilon = 0.2\n",
     "    ratio = torch.exp(token_log_probs - ref_token_log_probs)\n",
     "    ratio_unclipped = ratio\n",
     "    ratio_clipped = torch.clamp(ratio, 1 - epsilon, 1 + epsilon)\n",
     "\n",
     "# NOTE(review): the hard-coded 9 is presumably the prompt length in\n",
     "# tokens; safer to reuse the prompt_length / completion_mask computed by\n",
     "# prepare_inputs -- confirm before changing the prompt.\n",
     "visualize_clipped_ratios(ratio_unclipped[0][9:], ratio_clipped[0][9:], epsilon)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4b0b1a7e",
   "metadata": {},
   "source": [
    "## Adding KL Divergence to the loss "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fffdb3c6-eeaa-4ff2-a774-881e1f806a08",
   "metadata": {
    "height": 623
   },
   "outputs": [],
   "source": [
    "def grpo_loss_with_kl(model, ref_model, prompt, completion, advantage, epsilon=0.2, beta=0.1):\n",
    "    input_ids, attention_mask, completion_mask = prepare_inputs(\n",
    "        prompt, completion\n",
    "    )\n",
    "\n",
    "    # Model forward\n",
    "    token_log_probs = compute_log_probs(\n",
    "        model, input_ids, attention_mask\n",
    "    )\n",
    "    with torch.no_grad():\n",
    "        ref_token_log_probs = compute_log_probs(\n",
    "            ref_model, input_ids, attention_mask\n",
    "    )\n",
    "\n",
    "    # ratio = p_model / p_ref = exp(log(p_model) - log(p_ref))\n",
    "    ratio = torch.exp(token_log_probs - ref_token_log_probs)\n",
    "\n",
    "    # Scale the ratio by the advantage function\n",
    "    unclipped = ratio * advantage\n",
    "    clipped = torch.clamp(ratio, 1 - epsilon, 1 + epsilon) * advantage\n",
    "\n",
    "    policy_loss = torch.min(unclipped, clipped)\n",
    "\n",
    "    # Compute the per-token KL divergence to encourage the model \n",
    "    # to stay close to the reference model\n",
    "    delta = token_log_probs - ref_token_log_probs\n",
    "    per_token_kl = torch.exp(-delta) + delta - 1\n",
    "\n",
    "    # We want to maximize reward, so we make the loss negative \n",
    "    # because optimizers minimize loss.\n",
    "    per_token_loss = -(policy_loss - beta * per_token_kl)\n",
    "\n",
    "    # Only compute loss over the output tokens\n",
    "    loss = (per_token_loss * completion_mask).sum() / completion_mask.sum()\n",
    "    return loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f103ad0d-67e5-41f1-a2fc-28b140661465",
   "metadata": {
    "height": 419
   },
   "outputs": [],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "\n",
    "# Define the range of Δ (log-probability difference between \n",
    "# model and reference)\n",
    "delta = np.linspace(-6, 6, 500)\n",
    "\n",
    "# Compute the per-token reverse KL divergence: KL(π_ref || π)\n",
    "kl_divergence = np.exp(-delta) + delta - 1\n",
    "\n",
    "# Plot the KL divergence\n",
    "plt.figure(figsize=(8, 5))\n",
    "plt.plot(delta, kl_divergence, label=r'$KL(\\pi_{\\mathrm{ref}} || \\pi) = e^{-\\Delta} + \\Delta - 1$')\n",
    "plt.axhline(0, color='gray', linestyle='--', linewidth=0.5)\n",
    "plt.axvline(0, color='gray', linestyle='--', linewidth=0.5)\n",
    "plt.fill_between(delta, kl_divergence, where=(delta > 0), color='red', alpha=0.3, label='Overconfident region (Δ > 0)')\n",
    "plt.fill_between(delta, kl_divergence, where=(delta < 0), color='green', alpha=0.3, label='Conservative region (Δ < 0)')\n",
    "plt.title(\"KL Divergence as 'Gravitational Pull' Toward Reference Policy\")\n",
    "plt.xlabel(r'$\\Delta = \\log \\pi - \\log \\pi_{\\mathrm{ref}}$')\n",
    "plt.ylabel('KL Penalty')\n",
    "plt.legend()\n",
    "plt.grid(True)\n",
    "plt.tight_layout()\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f7439ca7-1ca3-4ecf-8260-4ca30651191d",
   "metadata": {
    "height": 234
   },
   "outputs": [],
   "source": [
    "for beta in [0, 0.1, 0.5]:\n",
    "    loss = grpo_loss_with_kl(\n",
    "        model,\n",
    "        ref_model,\n",
    "        prompt,\n",
    "        \"fence and\",\n",
    "        advantage=2.0,\n",
    "        epsilon=0.2,\n",
    "        beta=beta\n",
    "    )\n",
    "    print(f\"beta={beta}\")\n",
    "    print(f\"loss={loss.item():.3f}\")\n",
    "    print()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
