{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "47065bdb",
   "metadata": {},
   "source": [
    "# SFT_GRPO_3: Reward functions with LLM as a judge"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9ac9fb57",
   "metadata": {},
   "source": [
    "Start by loading dependencies:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5d1fb4c5-1951-4293-b643-0fca57f56a03",
   "metadata": {
    "height": 285
   },
   "outputs": [],
   "source": [
    "import os\n",
    "import re\n",
    "from datasets import load_dataset\n",
    "from dotenv import load_dotenv\n",
    "from openai import OpenAI\n",
    "\n",
    "# NOTE(review): wildcard import — MODEL_NAME (stated in the markdown below\n",
    "# to live in utils.py) and, presumably, compute_advantages/tabulate used\n",
    "# later in this notebook come from here; verify against utils.py.\n",
    "from utils import *\n",
    "\n",
    "# Load API credentials from the repo-level .env file.\n",
    "load_dotenv(\"../.env\")\n",
    "\n",
    "# OpenAI client: used for the LLM judge and for quiz generation/taking.\n",
    "client = OpenAI(api_key=os.environ[\"OPENAI_API_KEY\"])\n",
    "\n",
    "# Predibase-hosted model endpoint: used to generate the summaries.\n",
    "pb_client = OpenAI(\n",
    "    base_url=os.environ[\"PREDIBASE_MODEL_LLAMA_URL\"],\n",
    "    api_key=os.environ[\"PREDIBASE_API_KEY\"],\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "618f630b",
   "metadata": {
    "height": 30
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "9dee0416",
   "metadata": {},
   "source": [
    "## The task: creating summaries of earnings call transcripts\n",
    "\n",
    "Start by loading the earnings call dataset from Hugging Face:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a3b6a7f2-1bd7-4a3f-a136-ff17b4db8e2d",
   "metadata": {
    "height": 64
   },
   "outputs": [],
   "source": [
    "# Load the ECTSum earnings-call dataset and pick one transcript to work with.\n",
    "ds = load_dataset(\"mrSoul7766/ECTSum\")\n",
    "transcript = ds[\"train\"][1][\"text\"]\n",
    "# Preview only the first ~2k characters; full transcripts are long.\n",
    "print(transcript[:1983])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1c1b6b47",
   "metadata": {},
   "source": [
    "Define a summarize prompt and helper function, then create and print a summary \n",
    "\n",
    "(Note: the MODEL_NAME is specified in the utils.py file: here you are using Llama-3.1-8B-Instruct-dequantized to generate the summaries.)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e64055e9-7219-445e-bfa1-d00c119fe30e",
   "metadata": {
    "height": 385
   },
   "outputs": [],
   "source": [
    "SUMMARIZE_PROMPT = \"\"\"Generate a concise summary of the information in the following earnings call transcript.\n",
    "\n",
    "Only respond with the summary, do not include any extraneous text.\n",
    "\n",
    "Transcript:\n",
    "\n",
    "{transcript}\n",
    "\"\"\"\n",
    "\n",
    "def summarize(transcript, n=1, temperature=0.9):\n",
    "    \"\"\"Request n candidate summaries of a transcript from the Predibase model.\n",
    "\n",
    "    Args:\n",
    "        transcript: Raw earnings-call transcript text.\n",
    "        n: Number of completions to sample in a single API call.\n",
    "        temperature: Sampling temperature. Defaults to 0.9 (the previously\n",
    "            hard-coded value) so repeated samples of the same prompt differ.\n",
    "\n",
    "    Returns:\n",
    "        The raw ChatCompletion response; summaries are in resp.choices.\n",
    "    \"\"\"\n",
    "    prompt = SUMMARIZE_PROMPT.format(transcript=transcript)\n",
    "    messages = [\n",
    "        {\"role\": \"user\", \"content\": prompt},\n",
    "    ]\n",
    "\n",
    "    # MODEL_NAME comes from utils.py via the wildcard import above.\n",
    "    return pb_client.chat.completions.create(\n",
    "        model=MODEL_NAME,\n",
    "        messages=messages,\n",
    "        n=n,\n",
    "        temperature=temperature,\n",
    "    )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4d29d9d2-cdc5-47b2-b6a3-ece3f7bdfeba",
   "metadata": {
    "height": 64
   },
   "outputs": [],
   "source": [
    "resp = summarize(transcript)\n",
    "# n defaults to 1, so the only sampled summary is the first choice.\n",
    "summary = resp.choices[0].message.content\n",
    "print(summary)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "97645179-fee1-4abf-aaa9-864044c969f5",
   "metadata": {},
   "source": [
    "## Use an LLM as a judge of summary quality\n",
    "\n",
    "Define a prompt that will tell the OpenAI GPT-4o-mini model to assign a reward score to a summary:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7e51d32b-389b-4036-ab7e-93598d02213a",
   "metadata": {
    "height": 1031
   },
   "outputs": [],
   "source": [
    "JUDGE_PROMPT_V1 = \"\"\"\n",
    "Rate the following summary of an earnings call transcript on a \n",
    "scale from 1 to 10. \n",
    "\n",
    "1 means the summary is very poor, 10 means the summary is very good.\n",
    "\n",
    "Provide reasoning followed by the final score at the end \n",
    "surrounded by <score> tags.\n",
    "\n",
    "For example:\n",
    "\n",
    "<score>1</score>\n",
    "\n",
    "Transcript:\n",
    "\n",
    "{transcript}\n",
    "\n",
    "Summary:\n",
    "\n",
    "{summary}\n",
    "\"\"\"\n",
    "\n",
    "def judge_reward_v1(\n",
    "    transcript: str,\n",
    "    summary: str, \n",
    "    model: str = \"gpt-4o-mini\", \n",
    "    verbose: bool = False,\n",
    ") -> float:\n",
    "    \"\"\"Score a summary with an LLM judge; returns a reward in [0.0, 1.0].\n",
    "\n",
    "    The judge is prompted to emit an integer 1-10 inside <score> tags;\n",
    "    that integer is parsed and rescaled to [0, 1]. Completions with no\n",
    "    parseable score earn a reward of 0.0.\n",
    "    \"\"\"\n",
    "    prompt = JUDGE_PROMPT_V1.format(\n",
    "        transcript=transcript, \n",
    "        summary=summary,\n",
    "    )\n",
    "    messages = [\n",
    "        {\"role\": \"user\", \"content\": prompt},\n",
    "    ]\n",
    "\n",
    "    # temperature=0 keeps the judge (near-)deterministic per summary.\n",
    "    resp = client.chat.completions.create(\n",
    "        model=model,\n",
    "        messages=messages,\n",
    "        n=1,\n",
    "        temperature=0,\n",
    "    )\n",
    "    completion = resp.choices[0].message.content\n",
    "\n",
    "    if verbose:\n",
    "        print(completion)\n",
    "\n",
    "    # Extract the integer between <score> tags; malformed output -> 0.0.\n",
    "    match = re.search(r\"<score>(\\d+)<\\/score>\", completion)\n",
    "    if match is None:\n",
    "        return 0.0\n",
    "\n",
    "    try:\n",
    "        score = int(match.group(1).strip())\n",
    "    except ValueError:\n",
    "        # \\d+ should always parse as int; guard kept narrow rather than a\n",
    "        # bare except that would also swallow KeyboardInterrupt and bugs.\n",
    "        return 0.0\n",
    "\n",
    "    # Rescale the 1-10 judge score to a [0, 1] reward.\n",
    "    return score / 10"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "58e525ad",
   "metadata": {},
   "source": [
    "Now score the summary you generated above using the new reward function:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1a0dc082-edfe-4f19-be8c-535595914f50",
   "metadata": {
    "height": 47
   },
   "outputs": [],
   "source": [
    "# verbose=True prints the judge's reasoning before the numeric reward.\n",
    "score = judge_reward_v1(transcript, summary, verbose=True)\n",
    "print(score)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "90fec318",
   "metadata": {},
   "source": [
    "Now generate 8 new summaries and score each one:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cfb20ac7-6a0c-4bd2-a325-35362a00bf60",
   "metadata": {
    "height": 47
   },
   "outputs": [],
   "source": [
    "# Sample 8 candidate summaries in one call; temperature 0.9 gives variety.\n",
    "resp = summarize(transcript, n=8)\n",
    "summaries = [choice.message.content for choice in resp.choices]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6b8ad39e-6806-48d8-9636-5dd572e401f0",
   "metadata": {
    "height": 62
   },
   "outputs": [],
   "source": [
    "# Judge each candidate summary; each reward is in [0, 1].\n",
    "scores = [judge_reward_v1(transcript, summary) for summary in summaries]\n",
    "scores"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b1b30c08",
   "metadata": {},
   "source": [
    "## Taking a quiz to assign a reward score\n",
    "\n",
    "In this section, you'll create a multiple choice quiz that tests key facts from the earnings call transcript. You'll then ask another LLM to take the quiz using different call summaries, and use the quiz score as the reward score.\n",
    "\n",
    "Start by creating the quiz prompt:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e2fedb41-a3ca-436f-b495-937ded53b05e",
   "metadata": {
    "height": 606
   },
   "outputs": [],
   "source": [
    "from pydantic import BaseModel\n",
    "from random import shuffle\n",
    "\n",
    "\n",
    "# Prompt for GPT-4o-mini to write a 10-question multiple-choice quiz from\n",
    "# the transcript. The quiz is later answered using only a summary, and the\n",
    "# fraction of correct answers becomes that summary's reward score.\n",
    "QUIZ_PROMPT = \"\"\"\n",
    "Generate a multiple-choice quiz based on the information \n",
    "in the following earnings call transcript.\n",
    "\n",
    "Example:\n",
    "\n",
    "```\n",
    "1. What was the q1 adjusted earnings per share?\n",
    "a) $3.34\n",
    "b) $5.32\n",
    "c) $2.49\n",
    "d) $7.78\n",
    "\n",
    "2. By what percent did same store sales rise in q1?\n",
    "a) 29.4%\n",
    "b) 32.1%\n",
    "c) 24.7%\n",
    "d) 21.2%\n",
    "\n",
    "===== ANSWERS =====\n",
    "1. a\n",
    "2. c\n",
    "```\n",
    "\n",
    "Limit the length of the quiz to the top 10 most relevant questions for financial analysts.\n",
    "\n",
    "Transcript:\n",
    "\n",
    "{transcript}\n",
    "\"\"\""
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1cc6fac5",
   "metadata": {},
   "source": [
    "Next, define pydantic classes that define the structure of an individual question, and a quiz comprised of multiple questions. Then define a helper function to create a quiz using structured response from GPT-4o-mini:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "31c0769d-ad40-4159-8c3a-3d57acd8cd6f",
   "metadata": {
    "height": 1031
   },
   "outputs": [],
   "source": [
    "class Question(BaseModel):\n",
    "    \"\"\"A single multiple-choice question; `answer` is the index of the\n",
    "    correct entry in `options`.\"\"\"\n",
    "    text: str\n",
    "    options: list[str]\n",
    "    answer: int\n",
    "\n",
    "    def shuffle_options(self) -> None:\n",
    "        \"\"\"Shuffle the options while preserving the correct answer\"\"\"\n",
    "        # Get the correct answer text\n",
    "        correct = self.options[self.answer]\n",
    "        \n",
    "        # Shuffle the options (uses the global random stream)\n",
    "        shuffled = self.options.copy()\n",
    "        shuffle(shuffled)\n",
    "        \n",
    "        # Update the answer index to match new position\n",
    "        # (if option texts repeat, index() picks the first duplicate,\n",
    "        # which still carries the correct answer text).\n",
    "        self.options = shuffled\n",
    "        self.answer = shuffled.index(correct)\n",
    "\n",
    "    def __str__(self) -> str:\n",
    "        \"\"\"Pretty print a single question\"\"\"\n",
    "        output = [self.text]\n",
    "        # chr(65 + i) renders option 0 as 'A', 1 as 'B', ...\n",
    "        for i, option in enumerate(self.options):\n",
    "            output.append(f\"{chr(65+i)}. {option}\")\n",
    "        return \"\\n\".join(output)\n",
    "\n",
    "\n",
    "class Quiz(BaseModel):\n",
    "    \"\"\"An ordered collection of Question objects.\"\"\"\n",
    "    questions: list[Question]\n",
    "\n",
    "    def shuffle_all_questions(self) -> None:\n",
    "        \"\"\"Shuffle the options for all questions in the quiz\"\"\"\n",
    "        for question in self.questions:\n",
    "            question.shuffle_options()\n",
    "    \n",
    "    def __str__(self) -> str:\n",
    "        \"\"\"Pretty print the entire quiz\"\"\"\n",
    "        output = []\n",
    "        for i, question in enumerate(self.questions, 1):\n",
    "            output.append(f\"\\nQuestion {i}:\")\n",
    "            output.append(str(question))\n",
    "        return \"\\n\".join(output)\n",
    "\n",
    "\n",
    "def create_quiz(transcript: str) -> Quiz:\n",
    "    \"\"\"Generate a multiple-choice quiz from a transcript.\n",
    "\n",
    "    Uses OpenAI structured outputs to parse the response directly into a\n",
    "    Quiz, then shuffles each question's options so the correct letter is\n",
    "    not predictable from the model's generation order.\n",
    "    \"\"\"\n",
    "    prompt = QUIZ_PROMPT.format(transcript=transcript)\n",
    "    messages = [\n",
    "        {\"role\": \"user\", \"content\": prompt},\n",
    "    ]\n",
    "    resp = client.beta.chat.completions.parse(\n",
    "        model=\"gpt-4o-mini\",\n",
    "        messages=messages,\n",
    "        temperature=0.7,\n",
    "        response_format=Quiz,\n",
    "    )\n",
    "\n",
    "    quiz = resp.choices[0].message.parsed\n",
    "    quiz.shuffle_all_questions()\n",
    "\n",
    "    return quiz"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ebaff5c7-7995-46fb-9083-08e0df679f90",
   "metadata": {
    "height": 47
   },
   "outputs": [],
   "source": [
    "quiz = create_quiz(transcript)\n",
    "# Quiz.__str__ renders numbered questions with lettered options.\n",
    "print(quiz)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "af5a36a6",
   "metadata": {},
   "source": [
    "Now, define a function that asks an LLM to take a quiz, using a transcript summary as the source material:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "de5b7501-1557-40b7-acf4-8dbff0bcf6a0",
   "metadata": {
    "height": 861
   },
   "outputs": [],
   "source": [
    "letter_to_index = {\"A\": 0, \"B\": 1, \"C\": 2, \"D\": 3}\n",
    "index_to_letter = [\"A\", \"B\", \"C\", \"D\"]\n",
    "\n",
    "\n",
    "TAKE_QUIZ_PROMPT = \"\"\"Use the provided summary of a transcript \n",
    "to answer the following quiz.\n",
    "\n",
    "Quiz:\n",
    "\n",
    "{quiz}\n",
    "\n",
    "Summary:\n",
    "\n",
    "{summary}\n",
    "\n",
    "Respond with just a list of answers and no additional text, \n",
    "for example:\n",
    "\n",
    "[A, D, C, B, B, C, D, A, A, B]\n",
    "\n",
    "You must provide an answer for all 10 questions. \n",
    "If you don't know the answer, answer with \"0\" for that question. \n",
    "Example:\n",
    "\n",
    "[A, D, 0, B, B, C, D, A, A, B]\n",
    "\"\"\"\n",
    "\n",
    "def take_quiz(summary, quiz):\n",
    "    \"\"\"Have GPT-4o-mini answer the quiz using only the summary.\n",
    "\n",
    "    Returns the answers in question order as a list of strings\n",
    "    (letters, or \"0\" where the model could not answer).\n",
    "    \"\"\"\n",
    "    # Render each question with lettered options (A-D), questions\n",
    "    # separated by blank lines.\n",
    "    question_strs = []\n",
    "    for question in quiz.questions:\n",
    "        question_str = question.text\n",
    "        for i, option in enumerate(question.options):\n",
    "            letter = index_to_letter[i]\n",
    "            question_str += f\"\\n{letter}. {option}\"\n",
    "        question_strs.append(question_str)\n",
    "    quiz_str = \"\\n\\n\".join(question_strs)\n",
    "\n",
    "    prompt = TAKE_QUIZ_PROMPT.format(quiz=quiz_str, summary=summary)\n",
    "    resp = client.chat.completions.create(\n",
    "        model=\"gpt-4o-mini\",\n",
    "        messages=[{\"role\": \"user\", \"content\": prompt}],\n",
    "        temperature=0,\n",
    "    )\n",
    "    resp_str = resp.choices[0].message.content\n",
    "\n",
    "    # Parse the bracketed list. Splitting on \",\" and stripping each item\n",
    "    # tolerates surrounding whitespace/newlines and answers written as\n",
    "    # \"[A,D,0]\" without spaces, which the previous split(\", \") did not.\n",
    "    answers = [a.strip() for a in resp_str.strip().strip(\"[]\").split(\",\")]\n",
    "\n",
    "    return answers"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0d284eb9-c0a4-4ae4-bf7d-e99ff16f6dfa",
   "metadata": {
    "height": 47
   },
   "outputs": [],
   "source": [
    "# Take the quiz using the first generated summary as source material.\n",
    "answers = take_quiz(summaries[0], quiz)\n",
    "answers"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0e7b338d",
   "metadata": {},
   "source": [
    "Finally, score the LLM's answers to the quiz:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3724319d-4078-4842-aab7-1274f32765cd",
   "metadata": {
    "height": 183
   },
   "outputs": [],
   "source": [
    "def score_quiz_answers(answers, quiz):\n",
    "    \"\"\"Return the fraction of quiz questions answered correctly (0.0-1.0).\n",
    "\n",
    "    `answers` are letter strings; each is compared against the letter of\n",
    "    the question's correct option index.\n",
    "    \"\"\"\n",
    "    assert len(answers) == len(quiz.questions)\n",
    "\n",
    "    # Count matches between given letters and expected letters.\n",
    "    n_correct = sum(\n",
    "        1\n",
    "        for given, question in zip(answers, quiz.questions)\n",
    "        if given == index_to_letter[question.answer]\n",
    "    )\n",
    "    return n_correct / len(answers)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "00bfebf7-3d36-4df2-9db2-fb4afee39328",
   "metadata": {
    "height": 30
   },
   "outputs": [],
   "source": [
    "# Fraction of the quiz questions answered correctly from this summary.\n",
    "score_quiz_answers(answers, quiz)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2c792e93",
   "metadata": {},
   "source": [
    "Finally, generate rewards and advantages for all 8 summaries you created earlier:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c1ddac2a-33ec-4a58-8831-170aee7e2a38",
   "metadata": {
    "height": 181
   },
   "outputs": [],
   "source": [
    "def print_quiz_table(all_answers, rewards):\n",
    "    \"\"\"Print a grid table of reward and advantage for each summary index.\"\"\"\n",
    "    # compute_advantages and tabulate come from the utils wildcard import.\n",
    "    advantages = compute_advantages(rewards)\n",
    "    rows = list(zip(range(len(all_answers)), rewards, advantages))\n",
    "\n",
    "    rendered = tabulate(\n",
    "        rows,\n",
    "        headers=[\"Index\", \"Reward\", \"Advantage\"],\n",
    "        tablefmt=\"grid\",\n",
    "    )\n",
    "    print(rendered)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9fbd259d-bb52-48ac-84c3-a2313e30fddc",
   "metadata": {
    "height": 115
   },
   "outputs": [],
   "source": [
    "# Quiz-based rewards: each summary's reward is its quiz accuracy.\n",
    "all_answers = []\n",
    "quiz_rewards = []\n",
    "for summary in summaries:\n",
    "    answers = take_quiz(summary, quiz)\n",
    "    all_answers.append(answers)\n",
    "    quiz_rewards.append(score_quiz_answers(answers, quiz))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1fc11146-6d19-4324-aea4-28ae2db28541",
   "metadata": {
    "height": 30
   },
   "outputs": [],
   "source": [
    "# Reward and group-relative advantage for each of the 8 summaries.\n",
    "print_quiz_table(all_answers, quiz_rewards)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
