{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "This notebook contains scripts to analyze results from the HealthBench eval.\n",
    "\n",
    "Running the HealthBench eval in `simple-evals` yields files with names like `healthbench{OPTIONAL_SUBSET_NAME}_{MODEL_NAME}_{DATETIME}.json` and `*.allresults.json`, saved to your `/tmp/` folder by default.\n",
    "\n",
    "To analyze HealthBench results, move these to a desired folder, copy their file paths into the third cell of the `Imports and consts` section below, and then run the following analysis script.\n",
    "\n",
    "The key data includes the metrics in the `.json` file and the example-level metrics in the `metadata.example_level_metadata` subkey in the `allresults.json` file."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Imports and consts"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import json\n",
    "import pandas as pd\n",
    "from typing import Literal\n",
    "import matplotlib.pyplot as plt\n",
    "import numpy as np\n",
    "import textwrap\n",
    "import blobfile as bf\n",
    "from concurrent.futures import ThreadPoolExecutor\n",
    "import itertools\n",
    "from collections import Counter, defaultdict\n",
    "import matplotlib.dates as mdates\n",
    "import seaborn as sns\n",
    "import os"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "sns.set_theme(\n",
    "    style=\"dark\",\n",
    "    palette=\"muted\",\n",
    "    font=\"serif\",\n",
    "    rc={\n",
    "        \"figure.dpi\": 120, # modify for paper figures\n",
    "        \"axes.titleweight\": \"normal\",\n",
    "        \"axes.labelweight\": \"normal\",\n",
    "        \"axes.spines.top\": False,\n",
    "        \"axes.spines.right\": False,\n",
    "        \"legend.frameon\": False,\n",
    "        \"figure.autolayout\": True,\n",
    "        \"legend.fontsize\": \"small\",\n",
    "        \"legend.title_fontsize\": \"medium\",\n",
    "        \"xtick.labelsize\": \"small\",\n",
    "        \"ytick.labelsize\": \"small\",\n",
    "    }\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "fp_main_eval = 'az://openaipublic/simple-evals/healthbench/2025-05-07-06-14-12_oss_eval.jsonl'\n",
    "fp_meta_eval = 'az://openaipublic/simple-evals/healthbench/2025-05-07-06-14-12_oss_meta_eval.jsonl'\n",
    "\n",
    "# what directory should results be loaded from?\n",
    "tmp_dir = '/Users/rahul/Documents/healthbench/data/'\n",
    "# what directory should results be saved to?\n",
    "results_dir = '/Users/rahul/Documents/healthbench/results/'\n",
    "\n",
    "main_filename_list = \"\"\"healthbench_gpt-3.5-turbo-0125_20250507_0653.json\n",
    "healthbench_gpt-4.1_20250507_0653.json\"\"\".splitlines()\n",
    "main_allresults_filename_list = [f.replace('.json', '_allresults.json') for f in main_filename_list]\n",
    "\n",
    "human_eval_filename_list = \"\"\"healthbench_apr_2025_reference_referencecompletions_20250507_0713.json\n",
    "healthbench_aug_2024_reference_referencecompletions_20250507_0713.json\n",
    "healthbench_apr_2025_reference_humanbaseline_20250507_0659.json\n",
    "healthbench_aug_2024_reference_humanbaseline_20250507_0659.json\n",
    "healthbench_no_reference_humanbaseline_20250507_0659.json\"\"\".splitlines()\n",
    "human_eval_allresults_filename_list = [f.replace('.json', '_allresults.json') for f in human_eval_filename_list]\n",
    "\n",
    "meta_eval_filename_str = \"\"\"healthbench_meta_gpt-4.1-mini_20250511_051648.json\"\"\".splitlines()\n",
    "meta_eval_allresults_filename_list = [f.replace('.json', '_allresults.json') for f in meta_eval_filename_str]\n",
    "\n",
    "hard_filename_list = \"\"\"healthbench_hard_o3_20250508_204645.json\n",
    "healthbench_hard_o1_20250508_204650.json\n",
    "healthbench_hard_gpt-4o-2024-08-06_20250508_204655.json\n",
    "healthbench_hard_gpt-4.1_20250508_204647.json\n",
    "healthbench_hard_gpt-3.5-turbo-0125_20250508_204657.json\"\"\".splitlines()\n",
    "hard_allresults_filename_list = [f.replace('.json', '_allresults.json') for f in hard_filename_list]\n",
    "\n",
    "consensus_set_filename_list = \"\"\"healthbench_consensus_gpt-3.5-turbo-0125_20250509_131818.json\n",
    "healthbench_consensus_gpt-4.1_20250509_131807.json\n",
    "healthbench_consensus_gpt-4o-2024-08-06_20250509_131821.json\n",
    "healthbench_consensus_o1_20250509_131813.json\n",
    "healthbench_consensus_o3_20250509_131804.json\"\"\".splitlines()\n",
    "consensus_allresults_filename_list = [f.replace('.json', '_allresults.json') for f in consensus_set_filename_list]\n",
    "\n",
    "# used for all analyses that require many replicates, including worst-at-k, inter-replicate variance, etc. everything here should have been run with n_repeats > 1\n",
    "many_replicate_filename_list = \"\"\"healthbench_o1_20250507_0653_allresults.json\n",
    "healthbench_o3_20250507_0653_allresults.json\"\"\".splitlines()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "model_first_release = {\n",
    "    \"o3\": \"2025-04-16\",\n",
    "    \"o4-mini\": \"2025-04-16\",\n",
    "    \"gpt-4.1\": \"2025-04-14\",\n",
    "    \"gpt-4.1-mini\": \"2025-04-14\",\n",
    "    \"gpt-4.1-nano\": \"2025-04-14\",\n",
    "    \"o1\": \"2024-12-05\",\n",
    "    \"o1-pro\": \"2024-12-05\",\n",
    "    \"o3-mini\": \"2025-01-31\",\n",
    "    \"o1-mini\": \"2024-09-12\",\n",
    "    \"o1-preview\": \"2024-09-12\",\n",
    "    \"gpt-4.5-preview\": \"2025-02-27\",\n",
    "    \"gpt-4o-2024-11-20\": \"2024-11-20\",\n",
    "    \"gpt-4o-2024-08-06\": \"2024-08-06\",\n",
    "    \"gpt-4o-2024-05-13\": \"2024-05-13\",\n",
    "    \"chatgpt-4o-latest\": \"2025-03-27\",\n",
    "    \"gpt-4o-mini\": \"2024-07-18\",\n",
    "    \"gpt-4-turbo-2024-04-09\": \"2024-04-09\",\n",
    "    \"gpt-3.5-turbo-0125\": \"2024-01-25\",\n",
    "    \"gpt-4-0613\": \"2023-06-13\",\n",
    "}\n",
    "\n",
    "models_to_canonical_name = {\n",
    "    'o3': \"o3\",\n",
    "    'o3_high': \"o3-high\",\n",
    "    'o3_low': \"o3-low\",\n",
    "    'o4-mini': \"o4-mini\",\n",
    "    'o4-mini_high': \"o4-mini-high\",\n",
    "    'o4-mini_low': \"o4-mini-low\",\n",
    "    'gpt-4.1': \"GPT-4.1\",\n",
    "    'gpt-4.1-mini': \"GPT-4.1 mini\",\n",
    "    'gpt-4.1-nano': \"GPT-4.1 nano\",\n",
    "    'o1': \"o1\",\n",
    "    \"o1_high\": \"o1-high\",\n",
    "    \"o1_low\": \"o1-low\",\n",
    "    'o1-pro': \"o1-pro\",\n",
    "    'o1-preview': \"o1-preview\",\n",
    "    'o1-mini': \"o1-mini\",\n",
    "    'o3-mini': \"o3-mini\",\n",
    "    'o3-mini_low': \"o3-mini-low\",\n",
    "    'o3-mini_high': \"o3-mini-high\",\n",
    "    'gpt-4.5-preview': \"gpt-4.5-preview\",\n",
    "    'gpt-4o-2024-11-20': \"GPT-4o (Nov 2024)\",\n",
    "    'gpt-4o-2024-08-06': \"GPT-4o (Aug 2024)\",\n",
    "    'gpt-4o-2024-05-13': \"GPT-4o (May 2024)\",\n",
    "    'gpt-4o': \"GPT-4o (Aug 2024)\",\n",
    "    'chatgpt-4o-latest': \"ChatGPT-4o (latest)\",\n",
    "    'gpt-4o-mini': \"GPT-4o mini\",\n",
    "    'gpt-4-turbo-2024-04-09': \"GPT-4 Turbo\",\n",
    "    'gpt-3.5-turbo-0125': \"GPT-3.5 Turbo\",\n",
    "    'gpt-4-0613': \"GPT-4\",\n",
    "    'apr_2025_reference_humanbaseline': \"Physicians with Apr 2025 models\",\n",
    "    'apr_2025_reference_referencecompletions': \"Apr 2025 model reference responses\",\n",
    "    'aug_2024_reference_humanbaseline': \"Physicians with Sep 2024 models\",\n",
    "    'aug_2024_reference_referencecompletions': \"Sep 2024 model reference responses\",\n",
    "    'no_reference_humanbaseline': \"Physicians with no reference\",\n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "CANONICAL_CLUSTER_NAMES = {\n",
    "    'hedging': \"Responding under uncertainty\",\n",
    "    'health': \"Health data tasks\",\n",
    "    'global': \"Global health\",\n",
    "    \"communication\": \"Expertise-tailored communication\",\n",
    "    \"context\": \"Context seeking\",\n",
    "    \"emergency\": \"Emergency referrals\",\n",
    "    \"complex\": \"Response depth\"\n",
    "}\n",
    "CLUSTER_SORT_ORDER = [\n",
    "    'emergency_referrals',\n",
    "    'communication',\n",
    "    'hedging',\n",
    "    'complex_responses',\n",
    "    'health_data_tasks',\n",
    "    'global_health',\n",
    "    'context_seeking',\n",
    "]\n",
    "FULL_CLUSTER_NAMES = {\n",
    "    'communication',\n",
    "    'emergency_referrals',\n",
    "    'global_health',\n",
    "    'health_data_tasks',\n",
    "    'context_seeking',\n",
    "    'complex_responses',\n",
    "    'hedging',\n",
    "}\n",
    "CLUSTER_SORT_ORDER_SHORT = [c.split(\"_\", 1)[0] for c in CLUSTER_SORT_ORDER]\n",
    "CANONICAL_AXIS_NAMES = {\n",
    "    \"communication_quality\": \"Communication quality\",\n",
    "    \"instruction_following\": \"Instruction following\",\n",
    "    \"accuracy\": \"Accuracy\",\n",
    "    \"completeness\": \"Completeness\",\n",
    "    \"context_awareness\": \"Context awareness\",\n",
    "}\n",
    "AXIS_SORT_ORDER = [\n",
    "    'communication_quality',\n",
    "    'instruction_following',\n",
    "    'accuracy',\n",
    "    'context_awareness',\n",
    "    'completeness',\n",
    "]\n",
    "\n",
    "HUMAN_EVAL_SORT_ORDER = [\n",
    "    'apr_2025_reference_humanbaseline',\n",
    "    'apr_2025_reference_referencecompletions',\n",
    "    'aug_2024_reference_humanbaseline',\n",
    "    'aug_2024_reference_referencecompletions',\n",
    "    'no_reference_humanbaseline',\n",
    "]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Descriptive stats"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "def _strip_prefix(tag: str) -> str:\n",
    "    \"\"\"Drop any leading 'prefix:' from a tag and make it human-readable\n",
    "    (underscores/hyphens become spaces, first letter capitalized).\n",
    "    \"\"\"\n",
    "    # split(':', 1)[-1] is the text after the first colon, or the whole\n",
    "    # tag when no colon is present\n",
    "    bare = tag.split(\":\", 1)[-1]\n",
    "    return bare.replace(\"_\", \" \").replace(\"-\", \" \").capitalize()\n",
    "\n",
    "\n",
    "def get_example_summary(states: list[dict]) -> pd.DataFrame:\n",
    "    \"\"\"Build a LaTeX-ready table of example counts by theme and\n",
    "    physician-agreed category: a bolded overall total, a bolded row per\n",
    "    theme (percent of all examples), and indented category rows\n",
    "    (percent of their theme).\n",
    "    \"\"\"\n",
    "    row_labels: list[str] = []\n",
    "    row_values: list[dict[str, str]] = []\n",
    "\n",
    "    # Overall total\n",
    "    total_examples: int = len(states)\n",
    "    row_labels.append(r\"\\textbf{Total number of HealthBench examples}\")\n",
    "    row_values.append({\"Count\": f\"\\\\textbf{{{total_examples}}}\", \"Percent\": f\"\\\\textbf{{{100.0:.1f}}}\"})\n",
    "\n",
    "    # Themes and their physician categories\n",
    "    theme_counts: Counter[str] = Counter()\n",
    "    category_counts: Counter[tuple[str, str]] = Counter()\n",
    "    for state in states:\n",
    "        theme_tags = [t for t in state[\"example_tags\"] if t.startswith(\"theme:\")]\n",
    "        assert len(theme_tags) == 1, f\"Expected 1 theme tag, got {len(theme_tags)} for state {state}\"\n",
    "        theme_tag = theme_tags[0]\n",
    "        theme_counts[theme_tag] += 1\n",
    "\n",
    "        for t in state[\"example_tags\"]:\n",
    "            if t.startswith(\"physician_agreed_category:\"):\n",
    "                category_counts[(theme_tag, t)] += 1\n",
    "\n",
    "    for theme_tag, n_theme in theme_counts.most_common():\n",
    "        theme_clean = _strip_prefix(theme_tag)\n",
    "\n",
    "        # Theme row (bolded)\n",
    "        row_labels.append(r\"\\textbf{\" + theme_clean + \"}\")\n",
    "        row_values.append(\n",
    "            {\n",
    "                \"Count\": f\"\\\\textbf{{{n_theme}}}\",\n",
    "                \"Percent\": f\"\\\\textbf{{{n_theme / total_examples * 100:.1f}}}\",\n",
    "            }\n",
    "        )\n",
    "\n",
    "        # Associated physician-agreed categories, indented by one tab,\n",
    "        # sorted by descending count within the theme\n",
    "        cat_items = [\n",
    "            (category_tag, n_in_category)\n",
    "            for (ex_theme_tag, category_tag), n_in_category in category_counts.items()\n",
    "            if ex_theme_tag == theme_tag\n",
    "        ]\n",
    "        cat_items.sort(key=lambda x: x[1], reverse=True)\n",
    "\n",
    "        for category_tag, n_in_category in cat_items:\n",
    "            category_clean = _strip_prefix(category_tag)\n",
    "            # raw string: '\\q' is an invalid (deprecated) escape in a plain literal\n",
    "            row_labels.append(r\"\\qquad \" + category_clean)\n",
    "            row_values.append(\n",
    "                {\n",
    "                    \"Count\": str(n_in_category),\n",
    "                    \"Percent\": f\"{n_in_category / n_theme * 100:.1f}\",\n",
    "                }\n",
    "            )\n",
    "\n",
    "    df_examples = pd.DataFrame(row_values, index=row_labels)\n",
    "    df_examples.index.name = \"Theme and clusters\"\n",
    "    return df_examples\n",
    "\n",
    "def get_rubric_criteria_summary(states: list[dict]) -> pd.DataFrame:\n",
    "    \"\"\"Build a LaTeX-ready table of rubric-criterion counts, broken down\n",
    "    by level (cluster vs. example-specific) and by axis tag.\n",
    "\n",
    "    Each rubric must carry exactly one 'axis:' tag and exactly one\n",
    "    'level:' tag (asserted below); percentages are of all criteria.\n",
    "    \"\"\"\n",
    "    axis_counts: Counter[str] = Counter()\n",
    "    level_counts: Counter[str] = Counter()\n",
    "\n",
    "    for state in states:\n",
    "        for rubric in state[\"rubrics\"]:\n",
    "            axis_tags = [t for t in rubric[\"tags\"] if isinstance(t, str) and t.startswith(\"axis:\")]\n",
    "            assert len(axis_tags) == 1, f\"Expected 1 axis tag, got {len(axis_tags)} for rubric {rubric}\"\n",
    "            axis_tag = axis_tags[0]\n",
    "            axis_counts[axis_tag] += 1\n",
    "\n",
    "            level_tags = [t for t in rubric[\"tags\"] if isinstance(t, str) and t.startswith(\"level:\")]\n",
    "            assert len(level_tags) == 1, f\"Expected 1 level tag, got {len(level_tags)} for rubric {rubric}\"\n",
    "            level_tag = level_tags[0]\n",
    "            level_counts[level_tag] += 1\n",
    "\n",
    "    index_labels: list[str] = []\n",
    "    data_rows: list[dict[str, float | str]] = []\n",
    "\n",
    "    # Helper to append one (label, count, percent) table row.\n",
    "    def _add_row(label: str, count: int | float | str, pct: float | str):\n",
    "        index_labels.append(label)\n",
    "        data_rows.append({\"Count\": count, \"Percent\": pct})\n",
    "\n",
    "    # (i) Overall (bold)\n",
    "    total_rubrics = sum(axis_counts.values())\n",
    "    _add_row(r\"\\textbf{All rubric criteria}\", f\"\\\\textbf{{{total_rubrics}}}\", f\"\\\\textbf{{{100.00:.2f}}}\")\n",
    "\n",
    "    # (ii) Level breakdown (bold for Level, indented for subrows)\n",
    "    n_cluster = level_counts.get(\"level:cluster\", 0)\n",
    "    n_example = level_counts.get(\"level:example\", 0)\n",
    "    n_level = n_cluster + n_example\n",
    "    # every criterion must be either cluster- or example-level\n",
    "    assert n_level == total_rubrics, f\"Expected n_level to equal total_rubrics, but got {n_level} != {total_rubrics}\"\n",
    "    _add_row(r\"\\textbf{Level}\", f\"\\\\textbf{{{n_level}}}\", f\"\\\\textbf{{{n_level / total_rubrics * 100:.2f}}}\")\n",
    "    _add_row(r\"\\qquad Cluster\", n_cluster, f\"{n_cluster / total_rubrics * 100:.2f}\")\n",
    "    _add_row(r\"\\qquad Example-specific\", n_example, f\"{n_example / total_rubrics * 100:.2f}\")\n",
    "\n",
    "    # (iii) Axis breakdown (bold for Axis, indented for subrows)\n",
    "    _add_row(r\"\\textbf{Axis}\", f\"\\\\textbf{{{total_rubrics}}}\", f\"\\\\textbf{{{100.00:.2f}}}\")\n",
    "    for axis_tag, n_axis in axis_counts.most_common():\n",
    "        axis_clean = _strip_prefix(axis_tag)\n",
    "        _add_row(r\"\\qquad \" + axis_clean, n_axis, f\"{n_axis / total_rubrics * 100:.2f}\")\n",
    "\n",
    "    df_rubric_criteria = pd.DataFrame(data_rows, index=index_labels)\n",
    "    df_rubric_criteria.index.name = \"Category\"\n",
    "\n",
    "    return df_rubric_criteria"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# stream the main-eval examples (JSONL, one example per line) from blob storage\n",
    "main_eval_states = []\n",
    "with bf.BlobFile(fp_main_eval, 'r') as f:\n",
    "    for line in f:\n",
    "        main_eval_states.append(json.loads(line))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "df_examples = get_example_summary(main_eval_states)\n",
    "\n",
    "display(df_examples)\n",
    "# escape=False so the embedded \\textbf / \\qquad LaTeX markup is emitted\n",
    "# verbatim rather than backslash-escaped (same as the rubric-criteria cell)\n",
    "print(df_examples.to_latex(escape=False))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "df_rubric_criteria = get_rubric_criteria_summary(main_eval_states)\n",
    "display(df_rubric_criteria)\n",
    "print(df_rubric_criteria.to_latex(escape=False))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "n_turns_list = []\n",
    "n_rubrics_list = []\n",
    "n_chars_list = []\n",
    "for example in main_eval_states:\n",
    "    n_rubrics_list.append(len(example[\"rubrics\"]))\n",
    "    n_turns_list.append(len(example[\"prompt\"]))\n",
    "    n_chars = 0\n",
    "    for turn in example[\"prompt\"]:\n",
    "        n_chars += len(turn[\"content\"])\n",
    "    n_chars_list.append(n_chars)\n",
    "\n",
    "quantiles = [0, 0.05, 0.25, 0.5, 0.75, 0.95, 1.0]\n",
    "n_turns_quantiles = np.quantile(n_turns_list, quantiles)\n",
    "n_rubrics_quantiles = np.quantile(n_rubrics_list, quantiles)\n",
    "n_chars_quantiles = np.quantile(n_chars_list, quantiles)\n",
    "\n",
    "# Also compute means\n",
    "n_turns_mean = np.mean(n_turns_list)\n",
    "n_rubrics_mean = np.mean(n_rubrics_list)\n",
    "n_chars_mean = np.mean(n_chars_list)\n",
    "\n",
    "df_quantiles = pd.DataFrame({\n",
    "    \"n_turns\": n_turns_quantiles.astype(int),\n",
    "    \"n_rubrics\": n_rubrics_quantiles.astype(int),\n",
    "    \"n_chars\": n_chars_quantiles.astype(int),\n",
    "}, index=[f\"{int(q*100)}%\" for q in quantiles])\n",
    "\n",
    "# Insert mean as a new row after the 50% (median) row\n",
    "mean_row = pd.DataFrame({\n",
    "    \"n_turns\": [n_turns_mean],\n",
    "    \"n_rubrics\": [n_rubrics_mean],\n",
    "    \"n_chars\": [n_chars_mean],\n",
    "}, index=[\"mean\"])\n",
    "\n",
    "# Find the position of the 50% row\n",
    "median_idx = list(df_quantiles.index).index(\"50%\")\n",
    "# Split and insert mean after median\n",
    "df_quantiles = pd.concat([\n",
    "    df_quantiles.iloc[:median_idx+1],\n",
    "    mean_row,\n",
    "    df_quantiles.iloc[median_idx+1:]\n",
    "])\n",
    "\n",
    "display(df_quantiles)\n",
    "print(df_quantiles.to_latex())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# how many examples have at least one clustered criterion?\n",
    "n_states_with_clustered_criterion = 0\n",
    "n_clustered_criteria = 0\n",
    "unique_clustered_criteria = set()\n",
    "for state in main_eval_states:\n",
    "    has_clustered_criterion = False\n",
    "    for rubric in state[\"rubrics\"]:\n",
    "        if \"level:cluster\" in rubric[\"tags\"]:\n",
    "            has_clustered_criterion = True\n",
    "            unique_clustered_criteria.add(rubric[\"criterion\"])\n",
    "\n",
    "    if has_clustered_criterion:\n",
    "        n_states_with_clustered_criterion += 1\n",
    "\n",
    "print(f\"Number of examples with at least one clustered criterion: {n_states_with_clustered_criterion}\")\n",
    "print(f\"Number of unique clustered criteria: {len(unique_clustered_criteria)}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# how many unique rubric criteria are there, and how many are reused across examples?\n",
    "unique_criteria = set()\n",
    "already_seen_criteria = set()\n",
    "for state in main_eval_states:\n",
    "    for rubric in state[\"rubrics\"]:\n",
    "        if rubric[\"criterion\"] in unique_criteria:\n",
    "            already_seen_criteria.add(rubric[\"criterion\"])\n",
    "        unique_criteria.add(rubric[\"criterion\"])\n",
    "\n",
    "print(f\"Number of unique criteria: {len(unique_criteria)}\")\n",
    "print(f\"Number of criteria seen more than once: {len(already_seen_criteria)}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# stream the meta-eval examples (JSONL, one example per line) from blob storage\n",
    "meta_eval_states = []\n",
    "with bf.BlobFile(fp_meta_eval, 'r') as f:\n",
    "    for line in f:\n",
    "        meta_eval_states.append(json.loads(line))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# as defined in the paper, a meta-eval example is a tuple of (rubric criterion / category, conversation, response, physician grade)\n",
    "meta_eval_examples = [\n",
    "    {\n",
    "        'category': s['category'],\n",
    "        'conversation': s['prompt'],\n",
    "        'response': s['completion'],\n",
    "        'binary_label': label\n",
    "    }\n",
    "    for s in meta_eval_states\n",
    "    for label in s['binary_labels']\n",
    "]\n",
    "\n",
    "meta_category_counts = Counter(s['category'] for s in meta_eval_examples)\n",
    "meta_category_count_vals = list(meta_category_counts.values())\n",
    "len(meta_eval_examples), min(meta_category_count_vals), max(meta_category_count_vals), np.mean(meta_category_count_vals)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Data loading code"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "def filename_to_model(filename: str) -> str:\n",
    "    \"\"\"Extract the model name from a results filename.\n",
    "\n",
    "    Filenames look like 'healthbench{_SUBSET}_{MODEL}_{DATE}_{TIME}.json'\n",
    "    (with an extra trailing '_allresults' segment for allresults files):\n",
    "    drop the leading 'healthbench' segment and the trailing date/time\n",
    "    (and 'allresults') segments. Note any subset name stays attached to\n",
    "    the model name; callers normalize it (e.g. stripping 'hard_').\n",
    "    \"\"\"\n",
    "    n_trailing_segments = 3 if '_allresults' in filename else 2\n",
    "    return filename.split('_', 1)[1].rsplit('_', n_trailing_segments)[0]\n",
    "\n",
    "def load_results(filename: str) -> dict:\n",
    "    \"\"\"Load one results JSON file from tmp_dir and echo which file was read.\"\"\"\n",
    "    fp = os.path.join(tmp_dir, filename)\n",
    "    with open(fp) as f:  # context manager: close the handle promptly\n",
    "        results = json.load(f)\n",
    "    # fixed: was print(f'Loaded (unknown)') — an f-string with no placeholder\n",
    "    print(f'Loaded {filename}')\n",
    "    return results\n",
    "\n",
    "def get_results_by_filename(filenames: list[str]) -> dict:\n",
    "    \"\"\"Map model name -> loaded results dict for each results filename.\"\"\"\n",
    "    return {filename_to_model(f): load_results(f) for f in filenames}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_df_from_results_by_model(results_by_model: dict) -> pd.DataFrame:\n",
    "    \"\"\"Convert {model: metrics_dict} into a long DataFrame indexed by\n",
    "    (model, metric) with 'value', 'bootstrap_std' and 'n_samples' columns.\n",
    "\n",
    "    Skips the bare 'score' key; requires every remaining metric to have\n",
    "    companion ':bootstrap_std' and ':n_samples' entries, and every model\n",
    "    to report the identical metric set.\n",
    "    \"\"\"\n",
    "    metric_names_by_model = []\n",
    "    for model in results_by_model:\n",
    "        metric_names = [k for k in results_by_model[model].keys() if not k.endswith('bootstrap_std') and not k.endswith('n_samples')]\n",
    "        names_to_append = set()\n",
    "        for metric_name in metric_names:\n",
    "            if metric_name == \"score\":\n",
    "                continue\n",
    "            assert metric_name + ':bootstrap_std' in results_by_model[model]\n",
    "            assert metric_name + ':n_samples' in results_by_model[model]\n",
    "            names_to_append.add(metric_name)\n",
    "        metric_names_by_model.append(names_to_append)\n",
    "\n",
    "    # all models must expose the same metric set before we tabulate\n",
    "    assert all(metric_names_by_model[0] == metric_names for metric_names in metric_names_by_model)\n",
    "    metric_names = metric_names_by_model[0]\n",
    "\n",
    "    rows = []\n",
    "    for model, metrics in results_by_model.items():\n",
    "        for item in metric_names:\n",
    "            row = {\n",
    "                \"model\": model,\n",
    "                \"metric\": item,\n",
    "                \"value\": metrics[item],\n",
    "                \"bootstrap_std\": metrics[item + \":bootstrap_std\"],\n",
    "                \"n_samples\": metrics[item + \":n_samples\"],\n",
    "            }\n",
    "            rows.append(row)\n",
    "    df = pd.DataFrame(rows)\n",
    "    df.set_index([\"model\", \"metric\"], inplace=True)\n",
    "\n",
    "    return df"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Clustered bar plots"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "def wrap_label(label, width):\n",
    "    \"\"\"Wrap a label across lines so long names fit in ticks/legends.\"\"\"\n",
    "    pieces = textwrap.wrap(label, width=width, break_long_words=False, replace_whitespace=False)\n",
    "    return \"\\n\".join(pieces)\n",
    "\n",
    "def clustered_bar_plot(\n",
    "    results_by_model: dict,\n",
    "    theme_or_axis: Literal[\"theme\", \"axis\"],\n",
    "    ylabel: str,\n",
    "    title: str,\n",
    "    sort_order: list[str] | None = None,\n",
    "    cluster_sort_order: list[str] | None = None,\n",
    "    error_as_yaxis: bool = False,\n",
    ") -> pd.DataFrame:\n",
    "    \"\"\"Draw a grouped bar chart of per-theme or per-axis scores by model.\n",
    "\n",
    "    Bars are grouped by model (sorted by descending overall_score unless\n",
    "    sort_order is given), one bar per cluster, with bootstrap-std error\n",
    "    bars; a dashed line marks each model's overall score. If\n",
    "    error_as_yaxis is True, 1 - value is plotted instead of the value.\n",
    "    Returns a tidy DataFrame of the plotted values (overall-score rows\n",
    "    carry theme=None).\n",
    "    \"\"\"\n",
    "    # get the results\n",
    "    df = get_df_from_results_by_model(results_by_model)\n",
    "\n",
    "    # get df by only theme or axis\n",
    "    clustered_df = df[df.index.get_level_values(\"metric\").str.startswith(f'{theme_or_axis}:')].copy()\n",
    "\n",
    "    # get models and clusters\n",
    "    models = clustered_df.index.get_level_values(\"model\").unique()\n",
    "    clusters = [m.replace(f\"{theme_or_axis}:\", \"\") for m in clustered_df.index.get_level_values(\"metric\").unique()]\n",
    "    if cluster_sort_order is not None:\n",
    "        assert set(cluster_sort_order) == set(clusters), f\"cluster_sort_order must be the same as clusters, but got {cluster_sort_order} vs {clusters}\"\n",
    "        clusters = cluster_sort_order\n",
    "\n",
    "    # map clusters to canonical names\n",
    "    if theme_or_axis == \"theme\":\n",
    "        mapped_clusters = [CANONICAL_CLUSTER_NAMES[c.split(\"_\", 1)[0]] for c in clusters]\n",
    "    elif theme_or_axis == \"axis\":\n",
    "        mapped_clusters = [CANONICAL_AXIS_NAMES[c] for c in clusters]\n",
    "    else:\n",
    "        raise ValueError(f\"Invalid theme_or_axis: {theme_or_axis}\")\n",
    "    mapped_clusters = [c for c in mapped_clusters]  # NOTE(review): no-op list copy\n",
    "\n",
    "    # sort models\n",
    "    if sort_order is not None:\n",
    "        assert set(sort_order) == set(models), f\"sort_order must be the same as models, but got {sort_order} vs {models}\"\n",
    "        sorted_models = sort_order\n",
    "    else:\n",
    "        model_scores = []\n",
    "        for model in models:\n",
    "            score_idx = (model, \"overall_score\")\n",
    "            if score_idx in df.index:\n",
    "                score = df.loc[score_idx, \"value\"]\n",
    "            else:\n",
    "                score = np.nan\n",
    "            model_scores.append((model, score))\n",
    "\n",
    "        # descending score; ties broken alphabetically by model name\n",
    "        model_scores_sorted = sorted(model_scores, key=lambda x: (-x[1], x[0]))\n",
    "        sorted_models = [m for m, s in model_scores_sorted]\n",
    "\n",
    "    # plot\n",
    "    bar_width = 0.8 / len(clusters)\n",
    "    x = np.arange(len(sorted_models))\n",
    "\n",
    "    fig, ax = plt.subplots(figsize=(12, 6))\n",
    "    sns.color_palette(\"colorblind\")  # NOTE(review): return value unused — has no effect\n",
    "\n",
    "    colors = sns.color_palette(n_colors=len(clusters))\n",
    "\n",
    "    data_to_write = []\n",
    "\n",
    "    bars = []\n",
    "    for i, (cluster, mapped_cluster) in enumerate(zip(clusters, mapped_clusters)):\n",
    "        values = []\n",
    "        stds = []\n",
    "        for model in sorted_models:\n",
    "            idx = (model, f\"{theme_or_axis}:{cluster}\")\n",
    "            if idx in clustered_df.index:\n",
    "                if error_as_yaxis:\n",
    "                    values.append(1 - clustered_df.loc[idx, \"value\"])\n",
    "                else:\n",
    "                    values.append(clustered_df.loc[idx, \"value\"])\n",
    "                stds.append(clustered_df.loc[idx, \"bootstrap_std\"])\n",
    "            else:\n",
    "                # missing metric for this model: show a gap, no error bar\n",
    "                values.append(np.nan)\n",
    "                stds.append(0)\n",
    "        bar = ax.bar(\n",
    "            x + i * bar_width,\n",
    "            values,\n",
    "            width=bar_width,\n",
    "            yerr=stds,\n",
    "            label=wrap_label(mapped_cluster, width=5),\n",
    "            capsize=2,\n",
    "            align=\"edge\",\n",
    "            color=colors[i],\n",
    "        )\n",
    "        bars.append(bar)\n",
    "\n",
    "        for model, val, std in zip(sorted_models, values, stds, strict=True):\n",
    "            data_to_write.append({\n",
    "                \"model\": models_to_canonical_name[model],\n",
    "                \"theme\": mapped_cluster,\n",
    "                \"success_rate\": val,\n",
    "                \"success_rate_lower\": max(0, val - std),\n",
    "                \"success_rate_upper\": min(1, val + std),\n",
    "            })\n",
    "\n",
    "    ax.set_xticks(x + bar_width * (len(clusters) - 1) / 2)\n",
    "\n",
    "    wrapped_labels = [wrap_label(models_to_canonical_name[m], width=10) for m in sorted_models]\n",
    "    ax.set_xticklabels(wrapped_labels)\n",
    "\n",
    "    ax.set_ylabel(ylabel)\n",
    "    ax.set_title(title)\n",
    "\n",
    "    legend_handles = [bars[i][0] for i in range(len(clusters))]\n",
    "    wrapped_mapped_clusters = [wrap_label(c, width=20) for c in mapped_clusters]\n",
    "    ax.legend(legend_handles, wrapped_mapped_clusters, title=theme_or_axis.capitalize(), bbox_to_anchor=(1.05, 1), loc=\"upper left\")\n",
    "\n",
    "    # dashed overall-score line spanning each model's group of bars\n",
    "    for i, model in enumerate(sorted_models):\n",
    "        score_idx = (model, \"overall_score\")\n",
    "        if score_idx in df.index:\n",
    "            if error_as_yaxis:\n",
    "                score = 1 - df.loc[score_idx, \"value\"]\n",
    "            else:\n",
    "                score = df.loc[score_idx, \"value\"]\n",
    "            left = x[i]\n",
    "            right = x[i] + bar_width * len(clusters)\n",
    "            ax.hlines(\n",
    "                y=score,\n",
    "                xmin=left,\n",
    "                xmax=right,\n",
    "                colors=\"black\",\n",
    "                linestyles=\"dashed\",\n",
    "                linewidth=1.5,\n",
    "                label=None if i > 0 else \"Model Score\"\n",
    "            )\n",
    "            ax.text(\n",
    "                left + (right - left) / 2,\n",
    "                score,\n",
    "                f\"{score:.2f}\",\n",
    "                ha=\"center\",\n",
    "                va=\"bottom\",\n",
    "                fontsize=9,\n",
    "                color=\"black\",\n",
    "                fontweight=\"bold\",\n",
    "                bbox=dict(facecolor=\"white\", edgecolor=\"none\", alpha=0.7, pad=0.5)\n",
    "            )\n",
    "\n",
    "            data_to_write.append({\n",
    "                \"model\": models_to_canonical_name[model],\n",
    "                \"theme\": None,\n",
    "                \"success_rate\": score,\n",
    "                \"success_rate_lower\": None,\n",
    "                \"success_rate_upper\": None,\n",
    "            })\n",
    "\n",
    "    # Ensure y-axis starts at zero\n",
    "    ax.set_ylim(bottom=0)\n",
    "\n",
    "    plt.tight_layout()\n",
    "    plt.show()\n",
    "\n",
    "    return pd.DataFrame(data_to_write)\n",
    "\n",
    "def save_csv_and_print(df, filename):\n",
    "    \"\"\"Write df as CSV to results_dir/filename, print the path, return df.\"\"\"\n",
    "    out_path = bf.join(results_dir, filename)\n",
    "    bf.write_text(out_path, df.to_csv())\n",
    "    print(out_path)\n",
    "    return df"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### By theme"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "main_eval_results_by_model = get_results_by_filename(main_filename_list)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "theme_df = clustered_bar_plot(main_eval_results_by_model, \"theme\", ylabel = \"Score\", title = \"HealthBench scores by theme\", cluster_sort_order = CLUSTER_SORT_ORDER)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "save_csv_and_print(theme_df, 'theme_data.csv')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### By axis"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "axis_df = clustered_bar_plot(main_eval_results_by_model, \"axis\", ylabel = \"Score\", title = \"HealthBench scores by axis\", cluster_sort_order = AXIS_SORT_ORDER)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "save_csv_and_print(axis_df, 'axis_data.csv')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Hard subset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "results_by_model_hard = get_results_by_filename(hard_filename_list)\n",
    "results_by_model_hard = {\n",
    "    k.replace('hard_', ''): v  # normalize the model names\n",
    "    for k, v in results_by_model_hard.items()\n",
    "}\n",
    "hard_df = clustered_bar_plot(results_by_model_hard, theme_or_axis = \"axis\", ylabel = \"Score\", title = \"HealthBench Hard subset scores by axis\", sort_order = None, cluster_sort_order = AXIS_SORT_ORDER)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "save_csv_and_print(hard_df, 'hard_subset_data.csv')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Consensus subset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "results_by_model_consensus = get_results_by_filename(consensus_set_filename_list)\n",
    "results_by_model_consensus = {\n",
    "    k.replace('consensus_', ''): v\n",
    "    for k, v in results_by_model_consensus.items()\n",
    "}\n",
    "consensus_df = clustered_bar_plot(results_by_model_consensus, theme_or_axis = \"theme\", ylabel = \"Error rate (1 − score)\", title = \"HealthBench Consensus subset error rates by theme\", sort_order = None, cluster_sort_order = CLUSTER_SORT_ORDER, error_as_yaxis = True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "save_csv_and_print(consensus_df, 'consensus_subset_data.csv')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "CONSENSUS_PROMPT_TAX_NAME_MAPPER = {\n",
    "    'health-professional': \"Health professional user\",\n",
    "    'not-health-professional': \"Non-health professional user\",\n",
    "    'detailed': \"Query requiring detailed response\",\n",
    "    'simple': \"Query requiring simple response\",\n",
    "    'enough-context': \"Enough context provided\",\n",
    "    'not-enough-context': \"Not enough context provided\",\n",
    "    'conditionally-emergent': \"Conditionally emergent\",\n",
    "    'emergent': \"Emergent\",\n",
    "    'non-emergent': \"Non-emergent\",\n",
    "    'context-does-not-matter': \"Healthcare context does not matter\",\n",
    "    'context-matters-but-unclear': \"Healthcare context matters but is unclear\",\n",
    "    'context-matters-is-clear': \"Healthcare context matters and is clear\",\n",
    "    'enough-info-to-complete-task': \"Enough information to complete task\",\n",
    "    'not-enough-info-to-complete-task': \"Not enough information to complete task\",\n",
    "    'any-reducible-uncertainty': \"Any reducible uncertainty\",\n",
    "    'no-uncertainty': \"No uncertainty\",\n",
    "    'only-irreducible-uncertainty': \"Only irreducible uncertainty\",\n",
    "}\n",
    "\n",
    "QUESTION_NAME_MAPPER = {\n",
    "    'accuracy_completeness': \"Accuracy and completeness\",\n",
    "    'tailored': \"Tailored communication\",\n",
    "    'accuracy_hedging': \"Accuracy and hedging\",\n",
    "    'appropriate': \"Appropriate\",\n",
    "    'helpful_safe': \"Helpful and safe\",\n",
    "    'precise': \"Precise\",\n",
    "    'context_seeking': \"Context seeking\",\n",
    "    'emergency_behavior': \"Emergency behavior\",\n",
    "    'aligned_accurate': \"Aligned and accurate\",\n",
    "    'language': \"Language\",\n",
    "    'accuracy_safety': \"Accuracy and safety\",\n",
    "    'response_instruction_following': \"Response instruction following\",\n",
    "    'helpfulness': \"Helpfulness\",\n",
    "    'safety': \"Safety\",\n",
    "    'accurate': \"Accuracy\",\n",
    "    'hedges': \"Hedging behavior\",\n",
    "    'seeks_context': \"Context-seeking behavior\",\n",
    "}\n",
    "\n",
    "def key_to_canonical_name(key: str) -> tuple[str, str | None, str | None]:\n",
    "    \"\"\"Map a raw metrics key to (theme, consensus category, consensus criterion).\n",
    "\n",
    "    'overall_score' maps to ('Overall score', '', ''); any other key must start\n",
    "    with 'cluster:' followed by a name from FULL_CLUSTER_NAMES, then\n",
    "    '_<consensus-tax-name>_<question-name>'.\n",
    "\n",
    "    Raises:\n",
    "        ValueError: if no entry of FULL_CLUSTER_NAMES matches the key.\n",
    "    \"\"\"\n",
    "    if key == 'overall_score':\n",
    "        return ('Overall score', '', '')\n",
    "\n",
    "    assert key.startswith('cluster:')\n",
    "    target_cluster = None\n",
    "    # First cluster name that prefixes the key wins (loop breaks on match).\n",
    "    for cluster in FULL_CLUSTER_NAMES:\n",
    "        if key.startswith(f'cluster:{cluster}'):\n",
    "            target_cluster = cluster\n",
    "\n",
    "            break\n",
    "\n",
    "    if target_cluster is None:\n",
    "        raise ValueError(f\"Invalid key: {key}\")\n",
    "\n",
    "    key = key.replace(f'cluster:{target_cluster}_', '')\n",
    "\n",
    "    # Remainder is '<consensus-tax-name>_<question-name>'; split on the first '_'.\n",
    "    consensus_prompt_tax_name, question_name = key.split('_', 1)\n",
    "\n",
    "    return (\n",
    "        CANONICAL_CLUSTER_NAMES[target_cluster.split('_', 1)[0]],\n",
    "        CONSENSUS_PROMPT_TAX_NAME_MAPPER[consensus_prompt_tax_name],\n",
    "        QUESTION_NAME_MAPPER[question_name]\n",
    "    )\n",
    "\n",
    "\n",
    "# Build a (Theme, Consensus Category, Consensus Criterion) x model table of consensus scores.\n",
    "filtered_model_results = {}\n",
    "for model, results in results_by_model_consensus.items():\n",
    "    filtered_model_results[models_to_canonical_name[model]] = {\n",
    "        key_to_canonical_name(k): v for k, v in results.items()\n",
    "        if (k.startswith('cluster:') or k == 'overall_score') and not k.endswith('bootstrap_std') and not k.endswith('n_samples')\n",
    "    }\n",
    "\n",
    "\n",
    "filtered_model_df = pd.DataFrame(filtered_model_results)\n",
    "filtered_model_df.index = pd.MultiIndex.from_tuples(\n",
    "    filtered_model_df.index,\n",
    "    names=[\"Theme\", \"Consensus Category\", \"Consensus Criterion\"]\n",
    ")\n",
    "\n",
    "# Order rows: overall score first, then themes in the canonical cluster order.\n",
    "sort_order = [\n",
    "    CANONICAL_CLUSTER_NAMES[c.split('_', 1)[0]]\n",
    "    for c in CLUSTER_SORT_ORDER\n",
    "]\n",
    "sort_order = [\"Overall score\"] + sort_order\n",
    "filtered_model_df = filtered_model_df.loc[sort_order]\n",
    "\n",
    "# Sort columns by overall score (descending, left to right)\n",
    "sorted_cols = filtered_model_df.sort_values(ascending=False, by = (\"Overall score\", '', ''), axis = 1).keys().tolist()\n",
    "\n",
    "# NOTE(review): DataFrame.applymap is deprecated since pandas 2.1 in favor of DataFrame.map — switch if running a recent pandas.\n",
    "filtered_model_df = filtered_model_df[sorted_cols].applymap(lambda x: f\"{x:.4f}\" if isinstance(x, (float, int)) else x)\n",
    "\n",
    "display(filtered_model_df)\n",
    "print(filtered_model_df.to_latex(multirow=False))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Human eval data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "human_eval_results_by_model = get_results_by_filename(human_eval_filename_list)\n",
    "human_df = clustered_bar_plot(human_eval_results_by_model, \"axis\", ylabel = \"Score\", title = \"Physician-written response and reference response HealthBench scores by axis\", sort_order = HUMAN_EVAL_SORT_ORDER, cluster_sort_order = AXIS_SORT_ORDER)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "save_csv_and_print(human_df, 'human_eval_data.csv')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## More detailed human eval analysis"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "human_eval_results = get_results_by_filename(human_eval_allresults_filename_list)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
   "source": [
    "def example_level_metadata_to_score(metadata: dict) -> float:\n",
    "    rubric_items = metadata['rubric_items']\n",
    "    max_possible_score = 0\n",
    "    score_achieved = 0\n",
    "    for item in rubric_items:\n",
    "        points = item['points']\n",
    "        if points > 0:\n",
    "            max_possible_score += points\n",
    "        if item['criteria_met']:\n",
    "            score_achieved += points\n",
    "    return score_achieved / max_possible_score\n",
    "\n",
    "def clip_score(score: float) -> float:\n",
    "    if score > 1:\n",
    "        return 1\n",
    "    elif score < 0:\n",
    "        return 0\n",
    "    else:\n",
    "        return score\n",
    "\n",
    "def plot_human_eval_scores(\n",
    "    results,\n",
    "    baseline_name: str,\n",
    "    reference_completions_name: str,\n",
    "    x_label: str,\n",
    "    y_label: str,\n",
    "    title: str,\n",
    "    figsize: tuple[int, int] = (8, 6)\n",
    ") -> None:\n",
    "    \"\"\"Histogram the per-prompt (physician score - mean reference score) differences.\n",
    "\n",
    "    Pairs each example in ``results[baseline_name]`` with the reference\n",
    "    completions sharing its prompt_id in ``results[reference_completions_name]``\n",
    "    (exactly 4 expected per prompt), plots the paired differences, and prints\n",
    "    the proportion and mean magnitude of positive and negative gaps.\n",
    "    \"\"\"\n",
    "    data = []\n",
    "    for human_eval_metadata in results[baseline_name]['metadata']['example_level_metadata']:\n",
    "        prompt_id = human_eval_metadata['prompt_id']\n",
    "        reference_completion_metadata = [\n",
    "            e\n",
    "            for e in results[reference_completions_name]['metadata']['example_level_metadata']\n",
    "            if e['prompt_id'] == prompt_id\n",
    "        ]\n",
    "        # Each prompt is expected to have exactly four reference completions.\n",
    "        assert len(reference_completion_metadata) == 4\n",
    "        data.append({\n",
    "            'prompt_id': prompt_id,\n",
    "            'human_eval_score': example_level_metadata_to_score(human_eval_metadata),\n",
    "            'reference_completion_scores': [example_level_metadata_to_score(m) for m in reference_completion_metadata],\n",
    "        })\n",
    "\n",
    "    # Calculate mean reference completion scores for each example\n",
    "    human_eval_scores = [r['human_eval_score'] for r in data]\n",
    "    mean_reference_scores = [sum(r['reference_completion_scores']) / len(r['reference_completion_scores']) for r in data]\n",
    "\n",
    "    score_diffs = np.array(human_eval_scores) - np.array(mean_reference_scores)\n",
    "\n",
    "    # Fix: honor the figsize parameter (was hard-coded to (8, 6)).\n",
    "    plt.figure(figsize=figsize)\n",
    "\n",
    "    # Determine symmetric range around zero so that zero falls at the center of one bin\n",
    "    min_diff, max_diff = score_diffs.min(), score_diffs.max()\n",
    "    max_abs = max(abs(min_diff), abs(max_diff))\n",
    "    num_bins = 201  # pick an odd number so one bin is centered at zero\n",
    "    bin_edges = np.linspace(-max_abs, max_abs, num_bins + 1)\n",
    "\n",
    "    # start plotting\n",
    "    plt.hist(score_diffs, bins=bin_edges, color='purple', alpha=0.5, density=True)\n",
    "    plt.xlabel(x_label)\n",
    "    plt.ylabel(y_label)\n",
    "    plt.title(title)\n",
    "    plt.ylim(0, 5)\n",
    "    plt.axvline(x=0, color='black', linestyle='--', linewidth=0.5, zorder=100)\n",
    "    plt.show()\n",
    "\n",
    "    human_eval_vs_ref = [human_eval_score - mean_reference_score for human_eval_score, mean_reference_score in zip(human_eval_scores, mean_reference_scores, strict = True)]\n",
    "\n",
    "    proportion_human_eval_greater_ref = len([x for x in human_eval_vs_ref if x > 0]) / len(human_eval_scores)\n",
    "    human_eval_greater_ref = [x for x in human_eval_vs_ref if x > 0]\n",
    "    mean_human_eval_greater_ref = sum(human_eval_greater_ref) / len(human_eval_greater_ref)\n",
    "    print(f\"Proportion of times that the human eval score is greater than the mean reference completion score: {proportion_human_eval_greater_ref * 100:.2f}%\")\n",
    "    print(f\"Mean magnitude of the difference: {mean_human_eval_greater_ref:.2f}\")\n",
    "\n",
    "    proportion_human_eval_less_ref = len([x for x in human_eval_vs_ref if x < 0]) / len(human_eval_scores)\n",
    "    human_eval_less_ref = [x for x in human_eval_vs_ref if x < 0]\n",
    "    mean_human_eval_less_ref = sum(human_eval_less_ref) / len(human_eval_less_ref)\n",
    "    print(f\"Proportion of times that the human eval score is less than the mean reference completion score: {proportion_human_eval_less_ref * 100:.2f}%\")\n",
    "    print(f\"Mean magnitude of the difference: {mean_human_eval_less_ref:.2f}\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_human_eval_scores(human_eval_results, 'aug_2024_reference_humanbaseline', 'aug_2024_reference_referencecompletions', x_label = \"Physician response score minus mean reference completion score\", y_label = \"Density\", title = \"Paired physician response and mean reference score differences (Sep 2024 models)\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "plot_human_eval_scores(human_eval_results, 'apr_2025_reference_humanbaseline', 'apr_2025_reference_referencecompletions', x_label = \"Physician response score minus mean reference completion score\", y_label = \"Density\", title = \"Paired physician response and mean reference score differences (Apr 2025 models)\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [],
   "source": [
    "def plot_distribution(\n",
    "    score_lists_with_labels,\n",
    "    title,\n",
    "    x_label,\n",
    "    y_label,\n",
    "    bins,\n",
    "    sort_order: list[str] | None = None,\n",
    "    clip: tuple[float | None, float | None] | None = None,\n",
    "    markers=True,\n",
    "):\n",
    "    \"\"\"\n",
    "    Overlay line-style histogram traces for one or more score distributions.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    score_lists_with_labels : list[tuple[list[float], str]]\n",
    "        (scores, label) pairs to plot; each label is mapped through\n",
    "        models_to_canonical_name for the legend.\n",
    "    title, x_label, y_label : str\n",
    "        Figure title and axis labels.\n",
    "    bins : int\n",
    "        Number of histogram bins for every trace.\n",
    "    sort_order : list[str] | None, optional\n",
    "        If given, must contain exactly the labels present; traces are drawn\n",
    "        in this order.\n",
    "    clip : tuple[float | None, float | None] | None, optional\n",
    "        If given, scores are clipped to this (min, max) range via np.clip.\n",
    "    markers : bool, optional\n",
    "        If True (default), draw scatter markers at the bin centres.\n",
    "    \"\"\"\n",
    "    if not score_lists_with_labels:\n",
    "        raise ValueError(\"Provide at least one (scores, label) pair.\")\n",
    "\n",
    "    plt.figure()\n",
    "\n",
    "\n",
    "    if sort_order:\n",
    "        assert set(sort_order) == {label for _, label in score_lists_with_labels}\n",
    "        score_lists_with_labels = sorted(score_lists_with_labels, key = lambda x: sort_order.index(x[1]))\n",
    "\n",
    "    for scores, label in score_lists_with_labels:\n",
    "        # Histogram counts (y) and bin centres (x)\n",
    "        if clip:\n",
    "            scores = np.clip(scores, clip[0], clip[1])\n",
    "\n",
    "        counts, edges = np.histogram(scores, bins=bins, density=True)\n",
    "        centres = 0.5 * (edges[:-1] + edges[1:])\n",
    "\n",
    "        line, = plt.plot(centres, counts, label=models_to_canonical_name[label])      # line\n",
    "        if markers:\n",
    "            plt.scatter(centres, counts, color=line.get_color())\n",
    "\n",
    "    plt.xlabel(x_label)\n",
    "    plt.ylabel(y_label)\n",
    "    plt.title(title)\n",
    "    plt.legend()\n",
    "    plt.tight_layout()\n",
    "    plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "lengths = defaultdict(list)\n",
    "for model in human_eval_results:\n",
    "    for example in human_eval_results[model]['metadata']['example_level_metadata']:\n",
    "        lengths[model].append(len(example['completion'][0]['content']))\n",
    "\n",
    "# Sort the dictionary by the mean completion length so the legend order is meaningful.\n",
    "lengths = dict(\n",
    "    sorted(lengths.items(), key=lambda kv: np.mean(kv[1]))\n",
    ")\n",
    "\n",
    "lengths_list = [(v, k) for k, v in lengths.items()]\n",
    "\n",
    "# Plot the distribution of completion lengths by model.\n",
    "plot_distribution(\n",
    "    lengths_list,\n",
    "    title = \"Distribution of completion lengths for physician and reference responses\",\n",
    "    x_label=\"Completion length (characters); clipped to [0, 7500]\",\n",
    "    y_label = \"Density\",\n",
    "    bins = 50,\n",
    "    clip = (0, 7500),\n",
    "    sort_order = HUMAN_EVAL_SORT_ORDER\n",
    ")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Date frontier plot"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [],
   "source": [
    "def date_frontier_plot(date_plot_df):\n",
    "    \"\"\"\n",
    "    Plot model scores against release dates, highlight the empirical\n",
    "    performance frontier, and annotate points.\n",
    "\n",
    "    • Frontier models:  label directly above the point (horizontal text)\n",
    "    • Non-frontier models: label on a 45° downward–right diagonal that\n",
    "      starts just above-left of the point and is automatically nudged to\n",
    "      minimise overlaps (only these labels are moved).\n",
    "    \"\"\"\n",
    "\n",
    "    plot_entries: list[tuple[pd.Timestamp, float, str]] = []\n",
    "    models = date_plot_df.index.get_level_values(0).unique()\n",
    "    for model in models:\n",
    "        idx = (model, \"overall_score\")\n",
    "        if idx not in date_plot_df.index:\n",
    "            continue\n",
    "        # model_first_release (defined elsewhere) supplies a date-like release value per model.\n",
    "        plot_entries.append(\n",
    "            (\n",
    "                pd.to_datetime(model_first_release[model]),\n",
    "                float(date_plot_df.loc[idx, \"value\"]),\n",
    "                model,\n",
    "            )\n",
    "        )\n",
    "\n",
    "    if not plot_entries:  # Nothing to plot\n",
    "        return\n",
    "\n",
    "    plot_entries.sort(key=lambda x: x[0])  # chronological order\n",
    "\n",
    "    # Frontier = running maximum of score over the chronologically sorted entries.\n",
    "    frontier: list[tuple[pd.Timestamp, float, str]] = []\n",
    "    max_score = float(\"-inf\")\n",
    "    for dt, sc, mdl in plot_entries:\n",
    "        if sc > max_score:\n",
    "            frontier.append((dt, sc, mdl))\n",
    "            max_score = sc\n",
    "\n",
    "    frontier_set = {(d, s) for d, s, _ in frontier}\n",
    "\n",
    "\n",
    "    all_dates            = [d for d, *_ in plot_entries]\n",
    "    non_frontier_dates   = [d for d, s, _ in plot_entries if (d, s) not in frontier_set]\n",
    "    non_frontier_scores  = [s for d, s, _ in plot_entries if (d, s) not in frontier_set]\n",
    "    frontier_dates       = [d for d, *_ in frontier]\n",
    "    frontier_scores      = [s for _, s, _ in frontier]\n",
    "\n",
    "    # ------------------------------------------------------------------ #\n",
    "    # Construct the figure\n",
    "    # ------------------------------------------------------------------ #\n",
    "    fig, ax = plt.subplots(figsize=(8, 5))\n",
    "\n",
    "    # Scatter + line\n",
    "    ax.scatter(non_frontier_dates, non_frontier_scores, s=60, color=\"gray\",     label=\"Below frontier\")\n",
    "    ax.scatter(frontier_dates,       frontier_scores,       s=60, color=\"tab:blue\", label=\"Frontier models\")\n",
    "    ax.plot   (frontier_dates,       frontier_scores,             color=\"tab:blue\", linewidth=2, label=\"Frontier path\")\n",
    "\n",
    "\n",
    "    from matplotlib.transforms import offset_copy\n",
    "\n",
    "    frontier_texts, non_frontier_texts = [], []\n",
    "\n",
    "    for dt, sc, mdl in plot_entries:\n",
    "        is_frontier = (dt, sc) in frontier_set\n",
    "\n",
    "        if is_frontier:\n",
    "            # Static, horizontal label 5 px above the point\n",
    "            txt = ax.text(\n",
    "                dt,\n",
    "                sc,\n",
    "                models_to_canonical_name[mdl],\n",
    "                transform=offset_copy(ax.transData, fig, x=0 - (19 if mdl == 'gpt-4.1' else 0), y=5, units=\"points\"),\n",
    "                ha=\"center\",\n",
    "                va=\"bottom\",\n",
    "                fontsize=9,\n",
    "            )\n",
    "            frontier_texts.append(txt)\n",
    "        else:\n",
    "            # Diagonal (-45°) label just below-right of the point\n",
    "            txt = ax.text(\n",
    "                dt,\n",
    "                sc,\n",
    "                models_to_canonical_name[mdl],\n",
    "                transform=offset_copy(ax.transData, fig, x=3, y=-2, units=\"points\"),\n",
    "                ha=\"left\",\n",
    "                va=\"top\",\n",
    "                fontsize=7,\n",
    "                rotation=-30,\n",
    "            )\n",
    "            non_frontier_texts.append(txt)\n",
    "\n",
    "    ax.xaxis.set_major_locator(mdates.AutoDateLocator())\n",
    "    ax.xaxis.set_major_formatter(mdates.DateFormatter(\"%Y-%m-%d\"))\n",
    "    fig.autofmt_xdate()\n",
    "\n",
    "    ax.set_xlabel(\"Model release date\")\n",
    "    ax.set_ylabel(\"Score\")\n",
    "    ax.set_title(\"HealthBench performance frontier over time\")\n",
    "    ax.set_ylim(0, 1)\n",
    "    # NOTE(review): right edge hard-coded to 2025-05-06 + 35 days — update when newer models are added.\n",
    "    ax.set_xlim(\n",
    "        min(all_dates) - pd.Timedelta(days=30),\n",
    "        pd.to_datetime(\"2025-05-06\") + pd.Timedelta(days=35),\n",
    "    )\n",
    "\n",
    "    # Reverse legend order\n",
    "    handles, labels = ax.get_legend_handles_labels()\n",
    "    ax.legend(handles[::-1], labels[::-1], loc=\"upper left\")\n",
    "\n",
    "    plt.tight_layout()\n",
    "    plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "date_plot_df = get_df_from_results_by_model(main_eval_results_by_model)\n",
    "date_frontier_plot(date_plot_df)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Meta eval results"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [],
   "source": [
    "def agg_n_and_val_list(n_and_vals: list[dict]) -> dict:\n",
    "    \"\"\"Aggregate a list of {'value', 'n'} dicts into a single weighted average.\"\"\"\n",
    "    values = [d[\"value\"] for d in n_and_vals]\n",
    "    n = [d[\"n\"] for d in n_and_vals]\n",
    "    weighted_values = [v * n_i for v, n_i in zip(values, n)]\n",
    "    return {\"value\": sum(weighted_values) / sum(n), \"n\": sum(n)}\n",
    "\n",
    "def plot_model_physician_agreement(metrics, individual_physician_agreement_metrics, overall_model_agreement_metrics) -> pd.DataFrame:\n",
    "    \"\"\"Compare model-physician vs physician-physician agreement per theme cluster.\n",
    "\n",
    "    For each cluster, plots individual physicians' aggregated values, their\n",
    "    n-weighted average, and the model grader's aggregated value; displays and\n",
    "    prints a summary table; returns the plotted points as a DataFrame with\n",
    "    columns ['theme', 'score', 'type'].\n",
    "    \"\"\"\n",
    "    # Physicians with at most this many samples are excluded from the scatter.\n",
    "    MIN_SAMPLES = 50\n",
    "\n",
    "    all_clusters = {\n",
    "        k.split(' ')[0].rstrip(':')\n",
    "        for k in metrics.keys()\n",
    "        if k.startswith('cluster:')\n",
    "    }\n",
    "    METRICS_KEY_TEMPLATE_MODEL = '{CLUSTER_NAME}: pairwise_model_f1_balanced'\n",
    "    METRICS_KEY_TEMPLATE_PHYSICIAN = '{CLUSTER_NAME}: pairwise_physician_f1_balanced'\n",
    "\n",
    "    cluster_names = {\n",
    "        c.replace('cluster:', '').split('_', 1)[0]\n",
    "        for c in all_clusters\n",
    "    }\n",
    "    assert set(cluster_names) == set(CLUSTER_SORT_ORDER_SHORT)\n",
    "    cluster_names = list(reversed(CLUSTER_SORT_ORDER_SHORT))\n",
    "\n",
    "    clusters_and_subclusters = {\n",
    "        c: {\n",
    "            full_name for full_name in all_clusters if full_name.startswith('cluster:' + c)\n",
    "        }\n",
    "        for c in cluster_names\n",
    "    }\n",
    "\n",
    "    y_pos = np.arange(len(cluster_names))\n",
    "\n",
    "    fig, ax = plt.subplots(figsize=(8, 5))\n",
    "\n",
    "    # Collect data for the table\n",
    "    summary_rows: list[dict[str, float | int | str]] = []\n",
    "    all_rows = []\n",
    "    for i, cluster in enumerate(cluster_names):\n",
    "        physician_n_and_vals = defaultdict(list)\n",
    "        model_n_and_vals = []\n",
    "\n",
    "        # Gather data for every sub-cluster\n",
    "        for subcluster in clusters_and_subclusters[cluster]:\n",
    "            key_physician = METRICS_KEY_TEMPLATE_PHYSICIAN.format(CLUSTER_NAME=subcluster)\n",
    "            key_model = METRICS_KEY_TEMPLATE_MODEL.format(CLUSTER_NAME=subcluster)\n",
    "\n",
    "            # Per-physician data\n",
    "            indiv_n_and_vals = individual_physician_agreement_metrics[key_physician]\n",
    "            for physician_id, n_and_val in indiv_n_and_vals.items():\n",
    "                physician_n_and_vals[physician_id].append(n_and_val)\n",
    "\n",
    "            # Model data\n",
    "            model_n_and_val = overall_model_agreement_metrics[key_model]\n",
    "            model_n_and_vals.append(model_n_and_val)\n",
    "\n",
    "        # Aggregate per-physician data\n",
    "        agg_physician_n_and_vals = [agg_n_and_val_list(vals) for vals in physician_n_and_vals.values()]\n",
    "\n",
    "        # Keep physicians with enough samples\n",
    "        agg_physician_n_and_vals_filtered = [\n",
    "            (d[\"value\"], d[\"n\"]) for d in agg_physician_n_and_vals if d[\"n\"] > MIN_SAMPLES\n",
    "        ]\n",
    "        if not agg_physician_n_and_vals_filtered:\n",
    "            continue  # Skip clusters without physicians meeting the minimum sample count\n",
    "\n",
    "        indiv_vals, indiv_n = zip(*agg_physician_n_and_vals_filtered)\n",
    "        num_physicians = len(indiv_vals)\n",
    "\n",
    "\n",
    "        # Plot individual physicians\n",
    "        # NOTE(review): unseeded RNG — jitter placement varies between runs; seed for reproducible figures.\n",
    "        jitter = np.random.uniform(-0.15, 0.15, size=num_physicians)\n",
    "        ax.scatter(\n",
    "            indiv_vals,\n",
    "            i + jitter,\n",
    "            color=\"gray\",\n",
    "            alpha=0.6,\n",
    "            s=30,\n",
    "            label=\"Individual physicians\" if i == 0 else None,\n",
    "        )\n",
    "        for val in indiv_vals:\n",
    "            all_rows.append({\n",
    "                \"theme\": CANONICAL_CLUSTER_NAMES[cluster],\n",
    "                \"score\": val,\n",
    "                \"type\": \"Individual physicians\"\n",
    "            })\n",
    "\n",
    "        # Physician weighted average\n",
    "        grand_physician = agg_n_and_val_list(agg_physician_n_and_vals)\n",
    "        grand_physician_val = grand_physician[\"value\"]\n",
    "        ax.scatter(\n",
    "            [grand_physician_val],\n",
    "            [i],\n",
    "            color=\"blue\",\n",
    "            marker=\"*\",\n",
    "            s=120,\n",
    "            edgecolor=\"black\",\n",
    "            label=wrap_label(\"Weighted average of physicians\", 18) if i == 0 else None,\n",
    "        )\n",
    "        all_rows.append({\n",
    "            \"theme\": CANONICAL_CLUSTER_NAMES[cluster],\n",
    "            \"score\": grand_physician_val,\n",
    "            \"type\": \"Weighted average of physicians\"\n",
    "        })\n",
    "\n",
    "        # Model score\n",
    "        model_agg = agg_n_and_val_list(model_n_and_vals)\n",
    "        model_val = model_agg[\"value\"]\n",
    "        ax.scatter(\n",
    "            [model_val],\n",
    "            [i],\n",
    "            color=\"red\",\n",
    "            marker=\"^\",\n",
    "            s=80,\n",
    "            edgecolor=\"black\",\n",
    "            label=\"GPT-4.1 grader\" if i == 0 else None,\n",
    "        )\n",
    "        all_rows.append({\n",
    "            \"theme\": CANONICAL_CLUSTER_NAMES[cluster],\n",
    "            \"score\": model_val,\n",
    "            \"type\": \"GPT-4.1 grader\"\n",
    "        })\n",
    "\n",
    "\n",
    "        # Model percentile among physicians (ties counted as half)\n",
    "        model_percentile = (\n",
    "            (\n",
    "                np.sum(np.array(indiv_vals) < model_val)\n",
    "                + 0.5 * np.sum(np.array(indiv_vals) == model_val)\n",
    "            )\n",
    "            / num_physicians\n",
    "            * 100\n",
    "        )\n",
    "\n",
    "        # Save row for summary table\n",
    "        summary_rows.append(\n",
    "            {\n",
    "                \"Cluster\": CANONICAL_CLUSTER_NAMES[cluster.replace(\"cluster:\", \"\")],\n",
    "                \"# Physicians\": num_physicians,\n",
    "                \"Physician Weighted Avg\": round(grand_physician_val, 3),\n",
    "                \"Model Score\": round(model_val, 3),\n",
    "                \"Model Percentile\": round(model_percentile, 1),\n",
    "            }\n",
    "        )\n",
    "\n",
    "    # Finalise plot\n",
    "    cluster_labels = [wrap_label(CANONICAL_CLUSTER_NAMES[c.replace(\"cluster:\", \"\")], 20) for c in cluster_names]\n",
    "    ax.set_yticks(y_pos)\n",
    "    ax.set_yticklabels(cluster_labels)\n",
    "    ax.set_xlabel(\"Macro F1\")\n",
    "    ax.set_xlim(0, 1)\n",
    "    ax.set_title(\"Model-physician and physician-physician agreement for HealthBench Consensus\")\n",
    "\n",
    "    fig.set_size_inches(15, 5)\n",
    "    handles, labels = ax.get_legend_handles_labels()\n",
    "    by_label = dict(zip(labels, handles))\n",
    "    ax.legend(by_label.values(), by_label.keys(), loc=\"center left\", bbox_to_anchor=(1.02, 0.5))\n",
    "    plt.tight_layout(rect=[0, 0, 0.85, 1])\n",
    "    plt.show()\n",
    "\n",
    "    # Display summary table\n",
    "    summary_df = pd.DataFrame(summary_rows).set_index(\"Cluster\")\n",
    "    display(summary_df)\n",
    "    print(summary_df.to_latex())\n",
    "\n",
    "    return pd.DataFrame(all_rows)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "meta_eval_results_all = get_results_by_filename(meta_eval_allresults_filename_list)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Plot agreement for every grader model in the meta-eval results.\n",
    "# NOTE(review): meta_eval_df is rebound on every iteration, so only the last\n",
    "# model's table survives for the save cell below — confirm this is intended.\n",
    "for model_name, results in meta_eval_results_all.items():\n",
    "    metrics = results['metrics']\n",
    "    metadata = results['metadata']\n",
    "    individual_physician_agreement_metrics = metadata['physician_agreement_metric_lists']\n",
    "    overall_model_agreement_metrics = metadata['model_agreement_metrics']\n",
    "    meta_eval_df = plot_model_physician_agreement(metrics, individual_physician_agreement_metrics, overall_model_agreement_metrics)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "save_csv_and_print(meta_eval_df, 'meta_eval_data.csv')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Plot worst at k"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [],
   "source": [
    "def rubric_accuracy_per_instance(rubric_items: list[dict[str, int]]) -> float:\n",
    "    total_possible_points = sum(\n",
    "        rubric_item['points'] for rubric_item in rubric_items if rubric_item['points'] > 0\n",
    "    )\n",
    "    if total_possible_points <= 0:\n",
    "        # should not happen for overall score, but may happen for tags\n",
    "        raise ValueError(f\"Total possible points is 0 for rubric items: {rubric_items}\")\n",
    "\n",
    "    achieved_points = sum(\n",
    "        rubric_item['points']\n",
    "        for rubric_item in rubric_items\n",
    "        if rubric_item['criteria_met']\n",
    "    )\n",
    "    overall_score = achieved_points / total_possible_points\n",
    "    return overall_score\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {},
   "outputs": [],
   "source": [
    "def plot_fail_at_k(fail_at_k_dicts_with_labels, x_label, y_label, title, show_scatter=True):\n",
    "    \"\"\"\n",
    "    Plot one or more fail-at-k dictionaries as lines (optionally with scatter markers).\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    fail_at_k_dicts_with_labels : list[tuple[dict, str]]\n",
    "        A list of 2-tuples of the form (dict, label).\n",
    "        Each dict maps k-values → metric value, and the label appears in the legend.\n",
    "    x_label : str\n",
    "        Label for the x-axis.\n",
    "    y_label : str\n",
    "        Label for the y-axis.\n",
    "    title : str\n",
    "        Figure title.\n",
    "    show_scatter : bool, optional\n",
    "        If True (default) draw scatter markers on top of the line.\n",
    "    \"\"\"\n",
    "    if not fail_at_k_dicts_with_labels:\n",
    "        raise ValueError(\"Provide at least one (dict, label) pair.\")\n",
    "\n",
    "    plt.figure()\n",
    "\n",
    "    all_data = []\n",
    "\n",
    "    fail_at_k_dicts_with_labels = sorted(fail_at_k_dicts_with_labels, key=lambda x: np.mean(list(x[0].values())), reverse=True)\n",
    "    for fail_at_k_dict, label in fail_at_k_dicts_with_labels:\n",
    "        x = list(fail_at_k_dict.keys())\n",
    "        y = list(fail_at_k_dict.values())\n",
    "        y = np.clip(y, 0, 1)\n",
    "        mapped_label = models_to_canonical_name[label]\n",
    "        line, = plt.plot(x, y, label=mapped_label)        # line\n",
    "        if show_scatter:\n",
    "            plt.scatter(x, y, color=line.get_color())  # scatter, same color\n",
    "\n",
    "        # distinct names so the x/y plot arrays are not shadowed by loop scalars\n",
    "        for k_val, fail_rate in zip(x, y):\n",
    "            all_data.append({\n",
    "                'model': label,\n",
    "                'k': k_val,\n",
    "                'any_fail_rate': fail_rate,\n",
    "            })\n",
    "\n",
    "    plt.xlabel(x_label)\n",
    "    plt.ylabel(y_label)\n",
    "    plt.title(title)\n",
    "    plt.xticks([k for k in sorted(set().union(*[d[0].keys() for d in fail_at_k_dicts_with_labels])) if k == 1 or k % 2 == 0])\n",
    "    plt.legend()\n",
    "    plt.tight_layout()\n",
    "    plt.show()\n",
    "\n",
    "    return pd.DataFrame(all_data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "results_worst_at_k = get_results_by_filename(many_replicate_filename_list)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {},
   "outputs": [],
   "source": [
    "results_worst_at_k_pivoted = {}\n",
    "for model_name, results in results_worst_at_k.items():\n",
    "    example_level_metadata = results[\"metadata\"][\"example_level_metadata\"]\n",
    "    results_by_prompt = defaultdict(list)\n",
    "    for prompt in example_level_metadata:\n",
    "        results_by_prompt[prompt['prompt_id']].append(prompt['rubric_items'])\n",
    "    results_worst_at_k_pivoted[model_name] = list(results_by_prompt.values())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {},
   "outputs": [],
   "source": [
    "def precompute_comb_indices(N):\n",
    "    \"\"\"\n",
    "    Return a dict:  k -> (n_subsets, k) array containing\n",
    "    every length-k subset of {0, 1, 2, ..., N-1}.\n",
    "    (N is the total number of samples drawn and therefore the max worst-at-k that can be computed)\n",
    "    This can be used as the an index for the scores_per_instance array.\n",
    "    \"\"\"\n",
    "    combination_indices = {}\n",
    "    for k in range(1, N + 1):\n",
    "        combination_indices[k] = np.array(list(itertools.combinations(range(N), k)))\n",
    "    return combination_indices\n",
    "\n",
    "seen_n_instances = set()\n",
    "for model_name, instances_per_problem in results_worst_at_k_pivoted.items():\n",
    "    for instance in instances_per_problem:\n",
    "        seen_n_instances.add(len(instance))  # set membership handles dedup; no pre-check needed\n",
    "\n",
    "assert len(seen_n_instances) == 1\n",
    "n_instances = list(seen_n_instances)[0]\n",
    "combination_indices = precompute_comb_indices(n_instances)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {},
   "outputs": [],
   "source": [
    "worst_at_k_per_model = defaultdict(dict)\n",
    "for model_name, instances_per_problem in results_worst_at_k_pivoted.items():\n",
    "    worst_at_k_per_problem = defaultdict(list)\n",
    "    for results_per_instance in instances_per_problem:\n",
    "        scores_per_instance = np.array([rubric_accuracy_per_instance(instance) for instance in results_per_instance])\n",
    "        assert len(scores_per_instance) == n_instances\n",
    "        for k in range(1, len(scores_per_instance) + 1):\n",
    "            subset_idxs = combination_indices[k]\n",
    "            subsets = scores_per_instance[subset_idxs]\n",
    "            assert subsets.shape == (len(subset_idxs), k)\n",
    "            worst_of_k_per_subset = np.min(subsets, axis = 1)\n",
    "            assert len(worst_of_k_per_subset) == len(subset_idxs)\n",
    "            average_worst_of_k = np.mean(worst_of_k_per_subset)\n",
    "            worst_at_k_per_problem[k].append(average_worst_of_k)\n",
    "\n",
    "    for k, scores in worst_at_k_per_problem.items():\n",
    "        assert len(scores) == 5000, f\"len(scores) = {len(scores)}\"\n",
    "        mean_score = sum(scores) / len(scores)\n",
    "        clipped_mean_score = np.clip(mean_score, 0, 1)\n",
    "        worst_at_k_per_model[model_name][k] = clipped_mean_score"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# each entry is (scores_by_k dict, model_name), the (dict, label) pair shape plot_fail_at_k expects\n",
    "worst_at_k_list = [(scores_by_k, model_name) for model_name, scores_by_k in worst_at_k_per_model.items()]\n",
    "fail_at_k_data = plot_fail_at_k(worst_at_k_list, x_label=\"Number of samples (k)\", y_label=\"Score\", title = \"Worst-case HealthBench score at k samples\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "save_csv_and_print(fail_at_k_data, 'fail_at_k_data.csv')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Analysis of eval variability and length-adjusted win rates"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {},
   "outputs": [],
   "source": [
    "scores_by_eval_run_by_model = defaultdict(list)\n",
    "for model_name, instances_per_problem in results_worst_at_k_pivoted.items():\n",
    "    results_by_eval_run = list(zip(*instances_per_problem))\n",
    "    for results_per_eval_run in results_by_eval_run:\n",
    "        problem_level_scores = [rubric_accuracy_per_instance(instance) for instance in results_per_eval_run]\n",
    "        overall_score = np.mean(problem_level_scores)\n",
    "        overall_score = np.clip(overall_score, 0, 1)\n",
    "        scores_by_eval_run_by_model[model_name].append(overall_score)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Prepare a DataFrame summarizing mean, std, min, max for each model's eval run scores\n",
    "summary_stats = []\n",
    "for model_name, scores in scores_by_eval_run_by_model.items():\n",
    "    if len(scores) == 0:\n",
    "        continue\n",
    "    stats = {\n",
    "        \"model\": model_name,\n",
    "        \"mean\": np.mean(scores),\n",
    "        \"min\": np.min(scores),\n",
    "        \"max\": np.max(scores),\n",
    "\n",
    "        \"std\": np.std(scores),\n",
    "    }\n",
    "    summary_stats.append(stats)\n",
    "\n",
    "scores_by_eval_run_df = pd.DataFrame(summary_stats).set_index(\"model\")\n",
    "scores_by_eval_run_df = scores_by_eval_run_df.sort_values(\"mean\", ascending=False)\n",
    "\n",
    "# DataFrame.map replaces the deprecated applymap (pandas >= 2.1); matches usage in format_win_rate_df\n",
    "formatted_scores_df = scores_by_eval_run_df.map(lambda x: f\"{x:.4f}\" if isinstance(x, (float, int)) else x)\n",
    "display(formatted_scores_df)\n",
    "print(formatted_scores_df.to_latex())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {},
   "outputs": [],
   "source": [
    "# desired data shape:\n",
    "# problem (5000) -> model (5) -> list of solutions from that model\n",
    "\n",
    "lengths = []\n",
    "worst_at_k_model_names = list(results_worst_at_k.keys())\n",
    "results_by_prompt_by_model = defaultdict(lambda: defaultdict(list))\n",
    "for model_name, results in results_worst_at_k.items():\n",
    "    example_level_metadata = results[\"metadata\"][\"example_level_metadata\"]\n",
    "    for prompt in example_level_metadata:\n",
    "        score = rubric_accuracy_per_instance(prompt['rubric_items'])\n",
    "        length = len(prompt['completion'][0]['content'])\n",
    "        lengths.append(length)\n",
    "        results_by_prompt_by_model[prompt['prompt_id']][model_name].append({\n",
    "            'score': score,\n",
    "            'length': length,\n",
    "        }) # precompute\n",
    "\n",
    "assert len(results_by_prompt_by_model) == 5000\n",
    "inner_example = list(results_by_prompt_by_model.values())[0]\n",
    "inner_inner_example = list(inner_example.values())[0]\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {},
   "outputs": [],
   "source": [
    "def compare_model_results_per_problem(results_modela, results_modelb, length_control: float | None = None):\n",
    "    # take every pair of results\n",
    "    # figure out which is better\n",
    "    # return the list of wins\n",
    "    wins = []\n",
    "\n",
    "    for result_modela, result_modelb in itertools.product(results_modela, results_modelb):\n",
    "\n",
    "        if length_control is not None:\n",
    "            length_a = result_modela['length']\n",
    "            length_b = result_modelb['length']\n",
    "            if length_a == 0 or length_b == 0:\n",
    "                continue\n",
    "\n",
    "            symmetric_pct_diff = abs(length_a - length_b) / ((length_a + length_b) / 2)\n",
    "            if symmetric_pct_diff > length_control:\n",
    "                continue\n",
    "\n",
    "        score_a = result_modela['score']\n",
    "        score_b = result_modelb['score']\n",
    "        if score_a > score_b:\n",
    "            wins.append(1)\n",
    "        elif score_a < score_b:\n",
    "            wins.append(0)\n",
    "        elif score_a == score_b:\n",
    "            continue\n",
    "\n",
    "    win_rate = (sum(wins) / len(wins)) if wins else None\n",
    "    return win_rate"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_win_rate_df(results_by_prompt_by_model, length_control: float | None = None):\n",
    "    win_rate_lists_modela_vs_modelb = defaultdict(lambda: defaultdict(list))\n",
    "    for modela in worst_at_k_model_names:\n",
    "        for modelb in worst_at_k_model_names:\n",
    "            for _prompt_id, prompt_results in results_by_prompt_by_model.items():\n",
    "                results_modela = prompt_results[modela]\n",
    "                results_modelb = prompt_results[modelb]\n",
    "                assert len(results_modela) == len(results_modelb) == 16\n",
    "                win_rate = compare_model_results_per_problem(results_modela, results_modelb, length_control)\n",
    "                if win_rate is not None:\n",
    "                    win_rate_lists_modela_vs_modelb[modela][modelb].append(win_rate)\n",
    "\n",
    "    win_rates_modela_vs_modelb = defaultdict(dict)\n",
    "    for modela in worst_at_k_model_names:\n",
    "        for modelb in worst_at_k_model_names:\n",
    "            win_rate_list = win_rate_lists_modela_vs_modelb[modela][modelb]\n",
    "            mapped_model_a = models_to_canonical_name[modela]\n",
    "            mapped_model_b = models_to_canonical_name[modelb]\n",
    "            win_rates_modela_vs_modelb[mapped_model_a][mapped_model_b] = sum(win_rate_list) / len(win_rate_list)\n",
    "\n",
    "    df_win_rates = pd.DataFrame(win_rates_modela_vs_modelb)\n",
    "    # Compute mean win rate for each model (row)\n",
    "    mean_win_rates = df_win_rates.mean(axis=1).sort_values(ascending=True)\n",
    "    # Reorder both rows and columns by descending mean win rate\n",
    "    df_win_rates = df_win_rates.loc[mean_win_rates.index, mean_win_rates.index]\n",
    "    df_win_rates = df_win_rates.T.round(3)\n",
    "    return df_win_rates\n",
    "\n",
    "def format_win_rate_df(df):\n",
    "    df = df.map(lambda x: f\"{x:.1%}\" if x != 0.5 and pd.notna(x) else \"-\")\n",
    "    return df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "win_rate_df_no_length_control = get_win_rate_df(results_by_prompt_by_model, length_control = None)\n",
    "formatted_win_rate_df_no_length_control = format_win_rate_df(win_rate_df_no_length_control)\n",
    "display(formatted_win_rate_df_no_length_control)\n",
    "print(formatted_win_rate_df_no_length_control.to_latex())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "win_rate_df_length_control = get_win_rate_df(results_by_prompt_by_model, length_control = 0.1)\n",
    "formatted_win_rate_df_length_control = format_win_rate_df(win_rate_df_length_control)\n",
    "display(formatted_win_rate_df_length_control)\n",
    "print(formatted_win_rate_df_length_control.to_latex())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "win_rate_df_diff = (win_rate_df_no_length_control - win_rate_df_length_control) / (win_rate_df_no_length_control - 0.5)\n",
    "formatted_win_rate_df_diff = format_win_rate_df(win_rate_df_diff)\n",
    "display(formatted_win_rate_df_diff)\n",
    "print(formatted_win_rate_df_diff.to_latex())"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Plot distributions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "results_main_eval = get_results_by_filename(main_allresults_filename_list)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "## Per example distribution of scores.\n",
    "## I.e. Just plot the mass of rubric scores per one instance.\n",
    "rubric_scores = []\n",
    "for model_name, results in results_main_eval.items():\n",
    "    rubric_scores_per_model = []\n",
    "    data = results[\"metadata\"][\"example_level_metadata\"]\n",
    "    for prompt in data:\n",
    "        rubric_scores_per_model.append(rubric_accuracy_per_instance(prompt['rubric_items']))\n",
    "    rubric_scores.append([rubric_scores_per_model, model_name])\n",
    "# Sort models by mean rubric score (descending)\n",
    "rubric_scores = sorted(\n",
    "    rubric_scores,\n",
    "    key=lambda x: sum(x[0]) / len(x[0]) if len(x[0]) > 0 else float('-inf'),\n",
    "    reverse=True\n",
    ")\n",
    "\n",
    "plot_distribution(rubric_scores, title=\"Distribution of scores per problem\", x_label=\"Score (clipped to [0,1])\", y_label=\"Relative frequency\", bins=20, clip=(0, 1), markers = False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "## Solution length distribution\n",
    "## Per one instance.\n",
    "solution_lengths = []\n",
    "for model_name, results in results_main_eval.items():\n",
    "    solution_lengths_per_model = []\n",
    "    data = results[\"metadata\"][\"example_level_metadata\"]\n",
    "    for prompt in data:\n",
    "        usage = prompt['usage']\n",
    "        if usage['output_tokens'] is None:\n",
    "            continue\n",
    "        solution_lengths_per_model.append(usage['output_tokens'])\n",
    "    solution_lengths.append([solution_lengths_per_model, model_name])\n",
    "\n",
    "clip_to = 4000\n",
    "plot_distribution(solution_lengths, title=f\"Distribution of sollen\\n(clipped to [0, {clip_to}])\", x_label=\"sollen\", y_label=\"Frequency\", bins=40, clip = (0, clip_to))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "## Response length distribution\n",
    "## Per one instance.\n",
    "response_lengths = []\n",
    "for model_name, results in results_worst_at_k.items():\n",
    "    data = results[\"metadata\"][\"example_level_metadata\"]\n",
    "    response_lengths_per_model = []\n",
    "    for prompt in data:\n",
    "        usage = prompt['usage']\n",
    "        if usage['output_tokens'] is None or usage['output_reasoning_tokens'] is None:\n",
    "            continue\n",
    "        response_lengths_per_model.append(usage['output_tokens'] - usage['output_reasoning_tokens'])\n",
    "\n",
    "    response_lengths.append([response_lengths_per_model, model_name])\n",
    "\n",
    "clip_to = 2000\n",
    "plot_distribution(response_lengths, title=f\"Distribution of response length (final message)\\n(clipped to [0, {clip_to}])\", x_label=\"response length\", y_label=\"Frequency\", bins=40, clip=(0, clip_to))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Correlation plots"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "metadata": {},
   "outputs": [],
   "source": [
    "def plot_lista_vs_listb(\n",
    "    list_pairs_with_labels,\n",
    "    x_label,\n",
    "    y_label,\n",
    "    title,\n",
    "    alpha: float | None = None,\n",
    "    clipx: tuple[float | None, float | None] | None = None,\n",
    "    clipy: tuple[float | None, float | None] | None = None,\n",
    "    show_trendline=True,\n",
    "):\n",
    "    \"\"\"\n",
    "    Plot one or more (x-list, y-list) pairs as scatters with optional OLS trendlines.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    *list_pairs_with_labels : tuple[list, list, str]\n",
    "        Any number of 3-tuples: (lista, listb, label).\n",
    "        - lista : x-axis values\n",
    "        - listb : y-axis values\n",
    "        - label : legend label for that pair\n",
    "    x_label        : str, optional  – label for x-axis.\n",
    "    y_label        : str, optional  – label for y-axis.\n",
    "    title          : str, optional  – plot title.\n",
    "    show_trendline : bool, optional – draw best-fit line(s) if True (default).\n",
    "    \"\"\"\n",
    "    if not list_pairs_with_labels:\n",
    "        raise ValueError(\"Provide at least one (x_list, y_list, label) triple.\")\n",
    "\n",
    "    plt.figure()\n",
    "\n",
    "    for lista, listb, label in list_pairs_with_labels:\n",
    "        if len(lista) != len(listb):\n",
    "            raise ValueError(f\"Lists for '{label}' are not the same length.\")\n",
    "\n",
    "        if clipx:\n",
    "            lista = np.clip(lista, clipx[0], clipx[1])\n",
    "        if clipy:\n",
    "            listb = np.clip(listb, clipy[0], clipy[1])\n",
    "\n",
    "        # Scatter\n",
    "        line = plt.scatter(lista, listb, label=label, alpha=alpha)  # draw; keep handle\n",
    "\n",
    "        # Trend-line (OLS) – use same color as scatter\n",
    "        if show_trendline:\n",
    "            m, b = np.polyfit(lista, listb, 1)\n",
    "            xs = np.linspace(min(lista), max(lista), 200)\n",
    "            plt.plot(xs, m * xs + b, color=line.get_facecolor().flatten())\n",
    "\n",
    "        # Pearson r for legend suffix\n",
    "        r = float(np.corrcoef(lista, listb)[0, 1])\n",
    "        print(f\"Pearson r for {label}: {r:.3f}\")\n",
    "        from scipy.stats import spearmanr\n",
    "        r_spearman = float(spearmanr(lista, listb).statistic)\n",
    "        print(f\"Spearman r for {label}: {r_spearman:.3f}\")\n",
    "\n",
    "    plt.xlabel(x_label)\n",
    "    plt.ylabel(y_label)\n",
    "    plt.title(title)\n",
    "    plt.tight_layout()\n",
    "    plt.legend()\n",
    "    plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "## Solution length VS score\n",
    "## Per one instance.\n",
    "solution_length_vs_score = []\n",
    "for model_name, results in results_worst_at_k.items():\n",
    "    data = results[\"metadata\"][\"example_level_metadata\"]\n",
    "    solution_lengths_per_model = []\n",
    "    rubric_scores_per_model = []\n",
    "    for prompt in data:\n",
    "        usage = prompt['usage']\n",
    "        if usage['output_tokens'] is None:\n",
    "            continue\n",
    "        solution_lengths_per_model.append(usage['output_tokens'])\n",
    "        rubric_scores_per_model.append(rubric_accuracy_per_instance(prompt['rubric_items']))\n",
    "    solution_length_vs_score.append([rubric_scores_per_model, solution_lengths_per_model, model_name])\n",
    "\n",
    "print('Response + CoT length\\n')\n",
    "plot_lista_vs_listb(solution_length_vs_score, x_label=\"Rubrics Score\", y_label=\"Solution length\", title=\"Rubrics Score VS Solution length\", clipx=(0, 1), alpha = 0.3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "## Response length VS score\n",
    "## Per one instance.\n",
    "response_length_vs_score = []\n",
    "for model_name, results in results_worst_at_k.items():\n",
    "    data = results[\"metadata\"][\"example_level_metadata\"]\n",
    "    response_lengths_per_model = []\n",
    "    rubric_scores_per_model = []\n",
    "    for prompt in data:\n",
    "        usage = prompt['usage']\n",
    "        if usage['output_tokens'] is None or usage['output_reasoning_tokens'] is None:\n",
    "            continue\n",
    "        response_lengths_per_model.append(usage['output_tokens'] - usage['output_reasoning_tokens'])\n",
    "        rubric_scores_per_model.append(rubric_accuracy_per_instance(prompt['rubric_items']))\n",
    "    response_length_vs_score.append([rubric_scores_per_model, response_lengths_per_model, model_name])\n",
    "\n",
    "print('Response length only\\n')\n",
    "plot_lista_vs_listb(response_length_vs_score, x_label=\"Rubrics Score\", y_label=\"Response length\", title=\"Rubrics Score VS Response length\", clipx=(0, 1), clipy=(0, None), alpha = 0.3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "## Response length VS score\n",
    "## Per one instance.\n",
    "response_length_vs_score = []\n",
    "for model_name, results in results_worst_at_k.items():\n",
    "    if not ('o1' in model_name or 'o3' in model_name or 'o4' in model_name):\n",
    "        continue\n",
    "    data = results[\"metadata\"][\"example_level_metadata\"]\n",
    "    response_lengths_per_model = []\n",
    "    rubric_scores_per_model = []\n",
    "    for prompt in data:\n",
    "        usage = prompt['usage']\n",
    "        if usage['output_tokens'] is None or usage['output_reasoning_tokens'] is None:\n",
    "            continue\n",
    "        response_lengths_per_model.append(usage['output_reasoning_tokens'])\n",
    "        rubric_scores_per_model.append(rubric_accuracy_per_instance(prompt['rubric_items']))\n",
    "    response_length_vs_score.append([rubric_scores_per_model, response_lengths_per_model, model_name])\n",
    "\n",
    "print('CoT length only\\n')\n",
    "plot_lista_vs_listb(response_length_vs_score, x_label=\"Rubrics Score\", y_label=\"Response length\", title=\"Rubrics Score VS CoT length\", clipx=(0, 1), clipy=(0, None), alpha = 0.3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "## Response length VS score\n",
    "## Per one instance.\n",
    "response_length_vs_thinking_length = []\n",
    "for model_name, results in results_worst_at_k.items():\n",
    "    if not ('o1' in model_name or 'o3' in model_name or 'o4' in model_name):\n",
    "        continue\n",
    "    data = results[\"metadata\"][\"example_level_metadata\"]\n",
    "    response_lengths_per_model = []\n",
    "    thinking_lengths_per_model = []\n",
    "    for prompt in data:\n",
    "        usage = prompt['usage']\n",
    "        if usage['output_tokens'] is None or usage['output_reasoning_tokens'] is None:\n",
    "            continue\n",
    "        thinking_lengths_per_model.append(usage['output_reasoning_tokens'])\n",
    "        response_lengths_per_model.append(usage['output_tokens'] - usage['output_reasoning_tokens'])\n",
    "    response_length_vs_thinking_length.append([thinking_lengths_per_model, response_lengths_per_model, model_name])\n",
    "\n",
    "print('Response length VS CoT length\\n')\n",
    "plot_lista_vs_listb(response_length_vs_thinking_length, x_label=\"CoT length\", y_label=\"Response length\", title=\"CoT length VS Response length\", clipx=(0, None), clipy=(0, None), alpha = 0.3)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Cost scatter"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 60,
   "metadata": {},
   "outputs": [],
   "source": [
    "def plot_dollar_cost_scatter(\n",
    "    entries,\n",
    "    title,\n",
    "    x_label,\n",
    "    y_label,\n",
    "    *,\n",
    "    alpha: float = 0.8,\n",
    "    scale: str = \"linear\",\n",
    "    model_families: list[list[str]] | None = None,\n",
    "):\n",
    "    \"\"\"\n",
    "    Scatter-plot “cost vs. score” pairs while obeying three visual rules:\n",
    "\n",
    "    1.  Each *family* is drawn in one colour.\n",
    "        Within that family the **cheapest** model gets an “x”, the\n",
    "        2-nd cheapest a square “s”, and the 3-rd cheapest a diamond “D”.\n",
    "        (Families are assumed to contain ≤ 3 models.)\n",
    "\n",
    "        Labels that do **not** belong to any family (“orphans”) are drawn\n",
    "        with a circle “o”, each in its own colour.\n",
    "\n",
    "    2.  For a log-scaled x-axis, tick labels remain plain-decimal\n",
    "        (e.g. “0.01”, not “1e-2”).\n",
    "\n",
    "    3.  Legend ordering\n",
    "        • All *families* are listed first, with **each family reversed**\n",
    "          (i.e. the last label in the family list appears first).\n",
    "        • Orphans are listed after all families.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    entries : list[(float, float, str)]\n",
    "        (cost, score, label) triplets.\n",
    "    title, x_label, y_label : str\n",
    "        Axis metadata.\n",
    "    alpha : float, optional\n",
    "        Point opacity.\n",
    "    scale : {\"linear\", \"log\"}, optional\n",
    "        X-axis scale.  Default is \"linear\".\n",
    "    model_families : list[list[str]] | None\n",
    "        Lists of labels that belong together.\n",
    "    \"\"\"\n",
    "    import numpy as np\n",
    "    import matplotlib.pyplot as plt\n",
    "    from matplotlib.ticker import FuncFormatter\n",
    "\n",
    "    if model_families is not None:\n",
    "        model_families = [[models_to_canonical_name[model] for model in family] for family in model_families]\n",
    "    else:\n",
    "        model_families = []\n",
    "\n",
    "    RANK_MARKERS = (\"x\", \"s\", \"D\")     # cheapest → most-expensive\n",
    "    ORPHAN_MARKER = \"o\"\n",
    "\n",
    "    colour_cycle = itertools.cycle(\n",
    "        plt.rcParams[\"axes.prop_cycle\"].by_key()[\"color\"]\n",
    "    )\n",
    "\n",
    "\n",
    "    grouped: dict[str, dict[str, list[float]]] = {}\n",
    "    for x, y, label in entries:\n",
    "        grouped.setdefault(label, {\"x\": [], \"y\": []})\n",
    "        grouped[label][\"x\"].append(x)\n",
    "        grouped[label][\"y\"].append(y)\n",
    "\n",
    "\n",
    "    label_to_colour: dict[str, str] = {}\n",
    "    label_to_marker: dict[str, str] = {}\n",
    "\n",
    "    # 2.1 Families (shared colour, rank-based marker)\n",
    "    for family in model_families:\n",
    "        fam_colour = next(colour_cycle)\n",
    "\n",
    "        # Determine the *cost* rank of each member (ascending)\n",
    "        ranked = sorted(\n",
    "            (lbl for lbl in family if lbl in grouped),\n",
    "            key=lambda lbl: np.mean(grouped[lbl][\"x\"]),\n",
    "        )\n",
    "        for rank, lbl in enumerate(ranked):\n",
    "            label_to_colour[lbl] = fam_colour\n",
    "            label_to_marker[lbl] = RANK_MARKERS[min(rank, 2)]\n",
    "\n",
    "    # 2.2 Orphans (unique colour, circle marker)\n",
    "    for lbl in grouped:\n",
    "        if lbl not in label_to_marker:\n",
    "            label_to_colour[lbl] = next(colour_cycle)\n",
    "            label_to_marker[lbl] = ORPHAN_MARKER\n",
    "\n",
    "\n",
    "    plt.figure()\n",
    "    label_to_handle: dict[str, plt.Artist] = {}\n",
    "    for lbl, pts in grouped.items():\n",
    "        h = plt.scatter(\n",
    "            pts[\"x\"],\n",
    "            pts[\"y\"],\n",
    "            marker=label_to_marker[lbl],\n",
    "            color=label_to_colour[lbl],\n",
    "            alpha=alpha,\n",
    "            label=lbl,\n",
    "        )\n",
    "        label_to_handle[lbl] = h\n",
    "\n",
    "\n",
    "    for family in model_families:\n",
    "        xs, ys = [], []\n",
    "        for lbl in family:\n",
    "            if lbl in grouped:\n",
    "                xs.append(np.mean(grouped[lbl][\"x\"]))\n",
    "                ys.append(np.mean(grouped[lbl][\"y\"]))\n",
    "        if len(xs) >= 2:\n",
    "            order = np.argsort(xs)                  # cheapest → priciest\n",
    "            xs = np.array(xs)[order]\n",
    "            ys = np.array(ys)[order]\n",
    "            plt.plot(\n",
    "                xs,\n",
    "                ys,\n",
    "                color=label_to_colour[family[0]],\n",
    "                linewidth=1.5,\n",
    "                alpha=min(alpha + 0.1, 1.0),\n",
    "            )\n",
    "\n",
    "    ax = plt.gca()\n",
    "    ax.set_xlabel(x_label)\n",
    "    ax.set_ylabel(y_label)\n",
    "    ax.set_title(title)\n",
    "    ax.tick_params(bottom=True, left=True)\n",
    "\n",
    "    if scale == \"log\":\n",
    "        ax.set_xscale(\"log\")\n",
    "\n",
    "        # Plain-decimal tick labels (“0.01”, not “1e-2”)\n",
    "        def _fmt(val, _pos):\n",
    "            if val == 0:\n",
    "                return \"0\"\n",
    "            s = f\"{val:.6f}\".rstrip(\"0\").rstrip(\".\")\n",
    "            return s\n",
    "\n",
    "        ax.xaxis.set_major_formatter(FuncFormatter(_fmt))\n",
    "    else:\n",
    "        ax.set_xscale(\"linear\")\n",
    "\n",
    "\n",
    "    legend_handles: list[plt.Artist] = []\n",
    "    legend_labels: list[str] = []\n",
    "\n",
    "    # Families\n",
    "    for family in model_families:\n",
    "        for lbl in reversed(family):           # reverse the provided order\n",
    "            if lbl in label_to_handle:\n",
    "                legend_handles.append(label_to_handle[lbl])\n",
    "                legend_labels.append(lbl)\n",
    "\n",
    "    # Orphans\n",
    "    for lbl in grouped:\n",
    "        if lbl not in legend_labels:           # keep insertion order of `grouped`\n",
    "            legend_handles.append(label_to_handle[lbl])\n",
    "            legend_labels.append(lbl)\n",
    "\n",
    "    ax.legend(\n",
    "        handles=legend_handles,\n",
    "        labels=legend_labels,\n",
    "        loc=\"center left\",\n",
    "        bbox_to_anchor=(1.02, 0.5),\n",
    "        borderaxespad=0.0,\n",
    "    )\n",
    "\n",
    "\n",
    "    plt.tight_layout()\n",
    "    plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "## Rubric score vs. dollar cost\n",
    "# Prices are USD per 1M tokens (input / output), so costs below divide by 1e6.\n",
    "DOLLAR_COST_PER_MODEL = {\n",
    "    'o3': {'input': 10, 'output': 40},\n",
    "    'o3_high': {'input': 10, 'output': 40},\n",
    "    'o3_low': {'input': 10, 'output': 40},\n",
    "    'o4-mini': {'input': 1.1, 'output': 4.4},\n",
    "    'o4-mini_high': {'input': 1.1, 'output': 4.4},\n",
    "    'o4-mini_low': {'input': 1.1, 'output': 4.4},\n",
    "    'gpt-4.1': {'input': 2, 'output': 8},\n",
    "    'gpt-4.1-mini': {'input': 0.4, 'output': 1.6},\n",
    "    'gpt-4.1-nano': {'input': 0.1, 'output': 0.4},\n",
    "    'o1': {'input': 15, 'output': 60},\n",
    "    'o1_high': {'input': 15, 'output': 60},\n",
    "    'o1_low': {'input': 15, 'output': 60},\n",
    "    'o1-pro': {'input': 150, 'output': 600},\n",
    "    'o1-preview': {'input': 15, 'output': 60},\n",
    "    'o1-mini': {'input': 1.1, 'output': 4.4},\n",
    "    'o3-mini': {'input': 1.1, 'output': 4.4},\n",
    "    'o3-mini_low': {'input': 1.1, 'output': 4.4},\n",
    "    'o3-mini_high': {'input': 1.1, 'output': 4.4},\n",
    "    'gpt-4.5-preview': {'input': 75, 'output': 150},\n",
    "    'gpt-4o-2024-08-06': {'input': 2.5, 'output': 10},\n",
    "    'gpt-4o-mini': {'input': 0.15, 'output': 0.6},\n",
    "    'gpt-4-turbo-2024-04-09': {'input': 10, 'output': 30},\n",
    "    'gpt-3.5-turbo-0125': {'input': 0.5, 'output': 1.5},\n",
    "    'gpt-4-0613': {'input': 30, 'output': 60},\n",
    "}\n",
    "\n",
    "# Each family is ordered cheapest -> priciest; the plot connects family members with a line.\n",
    "MODEL_FAMILIES = [\n",
    "    ['o3_low', 'o3', 'o3_high'],\n",
    "    ['o1_low', 'o1', 'o1_high'],\n",
    "    ['o3-mini_low', 'o3-mini', 'o3-mini_high'],\n",
    "    ['o4-mini_low', 'o4-mini', 'o4-mini_high'],\n",
    "    ['gpt-4.1-nano', 'gpt-4.1-mini', 'gpt-4.1'],\n",
    "    ['gpt-4o-mini', 'gpt-4o-2024-08-06']\n",
    "]\n",
    "\n",
    "# One [avg cost per example ($), avg rubric score, canonical model name] row per model.\n",
    "rubric_score_cost = []\n",
    "for model_name, results in results_main_eval.items():\n",
    "    # Skip-and-report rather than raising KeyError mid-loop for unpriced models.\n",
    "    if model_name not in DOLLAR_COST_PER_MODEL:\n",
    "        print(f'Skipping {model_name}: no entry in DOLLAR_COST_PER_MODEL')\n",
    "        continue\n",
    "\n",
    "    rubric_scores_per_model = []\n",
    "    input_tokens_list = []\n",
    "    output_tokens_list = []\n",
    "\n",
    "    # Only examples with recorded token usage contribute to the averages.\n",
    "    for example in results[\"metadata\"][\"example_level_metadata\"]:\n",
    "        usage = example['usage']\n",
    "        if usage['input_tokens'] is None or usage['output_tokens'] is None:\n",
    "            continue\n",
    "        input_tokens_list.append(usage['input_tokens'])\n",
    "        output_tokens_list.append(usage['output_tokens'])\n",
    "        rubric_scores_per_model.append(rubric_accuracy_per_instance(example['rubric_items']))\n",
    "\n",
    "    # Guard the averages below against ZeroDivisionError when no usage was recorded.\n",
    "    if not rubric_scores_per_model:\n",
    "        print(f'Skipping {model_name}: no examples with recorded token usage')\n",
    "        continue\n",
    "\n",
    "    avg_rubric_score = sum(rubric_scores_per_model) / len(rubric_scores_per_model)\n",
    "    avg_input_tokens = sum(input_tokens_list) / len(input_tokens_list)\n",
    "    avg_output_tokens = sum(output_tokens_list) / len(output_tokens_list)\n",
    "\n",
    "    # Prices are per 1M tokens, hence the 1e6 divisor.\n",
    "    price = DOLLAR_COST_PER_MODEL[model_name]\n",
    "    avg_cost_per_model = (avg_input_tokens * price['input'] + avg_output_tokens * price['output']) / 1e6\n",
    "    rubric_score_cost.append([avg_cost_per_model, avg_rubric_score, models_to_canonical_name[model_name]])\n",
    "\n",
    "plot_dollar_cost_scatter(rubric_score_cost, title=\"Score vs cost across OpenAI model families\", x_label=\"Inference cost per example ($)\", y_label=\"HealthBench score\", model_families=MODEL_FAMILIES, scale='linear')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Same data as above, on a log-x axis to spread out the performance-cost frontier.\n",
    "plot_dollar_cost_scatter(\n",
    "    rubric_score_cost,\n",
    "    title=\"HealthBench performance-cost frontier\",\n",
    "    x_label=\"Inference cost per example ($)\",\n",
    "    y_label=\"HealthBench score\",\n",
    "    model_families=MODEL_FAMILIES,\n",
    "    scale='log',\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Export the score-vs-cost table with `model` as the leading column.\n",
    "cost_perf_data = pd.DataFrame(\n",
    "    rubric_score_cost,\n",
    "    columns=['cost_usd', 'performance_pct', 'model'],\n",
    ").loc[:, ['model', 'cost_usd', 'performance_pct']]\n",
    "save_csv_and_print(cost_perf_data, 'cost_perf_data.csv')"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "openai",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
