{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "353735ec",
   "metadata": {},
   "source": [
    "NOTE: Some data has been redacted (sensitive information, etc.)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f7dcebbf",
   "metadata": {},
   "outputs": [],
   "source": [
    "from dotenv import load_dotenv\n",
    "import os\n",
    "import openai\n",
    "import tag_machine_api\n",
    "import json\n",
    "import psycopg\n",
    "from pathlib import Path\n",
    "import datasets\n",
    "from collections import defaultdict\n",
    "from tqdm import tqdm\n",
    "import yaml\n",
    "import random\n",
    "import time\n",
    "import csv\n",
    "import gzip\n",
    "import re\n",
    "import base64\n",
    "import magic\n",
    "from PIL import Image\n",
    "import io\n",
    "from concurrent.futures import ThreadPoolExecutor, as_completed\n",
    "import numpy as np\n",
    "from traceback import format_exc\n",
    "from typing import TypeVar, Union, Any, Callable\n",
    "from psycopg.types.json import Jsonb\n",
    "from collections import Counter\n",
    "import itertools\n",
    "from contextlib import contextmanager\n",
    "from pydantic import BaseModel, ConfigDict, model_validator, Field, computed_field\n",
    "from psycopg.rows import dict_row, class_row\n",
    "import requests\n",
    "import pandas as pd\n",
    "import matplotlib.pyplot as plt\n",
    "import math\n",
    "import dataclasses"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d76c1eb7",
   "metadata": {},
   "outputs": [],
   "source": [
     "# ID of the response model currently under evaluation (presumably a training\n",
     "# run ID — confirm). Commented-out values are alternate runs kept for quick switching.\n",
     "#CURRENT_RESPONSE_MODEL = \"8gjfxjdm\"\n",
     "CURRENT_RESPONSE_MODEL = \"5i5xmxdx\"\n",
     "#CURRENT_RESPONSE_MODEL = \"kiqxrbng\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "722834c3",
   "metadata": {},
   "outputs": [],
   "source": [
    "load_dotenv()\n",
    "api = tag_machine_api.TagMachineAPI(os.environ[\"TAG_MACHINE_TOKEN\"], \"XXXXX\")\n",
    "\n",
    "@contextmanager\n",
    "def db_conn(**kwargs):\n",
    "\tconn = psycopg.connect(dbname='postgres', user='postgres', host=str(Path.cwd().parent / 'pg-socket'), **kwargs)\n",
    "\ttry:\n",
    "\t\twith conn.cursor() as cur:\n",
    "\t\t\tyield conn, cur\n",
    "\texcept Exception as e:\n",
    "\t\traise e\n",
    "\tfinally:\n",
    "\t\tconn.close()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fe2709fc",
   "metadata": {},
   "outputs": [],
   "source": [
     "class EvalSample(BaseModel):\n",
     "\t\"\"\"One pairwise preference-eval sample backed by the alignment_preferences_ai table.\n",
     "\n",
     "\tHolds the image reference, the (system, question) prompt, the two model\n",
     "\tresponses being compared (A/B), and the judge's verdict. Instances are\n",
     "\tfrozen; the helper methods return updated copies via model_copy().\n",
     "\t\"\"\"\n",
     "\tmodel_config = ConfigDict(frozen=True, revalidate_instances=\"always\", extra='forbid', strict=True, validate_by_alias=True, validate_by_name=True)\n",
     "\t# Aliased to the DB column name \"image_hash\".\n",
     "\tfilehash: bytes = Field(validation_alias=\"image_hash\")\n",
     "\tsystem: str\n",
     "\tquestion: str\n",
     "\tquestion_type: str\n",
     "\ttask_type: str\n",
     "\tid: int | None = None\n",
     "\tground_truth_knowledge: str | None = None\n",
     "\tresponse_a: str | None = None\n",
     "\tresponse_b: str | None = None\n",
     "\tresponse_a_model: str | None = None\n",
     "\tresponse_b_model: str | None = None\n",
     "\tjudge_system: str | None = None\n",
     "\tjudge_user: str | None = None\n",
     "\tjudge_model: str | None = None\n",
     "\tjudge_reasoning: str | None = None\n",
     "\tjudge_winner: str | None = None\n",
     "\tjudge_response_a_score: float | None = None\n",
     "\tjudge_response_b_score: float | None = None\n",
     "\timage_dataurl: str | None = None\n",
     "\tcreated_at: int | None = None\n",
     "\tupdate_sequence: int | None = None\n",
     "\tin_progress: int | None = None\n",
     "\n",
     "\t@computed_field\n",
     "\t@property\n",
     "\tdef is_done(self) -> bool:\n",
     "\t\t\"\"\"True once a judge winner has been recorded for this sample.\"\"\"\n",
     "\t\treturn self.judge_winner is not None\n",
     "\n",
     "\tdef get_image(self) -> \"EvalSample\":\n",
     "\t\t\"\"\"Return a copy with image_dataurl populated from the image service.\"\"\"\n",
     "\t\timage_data = api.read_image(self.filehash)\n",
     "\t\timage_mime = magic.from_buffer(image_data, mime=True)\n",
     "\t\timage_dataurl = f\"data:{image_mime};base64,{base64.b64encode(image_data).decode('utf-8')}\"\n",
     "\t\treturn self.model_copy(update={\"image_dataurl\": image_dataurl})\n",
     "\t\n",
     "\t@model_validator(mode=\"before\")\n",
     "\t@classmethod\n",
     "\tdef unpack_messages(cls, data: Any) -> Any:\n",
     "\t\t\"\"\"Unpack a DB-style two-message 'messages' transcript into system/question fields.\"\"\"\n",
     "\t\tif isinstance(data, cls):\n",
     "\t\t\treturn data\n",
     "\t\t\n",
     "\t\td: dict[str, Any] = dict(data)\n",
     "\n",
     "\t\tif 'messages' in d and d['messages'] is not None:\n",
     "\t\t\tmsgs: list[dict[str, str]] = d['messages']\n",
     "\n",
     "\t\t\tassert len(msgs) == 2,  f\"Expected 2 messages, got {len(msgs)}\"\n",
     "\t\t\tassert msgs[0]['role'] == 'system', f\"Expected system role, got {msgs[0]['role']}\"\n",
     "\t\t\tassert msgs[1]['role'] == 'user',   f\"Expected user role, got {msgs[1]['role']}\"\n",
     "\n",
     "\t\t\td['system']   = msgs[0]['content']\n",
     "\t\t\td['question'] = msgs[1]['content']\n",
     "\t\t\td.pop('messages', None)\n",
     "\n",
     "\t\treturn d\n",
     "\t\n",
     "\tdef add_to_database(self, cur: psycopg.Cursor) -> \"EvalSample\":\n",
     "\t\t\"\"\"INSERT this sample and return a copy carrying the new row id.\n",
     "\n",
     "\t\tNOTE(review): does not commit; the caller owns the transaction.\n",
     "\t\t\"\"\"\n",
     "\t\tassert self.id is None, \"Sample already has an ID\"\n",
     "\n",
     "\t\tcreated_at = int(time.time()) if self.created_at is None else self.created_at\n",
     "\n",
     "\t\tcur.execute(\"\"\"\n",
     "\t\t\tINSERT INTO alignment_preferences_ai (\n",
     "\t\t\t\timage_hash,\n",
     "\t\t\t\tmessages,\n",
     "\t\t\t\tquestion_type,\n",
     "\t\t\t\ttask_type,\n",
     "\t\t\t\tground_truth_knowledge,\n",
     "\t\t\t\tresponse_a,\n",
     "\t\t\t\tresponse_b,\n",
     "\t\t\t\tresponse_a_model,\n",
     "\t\t\t\tresponse_b_model,\n",
     "\t\t\t\tjudge_system,\n",
     "\t\t\t\tjudge_user,\n",
     "\t\t\t\tjudge_model,\n",
     "\t\t\t\tjudge_reasoning,\n",
     "\t\t\t\tjudge_winner,\n",
     "\t\t\t  \tjudge_response_a_score,\n",
     "\t\t\t  \tjudge_response_b_score,\n",
     "\t\t\t\tcreated_at,\n",
     "\t\t\t\tin_progress\n",
     "\t\t\t) VALUES (\n",
     "\t\t\t\t%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s\n",
     "\t\t\t) RETURNING id\n",
     "\t\t\"\"\", (\n",
     "\t\t\tself.filehash,\n",
     "\t\t\tJsonb([\n",
     "\t\t\t\t{\"role\": \"system\", \"content\": self.system},\n",
     "\t\t\t\t{\"role\": \"user\",   \"content\": self.question}\n",
     "\t\t\t]),\n",
     "\t\t\tself.question_type,\n",
     "\t\t\tself.task_type,\n",
     "\t\t\tself.ground_truth_knowledge,\n",
     "\t\t\tself.response_a,\n",
     "\t\t\tself.response_b,\n",
     "\t\t\tself.response_a_model,\n",
     "\t\t\tself.response_b_model,\n",
     "\t\t\tself.judge_system,\n",
     "\t\t\tself.judge_user,\n",
     "\t\t\tself.judge_model,\n",
     "\t\t\tself.judge_reasoning,\n",
     "\t\t\tself.judge_winner,\n",
     "\t\t  \tself.judge_response_a_score,\n",
     "\t\t  \tself.judge_response_b_score,\n",
     "\t\t\tcreated_at,\n",
     "\t\t\tself.in_progress\n",
     "\t\t))\n",
     "\n",
     "\t\trow = cur.fetchone()\n",
     "\t\tassert row is not None, \"Failed to insert sample into database\"\n",
     "\n",
     "\t\treturn self.model_copy(update={\"id\": row[0], \"created_at\": created_at})\n",
     "\t\n",
     "\tdef update_in_database(self, cur: psycopg.Cursor):\n",
     "\t\t\"\"\"UPDATE the mutable (response/judge) columns of this sample's existing row.\n",
     "\n",
     "\t\tFirst re-reads the row and asserts the immutable columns (hash,\n",
     "\t\tmessages, types) still match this instance, then writes the rest.\n",
     "\t\t\"\"\"\n",
     "\t\tassert self.id is not None, \"Sample does not have an ID\"\n",
     "\n",
     "\t\tcur.execute(\"SELECT image_hash, messages, question_type, task_type FROM alignment_preferences_ai WHERE id = %s\", (self.id,))\n",
     "\t\trow = cur.fetchone()\n",
     "\t\tassert row is not None, \"Failed to find sample in database\"\n",
     "\t\tassert row[0] == self.filehash, \"Filehash does not match\"\n",
     "\t\tassert row[1] == [{\"role\": \"system\", \"content\": self.system}, {\"role\": \"user\", \"content\": self.question}], \"Messages do not match\"\n",
     "\t\tassert row[2] == self.question_type, \"Question type does not match\"\n",
     "\t\tassert row[3] == self.task_type, \"Task type does not match\"\n",
     "\n",
     "\t\tcur.execute(\"\"\"\n",
     "\t\t\tUPDATE alignment_preferences_ai\n",
     "\t\t\tSET\n",
     "\t\t\t\tground_truth_knowledge = %s,\n",
     "\t\t\t\tresponse_a = %s,\n",
     "\t\t\t\tresponse_b = %s,\n",
     "\t\t\t\tresponse_a_model = %s,\n",
     "\t\t\t\tresponse_b_model = %s,\n",
     "\t\t\t\tjudge_system = %s,\n",
     "\t\t\t\tjudge_user = %s,\n",
     "\t\t\t\tjudge_model = %s,\n",
     "\t\t\t\tjudge_reasoning = %s,\n",
     "\t\t\t\tjudge_winner = %s,\n",
     "\t\t\t  \tjudge_response_a_score = %s,\n",
     "\t\t\t  \tjudge_response_b_score = %s,\n",
     "\t\t\t\tin_progress = %s\n",
     "\t\t\tWHERE id = %s\n",
     "\t\t\"\"\", (\n",
     "\t\t\tself.ground_truth_knowledge,\n",
     "\t\t\tself.response_a,\n",
     "\t\t\tself.response_b,\n",
     "\t\t\tself.response_a_model,\n",
     "\t\t\tself.response_b_model,\n",
     "\t\t\tself.judge_system,\n",
     "\t\t\tself.judge_user,\n",
     "\t\t\tself.judge_model,\n",
     "\t\t\tself.judge_reasoning,\n",
     "\t\t\tself.judge_winner,\n",
     "\t\t  \tself.judge_response_a_score,\n",
     "\t\t  \tself.judge_response_b_score,\n",
     "\t\t\tself.in_progress,\n",
     "\t\t  \tself.id\n",
     "\t\t))\n",
     "\t\n",
     "\t@staticmethod\n",
     "\tdef get_from_database(conn: psycopg.Connection, id: int) -> \"EvalSample | None\":\n",
     "\t\t\"\"\"Fetch one row by id as an EvalSample, or None if absent.\"\"\"\n",
     "\t\twith conn.cursor(row_factory=class_row(EvalSample)) as cur:\n",
     "\t\t\tcur.execute(\"SELECT * FROM alignment_preferences_ai WHERE id = %s\", (id,))\n",
     "\t\t\trow = cur.fetchone()\n",
     "\t\t\tif row is None:\n",
     "\t\t\t\treturn None\n",
     "\t\t\t\n",
     "\t\t\treturn row\n",
    "\n",
    "\n",
    "class EvalSampleMulti(BaseModel):\n",
    "\tmodel_config = ConfigDict(frozen=True, revalidate_instances=\"always\", extra='forbid', strict=True, validate_by_alias=True, validate_by_name=True)\n",
    "\tfilehash: bytes = Field(validation_alias=\"image_hash\")\n",
    "\tsystem: str\n",
    "\tquestion: str\n",
    "\tquestion_type: str\n",
    "\tground_truth_knowledge: str | None = None\n",
    "\tresponses: list[str] | None = None\n",
    "\tresponses_model: str | None = None\n",
    "\tjudge_system: str | None = None\n",
    "\tjudge_user: str | None = None\n",
    "\tjudge_model: str | None = None\n",
    "\tjudge_reasoning: str | None = None\n",
    "\tjudge_rankings: list[int] | None = None\n",
    "\timage_dataurl: str | None = None\n",
    "\tid: int | None = None\n",
    "\tcreated_at: int | None = None\n",
    "\tupdate_sequence: int | None = None\n",
    "\tin_progress: int | None = None\n",
    "\n",
    "\t@computed_field\n",
    "\t@property\n",
    "\tdef is_done(self) -> bool:\n",
    "\t\treturn self.judge_rankings is not None\n",
    "\n",
    "\tdef get_image(self) -> \"EvalSampleMulti\":\n",
    "\t\timage_data = api.read_image(self.filehash)\n",
    "\t\timage_mime = magic.from_buffer(image_data, mime=True)\n",
    "\t\timage_dataurl = f\"data:{image_mime};base64,{base64.b64encode(image_data).decode('utf-8')}\"\n",
    "\t\treturn self.model_copy(update={\"image_dataurl\": image_dataurl})\n",
    "\t\n",
    "\t@model_validator(mode=\"before\")\n",
    "\t@classmethod\n",
    "\tdef unpack_messages(cls, data: Any) -> Any:\n",
    "\t\tif isinstance(data, cls):\n",
    "\t\t\treturn data\n",
    "\t\t\n",
    "\t\td: dict[str, Any] = dict(data)\n",
    "\n",
    "\t\tif 'messages' in d and d['messages'] is not None:\n",
    "\t\t\tmsgs: list[dict[str, str]] = d['messages']\n",
    "\n",
    "\t\t\tassert len(msgs) == 2,  f\"Expected 2 messages, got {len(msgs)}\"\n",
    "\t\t\tassert msgs[0]['role'] == 'system', f\"Expected system role, got {msgs[0]['role']}\"\n",
    "\t\t\tassert msgs[1]['role'] == 'user',   f\"Expected user role, got {msgs[1]['role']}\"\n",
    "\n",
    "\t\t\td['system']   = msgs[0]['content']\n",
    "\t\t\td['question'] = msgs[1]['content']\n",
    "\t\t\td.pop('messages', None)\n",
    "\n",
    "\t\treturn d\n",
    "\t\n",
    "\tdef add_to_database(self, cur: psycopg.Cursor) -> \"EvalSampleMulti\":\n",
    "\t\tassert self.id is None, \"Sample already has an ID\"\n",
    "\n",
    "\t\tcreated_at = int(time.time()) if self.created_at is None else self.created_at\n",
    "\n",
    "\t\tcur.execute(\"\"\"\n",
    "\t\t\tINSERT INTO alignment_rankings_ai (\n",
    "\t\t\t\timage_hash,\n",
    "\t\t\t\tmessages,\n",
    "\t\t\t\tquestion_type,\n",
    "\t\t\t\tground_truth_knowledge,\n",
    "\t\t\t\tresponses,\n",
    "\t\t\t\tresponses_model,\n",
    "\t\t\t\tjudge_system,\n",
    "\t\t\t\tjudge_user,\n",
    "\t\t\t\tjudge_model,\n",
    "\t\t\t\tjudge_reasoning,\n",
    "\t\t\t\tjudge_rankings,\n",
    "\t\t\t\tcreated_at,\n",
    "\t\t\t\tin_progress\n",
    "\t\t\t) VALUES (\n",
    "\t\t\t\t%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s\n",
    "\t\t\t) RETURNING id\n",
    "\t\t\"\"\", (\n",
    "\t\t\tself.filehash,\n",
    "\t\t\tJsonb([\n",
    "\t\t\t\t{\"role\": \"system\", \"content\": self.system},\n",
    "\t\t\t\t{\"role\": \"user\",   \"content\": self.question}\n",
    "\t\t\t]),\n",
    "\t\t\tself.question_type,\n",
    "\t\t\tself.ground_truth_knowledge,\n",
    "\t\t\tJsonb(self.responses),\n",
    "\t\t\tself.responses_model,\n",
    "\t\t\tself.judge_system,\n",
    "\t\t\tself.judge_user,\n",
    "\t\t\tself.judge_model,\n",
    "\t\t\tself.judge_reasoning,\n",
    "\t\t\tJsonb(self.judge_rankings),\n",
    "\t\t\tcreated_at,\n",
    "\t\t\tself.in_progress\n",
    "\t\t))\n",
    "\n",
    "\t\trow = cur.fetchone()\n",
    "\t\tassert row is not None, \"Failed to insert sample into database\"\n",
    "\n",
    "\t\treturn self.model_copy(update={\"id\": row[0], \"created_at\": created_at})\n",
    "\n",
    "\tdef update_in_database(self, cur: psycopg.Cursor):\n",
    "\t\tassert self.id is not None, \"Sample does not have an ID\"\n",
    "\n",
    "\t\tcur.execute(\"SELECT image_hash, messages, question_type FROM alignment_rankings_ai WHERE id = %s\", (self.id,))\n",
    "\t\trow = cur.fetchone()\n",
    "\t\tassert row is not None, \"Failed to find sample in database\"\n",
    "\t\tassert row[0] == self.filehash, \"Filehash does not match\"\n",
    "\t\tassert row[1] == [{\"role\": \"system\", \"content\": self.system}, {\"role\": \"user\", \"content\": self.question}], \"Messages do not match\"\n",
    "\t\tassert row[2] == self.question_type, \"Question type does not match\"\n",
    "\n",
    "\t\tcur.execute(\"\"\"\n",
    "\t\t\tUPDATE alignment_rankings_ai\n",
    "\t\t\tSET\n",
    "\t\t\t\tground_truth_knowledge = %s,\n",
    "\t\t\t\tresponses = %s,\n",
    "\t\t\t\tresponses_model = %s,\n",
    "\t\t\t\tjudge_system = %s,\n",
    "\t\t\t\tjudge_user = %s,\n",
    "\t\t\t\tjudge_model = %s,\n",
    "\t\t\t\tjudge_reasoning = %s,\n",
    "\t\t\t\tjudge_rankings = %s,\n",
    "\t\t\t\tin_progress = %s\n",
    "\t\t\tWHERE id = %s\n",
    "\t\t\"\"\", (\n",
    "\t\t\tself.ground_truth_knowledge,\n",
    "\t\t\tJsonb(self.responses) if self.responses is not None else None,\n",
    "\t\t\tself.responses_model,\n",
    "\t\t\tself.judge_system,\n",
    "\t\t\tself.judge_user,\n",
    "\t\t\tself.judge_model,\n",
    "\t\t\tself.judge_reasoning,\n",
    "\t\t\tJsonb(self.judge_rankings) if self.judge_rankings is not None else None,\n",
    "\t\t\tself.in_progress,\n",
    "\t\t  \tself.id\n",
    "\t\t))\n",
    "\t\n",
    "\t@staticmethod\n",
    "\tdef get_from_database(conn: psycopg.Connection, id: int) -> \"EvalSampleMulti | None\":\n",
    "\t\twith conn.cursor(row_factory=class_row(EvalSampleMulti)) as cur:\n",
    "\t\t\tcur.execute(\"SELECT * FROM alignment_rankings_ai WHERE id = %s\", (id,))\n",
    "\t\t\trow = cur.fetchone()\n",
    "\t\t\tif row is None:\n",
    "\t\t\t\treturn None\n",
    "\t\t\t\n",
    "\t\t\treturn row\n",
    "\n",
    "\n",
    "def list_db_preferences() -> list[EvalSample]:\n",
    "\twith db_conn(row_factory=class_row(EvalSample)) as (conn, cur):\n",
    "\t\tcur.execute(\"SELECT * FROM alignment_preferences_ai\")\n",
    "\t\treturn cur.fetchall() # type: ignore\n",
    "\n",
    "\n",
    "def list_db_rankings() -> list[EvalSampleMulti]:\n",
    "\twith db_conn(row_factory=class_row(EvalSampleMulti)) as (conn, cur):\n",
    "\t\tcur.execute(\"SELECT * FROM alignment_rankings_ai\")\n",
    "\t\treturn cur.fetchall() # type: ignore"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "dccdcac8",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Danbooru\n",
    "API_TAG_ID_TO_NAME = {tag.id: tag.name for tag in api.fetch_tags()}\n",
    "DANBOORU_TAG_METADATA = {}\n",
    "\n",
    "for line in Path(\"XXXXX/tags000000000000.json\").read_text().splitlines():\n",
    "\tdata = json.loads(line)\n",
    "\tDANBOORU_TAG_METADATA[data[\"name\"]] = data\n",
    "\n",
    "DANBOORU_TAG_CATEGORY_TO_STR = {\n",
    "\t0: \"general\",\n",
    "\t1: \"artist\",\n",
    "\t3: \"copyright\",\n",
    "\t4: \"character\",\n",
    "\t5: \"meta\",\n",
    "}\n",
    "\n",
    "# e621\n",
    "p = Path(\"XXXXX/tags-2023-07-16.csv.gz\")\n",
    "E621_TAG_METADATA = {}\n",
    "\n",
    "with gzip.open(p, \"rt\") as f:\n",
    "\treader = csv.DictReader(f)\n",
    "\tfor row in reader:\n",
    "\t\tE621_TAG_METADATA[row[\"name\"]] = row\n",
    "\n",
    "E621_TAG_CATEGORY_TO_STR = {\n",
    "\t0: \"general\",\n",
    "\t1: \"artist\",\n",
    "\t3: \"copyright\",\n",
    "\t4: \"character\",\n",
    "\t5: \"species\",\n",
    "\t6: \"invalid\",\n",
    "\t7: \"meta\",\n",
    "\t8: \"lore\",\n",
    "}\n",
    "\n",
    "# Rule34\n",
    "RULE34_TAG_TO_CATEGORY = json.loads(Path(\"XXXXX/rule34_tag_to_category.json\").read_text())\n",
    "RULE34_TAG_TO_CATEGORY = {k: v.replace(\", ambiguous\", \"\").replace(\"metadata\", \"meta\") for k, v in RULE34_TAG_TO_CATEGORY.items()}\n",
    "\n",
    "\n",
    "def image_to_tags(filehash: bytes) -> dict[str, str]:\n",
    "\timage_metadata = api.get_image_metadata(filehash)\n",
    "\ttag_strings = {}\n",
    "\n",
    "\t# Danbooru\n",
    "\tall_tags = [API_TAG_ID_TO_NAME[int(i)] for i in image_metadata.tags.keys()]\n",
    "\n",
    "\t# Sort into categories\n",
    "\tby_category = defaultdict(list)\n",
    "\tfor tag in all_tags:\n",
    "\t\tif tag not in DANBOORU_TAG_METADATA:\n",
    "\t\t\tcategory = \"general\"\n",
    "\t\telse:\n",
    "\t\t\tcategory = DANBOORU_TAG_METADATA[tag]['category']\n",
    "\t\t\tcategory = DANBOORU_TAG_CATEGORY_TO_STR[int(category)]\n",
    "\t\tby_category[category].append(tag)\n",
    "\t\n",
    "\t# Build the final list by first listing by category, and sorted alphabetically within each category\n",
    "\t# General tags are listed last without a category prefix\n",
    "\tall_tags = []\n",
    "\n",
    "\tfor category in [\"artist\", \"copyright\", \"character\", \"meta\"]:\n",
    "\t\ttags = sorted(by_category[category])\n",
    "\t\tfor tag in tags:\n",
    "\t\t\tall_tags.append(f\"{category}:{tag}\")\n",
    "\t\n",
    "\tall_tags.extend(sorted(by_category[\"general\"]))\n",
    "\t\n",
    "\t# Format the tags into a string\n",
    "\tif len(all_tags) > 0:\n",
    "\t\ttag_strings['danbooru'] = \", \".join(all_tags)\n",
    "\n",
    "\t# E621\n",
    "\tif 'e621_tags' in image_metadata.attributes:\n",
    "\t\te621_tags = next(iter(image_metadata.attributes.get('e621_tags', {}).keys()), \"\")\n",
    "\t\te621_tags = e621_tags.split(\" \")\n",
    "\n",
    "\t\t# Sort into categories\n",
    "\t\tby_category = defaultdict(list)\n",
    "\t\tfor tag in e621_tags:\n",
    "\t\t\tif tag not in E621_TAG_METADATA:\n",
    "\t\t\t\tcategory = \"general\"\n",
    "\t\t\telse:\n",
    "\t\t\t\tcategory = int(E621_TAG_METADATA[tag]['category'])\n",
    "\t\t\t\tcategory = E621_TAG_CATEGORY_TO_STR[category]\n",
    "\t\t\n",
    "\t\t\tby_category[category].append(tag)\n",
    "\t\t\n",
    "\t\t# Build the final list by first listing by category, and sorted alphabetically within each category\n",
    "\t\t# General tags are listed last without a category prefix\n",
    "\t\tall_tags = []\n",
    "\n",
    "\t\tfor category in [\"artist\", \"copyright\", \"character\", \"species\", \"meta\", \"lore\"]:\n",
    "\t\t\ttags = sorted(by_category[category])\n",
    "\t\t\tfor tag in tags:\n",
    "\t\t\t\tall_tags.append(f\"{category}:{tag}\")\n",
    "\t\t\n",
    "\t\tall_tags.extend(sorted(by_category[\"general\"]))\n",
    "\t\t\n",
    "\t\t# Format the tags into a string\n",
    "\t\tif len(all_tags) > 0:\n",
    "\t\t\ttag_strings['e621'] = \", \".join(all_tags)\n",
    "\n",
    "\t# Rule34\n",
    "\tif 'rule34_tags' in image_metadata.attributes:\n",
    "\t\tr34_tags = next(iter(image_metadata.attributes.get('rule34_tags', {}).keys()), \"\")\n",
    "\t\tr34_tags = [tag.strip() for tag in r34_tags.split(\" \") if tag.strip() != \"\"]\n",
    "\n",
    "\t\t# Sort into categories\n",
    "\t\tby_category = defaultdict(list)\n",
    "\t\tfor tag in r34_tags:\n",
    "\t\t\tif tag not in RULE34_TAG_TO_CATEGORY:\n",
    "\t\t\t\tcategory = \"invalid\"\n",
    "\t\t\telse:\n",
    "\t\t\t\tcategory = RULE34_TAG_TO_CATEGORY[tag]\n",
    "\t\t\n",
    "\t\t\tby_category[category].append(tag)\n",
    "\t\t\n",
    "\t\t# Build the final list by first listing by category, and sorted alphabetically within each category\n",
    "\t\t# General tags are listed last without a category prefix\n",
    "\t\tall_tags = []\n",
    "\n",
    "\t\tfor category in [\"artist\", \"copyright\", \"character\", \"species\", \"meta\", \"lore\"]:\n",
    "\t\t\ttags = sorted(by_category[category])\n",
    "\t\t\tfor tag in tags:\n",
    "\t\t\t\tall_tags.append(f\"{category}:{tag}\")\n",
    "\t\t\n",
    "\t\tall_tags.extend(sorted(by_category[\"general\"]))\n",
    "\t\t\n",
    "\t\t# Format the tags into a string\n",
    "\t\tif len(all_tags) > 0:\n",
    "\t\t\ttag_strings['rule34'] = \", \".join(all_tags)\n",
    "\t\n",
    "\treturn tag_strings"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ec27a9a8",
   "metadata": {},
   "outputs": [],
   "source": [
    "DESIRED_SOURCES = {\n",
    "\t\"XXXXX\": 2048*4,\n",
    "\t\"XXXXX\": 128*4,\n",
    "\t\"XXXXX\": 768*4,\n",
    "\t\"XXXXX\": 768*4,\n",
    "\t\"XXXXX\": 512*4,\n",
    "\t\"XXXXX\": 512*4,\n",
    "\t\"XXXXX\": 128*4,\n",
    "\t\"XXXXX\": 128*4,\n",
    "\t\"XXXXX\": 2048*4,\n",
    "\t\"XXXXX\": 768*4,\n",
    "\t\"XXXXX\": 128*4,\n",
    "\t\"XXXXX\": 512*4,\n",
    "\t\"XXXXX\": 512*4,\n",
    "}\n",
    "\n",
    "\n",
    "API_IMAGES_BY_SOURCE = {}\n",
    "\n",
    "for source in tqdm(DESIRED_SOURCES.keys()):\n",
    "\tresults = api.search(f\"source='{source}'\", [\"hash\"])\n",
    "\tassert all(len(filehash) == 32 for filehash in results)\n",
    "\tAPI_IMAGES_BY_SOURCE[source] = set(filehash.tobytes() for filehash in results)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e0b37ba2",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load our training dataset\n",
    "source_ds = datasets.load_dataset(\"fancyfeast/joy-captioning-20250408a\")\n",
    "assert isinstance(source_ds, datasets.DatasetDict)\n",
    "TRAINED_FILEHASHES = set()\n",
    "\n",
    "for split in source_ds.keys():\n",
    "\tTRAINED_FILEHASHES.update(source_ds[split][\"filehash\"])\n",
    "\n",
    "TRAINED_BY_TYPE = defaultdict(list)\n",
    "for example in tqdm(source_ds['train']):\n",
    "\tquestion_type = example[\"question_type\"]\n",
    "\tTRAINED_BY_TYPE[question_type].append(example)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "baea9b3f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load prompt mappings\n",
    "PROMPT_MAPPINGS = yaml.safe_load(Path(\"XXXXX/prompt-mappings.yaml\").read_text())\n",
    "PROMPT_EXTENSIONS = [line.strip() for line in Path(\"XXXXX/EXTENSIONS.txt\").read_text().splitlines() if line.strip()]\n",
    "\n",
    "# Gather a list of API images we can use\n",
    "API_IMAGES_TO_USE = []\n",
    "\n",
    "for source, desired_count in DESIRED_SOURCES.items():\n",
    "\t# Exclude any images that the model was either trained, tested, or validated on\n",
    "\tapi_images = API_IMAGES_BY_SOURCE[source] - TRAINED_FILEHASHES\n",
    "\n",
    "\t# Sort by hash for determinism\n",
    "\tapi_images = sorted(list(api_images))\n",
    "\n",
    "\t# Truncate\n",
    "\tapi_images = api_images[:desired_count*2]\n",
    "\n",
    "\tAPI_IMAGES_TO_USE.extend(api_images)\n",
    "\n",
    "print(f\"# API images to use: {len(API_IMAGES_TO_USE)}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e2dafeab",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Loaded from a redacted names.json; structure not visible here — presumably\n",
     "# consumed by later cells (verify downstream usage).\n",
     "NAMES = json.loads(Path(\"XXXXX/names.json\").read_text())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "92b25a39",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Build our ground truth answers based on our VQA database\n",
     "# VQA with a vqa_category is human verified\n",
     "# Maps (image_hash, stripped question) -> stripped answer.\n",
     "results = api.search(\"EXISTS(vqa_category) AND EXISTS(questionAnswer)\", [\"hash\", \"attributes\"])\n",
     "assert isinstance(results, list)\n",
     "VQA_GROUND_TRUTH_ANSWERS = {}\n",
     "\n",
     "for result in tqdm(results):\n",
     "\tassert isinstance(result, tag_machine_api.SearchResultImage) and result.attributes is not None and result.hash is not None\n",
     "\tquestionAnswer, = result.attributes['questionAnswer'].keys()\n",
     "\tquestionAnswer = json.loads(questionAnswer)\n",
     "\tquestion = questionAnswer['question'].strip()\n",
     "\tanswer = questionAnswer['answer'].strip()\n",
     "\tvqa_category, = result.attributes['vqa_category'].keys()\n",
     "\n",
     "\t# Handle system message being in the question\n",
     "\t# NOTE(review): assumes a matching </system> tag exists; .index() raises ValueError otherwise.\n",
     "\tif \"<system>\" in question:\n",
     "\t\ti = question.index(\"<system>\")\n",
     "\t\tj = question.index(\"</system>\")\n",
     "\t\tquestion = question[:i] + question[j + len(\"</system>\"):]\n",
     "\t\n",
     "\tquestion = question.strip()\n",
     "\n",
     "\tif vqa_category.strip() == \"\" or question == \"\" or answer == \"\":\n",
     "\t\tcontinue\n",
     "\n",
     "\tVQA_GROUND_TRUTH_ANSWERS[(result.hash, question)] = answer\n",
     "\n",
     "# Add AllenAI pixmo Q/A pairs as additional ground truth. Existing keys are\n",
     "# skipped below, so the human-verified answers above take precedence.\n",
     "results = api.search(\"EXISTS(allenai_pixmo_ask_model_anything_question) AND EXISTS(allenai_pixmo_ask_model_anything_answer)\", [\"hash\", \"attributes\"])\n",
     "assert isinstance(results, list)\n",
     "\n",
     "for result in tqdm(results):\n",
     "\tassert isinstance(result, tag_machine_api.SearchResultImage) and result.attributes is not None and result.hash is not None\n",
     "\n",
     "\tquestion, = result.attributes['allenai_pixmo_ask_model_anything_question'].keys()\n",
     "\tanswer, = result.attributes['allenai_pixmo_ask_model_anything_answer'].keys()\n",
     "\n",
     "\tquestion = question.strip()\n",
     "\tanswer = answer.strip()\n",
     "\n",
     "\tif question == \"\" or answer == \"\":\n",
     "\t\tcontinue\n",
     "\n",
     "\tif (result.hash, question) in VQA_GROUND_TRUTH_ANSWERS:\n",
     "\t\tcontinue\n",
     "\n",
     "\tVQA_GROUND_TRUTH_ANSWERS[(result.hash, question)] = answer\n",
     "\n",
     "print(f\"# VQA ground truth answers: {len(VQA_GROUND_TRUTH_ANSWERS)}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "73c1bd4e",
   "metadata": {},
   "outputs": [],
   "source": [
    "def build_ground_truth_knowledge(filehash: bytes, question: str) -> str | None:\n",
    "\tground_truth_reference = \"\"\n",
    "\n",
    "\t# Gather tags\n",
    "\ttag_strings = image_to_tags(filehash)\n",
    "\ttag_reference = \"\\n\".join([f\"**{source} tags**: {tag_string}\" for source, tag_string in tag_strings.items()])\n",
    "\tif tag_reference.strip() != \"\":\n",
    "\t\tground_truth_reference += f\"<ground_truth_tags>{tag_reference.strip()}</ground_truth_tags>\\n\"\n",
    "\t\tground_truth_reference += \"You are provided with the tags associated with the user's image between <ground_truth_tags> and </ground_truth_tags> above. You can use these ground truth tags to assist in assessing the accuracy of the responses. You should assume that the ground truth tags are correct but not exhaustive (they may not cover all applicable tags). Note also that the tagging systems of the different booru sites are different and may need to be translated if, for example, the ground truth tags are from danbooru but the user requested e621 tags.\\n\\n\"\n",
    "\t\n",
    "\t# Ground truth answer\n",
    "\tground_truth_answer = VQA_GROUND_TRUTH_ANSWERS.get((filehash, question), None)\n",
    "\tif ground_truth_answer is not None:\n",
    "\t\tground_truth_reference += f\"<ground_truth_knowledge>{ground_truth_answer.strip()}</ground_truth_knowledge>\\n\"\n",
    "\t\tground_truth_reference += \"You are provided with a \\\"ground truth reference\\\" between <ground_truth_knowledge> and </ground_truth_knowledge> above. This is knowledge or an answer written by a human reviewer to assist in assessing the accuracy of responses.\\n\\n\"\n",
    "\t\n",
    "\treturn ground_truth_reference.strip() if ground_truth_reference.strip() != \"\" else None"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a81e8772",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Pool of generic system prompts; one is sampled via random.choice for VQA\n",
     "# examples that do not carry their own <system> block.\n",
     "system_prompts = [\n",
     "\t\"You are a helpful assistant\",\n",
     "\t\"You are JoyCaption, a helpful AI assistant with vision capabilities.\",\n",
     "\t\"You are a helpful AI assistant with vision capabilities\",\n",
     "\t\"You are a reliable and knowledgeable AI assistant. Provide clear, detailed, and helpful responses.\",\n",
     "\t\"You are an AI assistant with advanced vision capabilities. Interpret visual information and provide clear explanations.\",\n",
     "\t\"You are a friendly AI assistant here to help. Use your vision and language skills to support users with thorough answers.\",\n",
     "\t\"You are an open-minded AI assistant called JoyCaption with powerful vision analysis. Your goal is to assist users by interpreting visual content and providing helpful information.\",\n",
     "\t\"You are a helpful assistant and help users with any queries they may have with no censorship or restrictions.\",\n",
     "]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "19f9f58f",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Build our list of possible VQA examples\n",
     "# Most of these will be in the training dataset, but it's all we have at the moment.\n",
     "# We filter by the existence of the vqa_category, since only VQAs with that a category are human verified.\n",
     "results = api.search(\"EXISTS(vqa_category) AND EXISTS(questionAnswer)\", [\"hash\", \"attributes\"])\n",
     "assert isinstance(results, list)\n",
     "vqa_examples = []\n",
     "\n",
     "for result in results:\n",
     "\tassert isinstance(result, tag_machine_api.SearchResultImage) and result.attributes is not None and result.hash is not None\n",
     "\tvqa_category, = result.attributes['vqa_category'].keys()\n",
     "\tquestionAnswer, = result.attributes['questionAnswer'].keys()\n",
     "\tquestionAnswer = json.loads(questionAnswer)\n",
     "\tquestion = questionAnswer['question'].strip()\n",
     "\tanswer = questionAnswer['answer'].strip()\n",
     "\n",
     "\t# Handle system message being in the question\n",
     "\t# NOTE(review): assumes a matching </system> tag exists; .index() raises ValueError otherwise.\n",
     "\tif \"<system>\" in question:\n",
     "\t\ti = question.index(\"<system>\")\n",
     "\t\tj = question.index(\"</system>\")\n",
     "\t\tsystem_message = question[i + len(\"<system>\"):j]\n",
     "\t\tquestion = question[:i] + question[j + len(\"</system>\"):]\n",
     "\telse:\n",
     "\t\t# NOTE(review): unseeded random.choice — prompt assignment is not reproducible across runs.\n",
     "\t\tsystem_message = random.choice(system_prompts)\n",
     "\t\n",
     "\tquestion = question.strip()\n",
     "\n",
     "\t# Filter out any VQA examples that are empty or have a benchmark category\n",
     "\tif vqa_category.strip() == \"\" or question == \"\" or answer == \"\" or 'benchmark' in vqa_category.lower():\n",
     "\t\tcontinue\n",
     "\n",
     "\tif 'bra' in question.lower() and result.hash.hex()[0] != '0':\n",
     "\t\t# Filter the bra related questions, so they don't overload the dataset.\n",
     "\t\t# using the hash knocks it down by 1/16th, deterministically\n",
     "\t\tcontinue\n",
     "\n",
     "\tvqa_examples.append(EvalSample(\n",
     "\t\tfilehash=result.hash,\n",
     "\t\tsystem=system_message,\n",
     "\t\tquestion=question,\n",
     "\t\tquestion_type='questionAnswer',\n",
     "\t\ttask_type='overall',\n",
     "\t))\n",
     "\n",
     "print(f\"# VQA examples: {len(vqa_examples)}\")\n",
     "\n",
     "# Inject some AllenAI vqa data\n",
     "results = api.search(\"source='allenai_pixmo_ask_model_anything' AND exists(allenai_pixmo_ask_model_anything_question)\", [\"hash\", \"attributes\"])\n",
     "assert isinstance(results, list)\n",
     "allenai_examples = []\n",
     "\n",
     "for result in tqdm(results):\n",
     "\tassert isinstance(result, tag_machine_api.SearchResultImage) and result.attributes is not None and result.hash is not None\n",
     "\tif 'vqa_category' in result.attributes:\n",
     "\t\t# Should already be in our dataset, so skip\n",
     "\t\tcontinue\n",
     "\n",
     "\tquestion, = result.attributes['allenai_pixmo_ask_model_anything_question'].keys()\n",
     "\tanswer, = result.attributes['allenai_pixmo_ask_model_anything_answer'].keys()\n",
     "\n",
     "\tquestion = question.strip()\n",
     "\tanswer = answer.strip()\n",
     "\tif question == \"\" or answer == \"\":\n",
     "\t\tcontinue\n",
     "\n",
     "\tallenai_examples.append(EvalSample(\n",
     "\t\tfilehash=result.hash,\n",
     "\t\tsystem=random.choice(system_prompts),\n",
     "\t\tquestion=question,\n",
     "\t\tquestion_type='questionAnswer',\n",
     "\t\ttask_type='overall',\n",
     "\t))\n",
     "\n",
     "# Deterministic order (sort by hash), then cap the injected AllenAI examples at 3500.\n",
     "allenai_examples.sort(key=lambda e: e.filehash)\n",
     "vqa_examples.extend(allenai_examples[:3500])\n",
     "\n",
     "print(f\"Extended VQA to {len(vqa_examples)} examples\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "250d5269",
   "metadata": {},
   "outputs": [],
   "source": [
    "def _longest_internal_repeat(s: str) -> int:\n",
    "\t\"\"\"\n",
    "\tCrude check for “aaa… / ab_ab_ab…” patterns inside ONE very-long token.\n",
    "\tReturns length of the largest substring that appears ≥ 4 contiguous times.\n",
    "\t\"\"\"\n",
    "\tmax_len = len(s) // 4                       # need 4× to be interesting\n",
    "\tfor size in range(1, max_len + 1):\n",
    "\t\tchunk = s[:size]\n",
    "\t\tif chunk * (len(s) // size) in s:\n",
    "\t\t\treturn size * (len(s) // size)\n",
    "\treturn 0\n",
    "\n",
    "\n",
    "def is_repper(text: str) -> bool:\n",
    "\t\"\"\"\n",
    "\tConservative detector for useless repetition / tag-spam.\n",
    "\n",
    "\tReturns True only when repetition is very likely harmful.\n",
    "\tFive lightweight heuristics are combined; thresholds are tuned\n",
    "\tto avoid false-positives on ordinary prose or short enumerations.\n",
    "\t\"\"\"\n",
    "\n",
    "\ttokens = [t.lower() for t in re.split(r\"\\W+\", text) if t]\n",
    "\tn = len(tokens)\n",
    "\tif n < 30:                                     # tiny blocks → never flag\n",
    "\t\treturn False\n",
    "\n",
    "\t# --------------------------------------------------- 1\n",
    "\t# ≥10 identical tokens BACK-TO-BACK\n",
    "\tif sum(tokens[i] == tokens[i - 1] for i in range(1, n)) >= 10:\n",
    "\t\treturn True\n",
    "\n",
    "\t# --------------------------------------------------- 2\n",
    "\t# Heavy duplicate ratio on “content” words (len ≥4)\n",
    "\tcontent = [t for t in tokens if len(t) >= 4]\n",
    "\tif len(content) >= 50:\n",
    "\t\tdup_count  = len(content) - len(set(content))\n",
    "\t\tdup_ratio  = dup_count / len(content)\n",
    "\t\tif dup_ratio > 0.55 and dup_count >= 100:      # both conditions\n",
    "\t\t\treturn True\n",
    "\n",
    "\t# --------------------------------------------------- 3\n",
    "\t# Repeated 3-gram loops (“water ocean waves …”)\n",
    "\ttris = [' '.join(tokens[i:i + 3]) for i in range(n - 2)]\n",
    "\tif tris:\n",
    "\t\tmost_common_tri, freq = Counter(tris).most_common(1)[0]\n",
    "\t\tif freq >= 10 and freq / len(tris) > 0.20:\n",
    "\t\t\treturn True\n",
    "\n",
    "\t# --------------------------------------------------- 4\n",
    "\t# Colon-style tag duplication  (e.g.   meta:icon_set … )\n",
    "\tcolon_tags = [t for t in tokens if ':' in t]\n",
    "\tif len(colon_tags) >= 30:\n",
    "\t\ttag_dup = len(colon_tags) - len(set(colon_tags))\n",
    "\t\tif tag_dup >= 20 and tag_dup / len(colon_tags) > 0.30:\n",
    "\t\t\treturn True\n",
    "\n",
    "\t# --------------------------------------------------- 5A\n",
    "\t#   Common-prefix flood  (tokens differ only by added suffix words)\n",
    "\tif n >= 50:\n",
    "\t\tfor k in (15, 20, 25):                       # test several slice sizes\n",
    "\t\t\tcounts = Counter(t[:k] for t in tokens)\n",
    "\t\t\ttop_slice, freq = counts.most_common(1)[0]\n",
    "\t\t\tuniq_with_slice = {t for t in tokens if t.startswith(top_slice)}\n",
    "\t\t\tif freq / n > 0.18 and len(uniq_with_slice) >= 15:\n",
    "\t\t\t\treturn True\n",
    "\n",
    "\t# --------------------------------------------------- 5B\n",
    "\t#   Internal repetition inside ONE very long token\n",
    "\tfor tok in tokens:\n",
    "\t\tif len(tok) > 80 and _longest_internal_repeat(tok) / len(tok) > 0.65:\n",
    "\t\t\treturn True\n",
    "\n",
    "\t# --------------------------------------------------- 5C\n",
    "\t#   Numeric-suffix families (“foo_1 … foo_40”)\n",
    "\tnum_suffix = re.compile(r'^(.+?)_(\\d{1,3})$')\n",
    "\tbuckets: dict[str, set[int]] = {}\n",
    "\tfor t in tokens:\n",
    "\t\tm = num_suffix.match(t)\n",
    "\t\tif m:\n",
    "\t\t\tbuckets.setdefault(m.group(1), set()).add(int(m.group(2)))\n",
    "\tif any(len(nums) >= 15 for nums in buckets.values()):\n",
    "\t\treturn True\n",
    "\n",
    "\treturn False\n",
    "\n",
    "\n",
    "def generate_responses(example: EvalSample) -> EvalSample:\n",
    "\t\"\"\"\n",
    "\tSample two *distinct* responses for the example and return a copy with\n",
    "\tthem (and the generating model id) attached.  Retries up to 10 times\n",
    "\tbefore giving up.\n",
    "\t\"\"\"\n",
    "\tfor _attempt in range(10):\n",
    "\t\tfirst = generate_response(example)\n",
    "\t\tsecond = generate_response(example)\n",
    "\t\tif first == second:\n",
    "\t\t\tcontinue  # identical pair is useless for a preference comparison\n",
    "\t\tupdate = {\n",
    "\t\t\t\"response_a\": first,\n",
    "\t\t\t\"response_b\": second,\n",
    "\t\t\t\"response_a_model\": CURRENT_RESPONSE_MODEL,\n",
    "\t\t\t\"response_b_model\": CURRENT_RESPONSE_MODEL,\n",
    "\t\t}\n",
    "\t\treturn example.model_copy(update=update)\n",
    "\n",
    "\traise RuntimeError(\"Failed to generate two different responses after 10 attempts\")\n",
    "\n",
    "\n",
    "def generate_responses_multi(example: EvalSampleMulti, n: int) -> EvalSampleMulti:\n",
    "\t\"\"\"\n",
    "\tCollect `n` distinct responses for the example (deduplicated via a set),\n",
    "\tsampling at most 3*n times, and return a copy with them attached.\n",
    "\t\"\"\"\n",
    "\tmax_attempts = n * 3\n",
    "\tcollected: set[str] = set()\n",
    "\tfor attempt in range(max_attempts):\n",
    "\t\tcandidate = generate_response(example)\n",
    "\n",
    "\t\t# Try to ensure at least one response isn't a repper: once more than\n",
    "\t\t# half the slots are filled and everything collected so far is spammy\n",
    "\t\t# repetition, skip further repper candidates — unless this is the\n",
    "\t\t# final attempt.\n",
    "\t\tskip_candidate = (\n",
    "\t\t\tis_repper(candidate)\n",
    "\t\t\tand all(is_repper(r) for r in collected)\n",
    "\t\t\tand attempt < (max_attempts - 1)\n",
    "\t\t\tand len(collected) > (n // 2)\n",
    "\t\t)\n",
    "\t\tif skip_candidate:\n",
    "\t\t\tcontinue\n",
    "\n",
    "\t\tcollected.add(candidate)\n",
    "\t\tif len(collected) == n:\n",
    "\t\t\treturn example.model_copy(update={\"responses\": list(collected), \"responses_model\": CURRENT_RESPONSE_MODEL})\n",
    "\n",
    "\traise RuntimeError(\"Failed to generate enough different responses\")\n",
    "\n",
    "\n",
    "def generate_response(example: EvalSample | EvalSampleMulti, logprobs: bool = False) -> str | tuple[str, Any]:\n",
    "\t\"\"\"\n",
    "\tGenerate one response for the example's image + question from the local\n",
    "\tOpenAI-compatible server hosting CURRENT_RESPONSE_MODEL.\n",
    "\n",
    "\tReturns the stripped response text; when `logprobs` is True, returns a\n",
    "\t(text, logprobs_content) tuple instead.\n",
    "\t\"\"\"\n",
    "\tassert example.image_dataurl is not None, f\"Missing image data URL for {example.filehash}\"\n",
    "\t# Fresh client per call against the local inference server.\n",
    "\tclient = openai.Client(base_url=\"http://localhost:5052/v1\", api_key=\"token-abc123\")\n",
    "\t# Random temperature in [0.4, 1.0] so repeated calls can produce\n",
    "\t# different responses for the same example.\n",
    "\ttemperature = random.uniform(0.4, 1.0)\n",
    "\n",
    "\tresponse = client.chat.completions.create(\n",
    "\t\tmodel=CURRENT_RESPONSE_MODEL,\n",
    "\t\tmessages=[\n",
    "\t\t\t{\n",
    "\t\t\t\t\"role\": \"system\",\n",
    "\t\t\t\t\"content\": example.system,\n",
    "\t\t\t},\n",
    "\t\t\t{\n",
    "\t\t\t\t\"role\": \"user\",\n",
    "\t\t\t\t\"content\": [\n",
    "\t\t\t\t\t{\n",
    "\t\t\t\t\t\t\"type\": \"text\",\n",
    "\t\t\t\t\t\t\"text\": example.question.strip(),\n",
    "\t\t\t\t\t},\n",
    "\t\t\t\t\t{\n",
    "\t\t\t\t\t\t\"type\": \"image_url\",\n",
    "\t\t\t\t\t\t\"image_url\": {\n",
    "\t\t\t\t\t\t\t\"url\": example.image_dataurl,\n",
    "\t\t\t\t\t\t},\n",
    "\t\t\t\t\t},\n",
    "\t\t\t\t]\n",
    "\t\t\t},\n",
    "\t\t],\n",
    "\t\ttemperature=temperature,\n",
    "\t\ttop_p=0.95,\n",
    "\t\tmax_tokens=512,\n",
    "\t\tlogprobs=logprobs,\n",
    "\t)\n",
    "\tclient.close()\n",
    "\n",
    "\tassert len(response.choices) == 1, f\"Expected 1 responses, got {len(response.choices)}\"\n",
    "\tassert response.choices[0].message.content is not None, \"Response content is None\"\n",
    "\tresponse_a = response.choices[0].message.content.strip()\n",
    "\tif logprobs:\n",
    "\t\t# NOTE(review): assumes the server populated choices[0].logprobs — confirm.\n",
    "\t\treturn response_a, response.choices[0].logprobs.content\n",
    "\n",
    "\treturn response_a"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d81c0296",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Desired sampling mix of question types; values are relative weights\n",
    "# that are normalized to probabilities below.\n",
    "question_types = {\n",
    "\t\"questionAnswer\": 2000,\n",
    "\t\"default-descriptive\": 2000,\n",
    "\t\"training_prompt\": 2000,\n",
    "\t\"all_tags\": 2000,\n",
    "\t\"midjourney\": 1000,\n",
    "\t\"straight-forward\": 2000,\n",
    "\t\"social-media\": 200,\n",
    "\t\"booru-tags\": 200,\n",
    "\t\"product-listing\": 200,\n",
    "\t\"art-critic\": 200,\n",
    "\t# NOTE(review): \"consise-descriptive\" is misspelled, but this key is\n",
    "\t# presumably looked up in PROMPT_MAPPINGS — do not \"fix\" the spelling\n",
    "\t# here without also updating the mappings.\n",
    "\t\"consise-descriptive\": 200,\n",
    "}\n",
    "\n",
    "# Normalize\n",
    "n = sum(question_types.values())\n",
    "question_types = {k: v / n for k, v in question_types.items()}\n",
    "\n",
    "\n",
    "def get_random_prompt(question_type: str, filehash: bytes) -> str:\n",
    "\t\"\"\"\n",
    "\tBased on the question type, return a random user question/prompt\n",
    "\n",
    "\tFor 'all_tags' the prompt is picked for the first booru source (in the\n",
    "\tfixed order danbooru, e621, rule34) that actually contains this image,\n",
    "\tfalling back to a random one of the three.  All other question types\n",
    "\tpick a formal/informal template and fill in a word count or a length\n",
    "\tbucket.\n",
    "\t\"\"\"\n",
    "\tif question_type == 'all_tags':\n",
    "\t\tsources = set(source for source in API_IMAGES_BY_SOURCE.keys() if filehash in API_IMAGES_BY_SOURCE[source])\n",
    "\n",
    "\t\t# Random fallback, overridden by the first preferred source that has the image.\n",
    "\t\tsource_to_use = random.choice([\"danbooru\", \"e621\", \"rule34\"])\n",
    "\t\tfor source in [\"danbooru\", \"e621\", \"rule34\"]:\n",
    "\t\t\tif source in sources:\n",
    "\t\t\t\tsource_to_use = source\n",
    "\t\t\t\tbreak\n",
    "\t\t\n",
    "\t\tprompt = random.choice(PROMPT_MAPPINGS['prompts']['all_tags'][source_to_use])\n",
    "\t\treturn prompt.strip()\n",
    "\n",
    "\t# Random target word count, rounded down to a multiple of 10.\n",
    "\tn_words = random.randint(20, 300)\n",
    "\tn_words = (n_words // 10) * 10\n",
    "\tlength = random.choice(['very short', 'short', 'medium-length', 'long', 'very long'])\n",
    "\n",
    "\t# x selects the template family (plain / word_count / length); y gives a\n",
    "\t# 10% chance of the informal variant, which only exists for\n",
    "\t# 'default-descriptive'.\n",
    "\tx = random.random()\n",
    "\ty = random.random()\n",
    "\n",
    "\tif x < 0.33 and y < 0.1 and question_type == 'default-descriptive':\n",
    "\t\tprompt = random.choice(PROMPT_MAPPINGS['prompts'][question_type]['informal'])\n",
    "\telif x < 0.33:\n",
    "\t\tprompt = random.choice(PROMPT_MAPPINGS['prompts'][question_type]['formal'])\n",
    "\telif x < 0.66 and y < 0.1 and question_type == 'default-descriptive':\n",
    "\t\tprompt = random.choice(PROMPT_MAPPINGS['prompts'][question_type]['informal,word_count'])\n",
    "\telif x < 0.66:\n",
    "\t\tprompt = random.choice(PROMPT_MAPPINGS['prompts'][question_type]['formal,word_count'])\n",
    "\telif y < 0.1 and question_type == 'default-descriptive':\n",
    "\t\tprompt = random.choice(PROMPT_MAPPINGS['prompts'][question_type]['informal,length'])\n",
    "\telse:\n",
    "\t\tprompt = random.choice(PROMPT_MAPPINGS['prompts'][question_type]['formal,length'])\n",
    "\t\n",
    "\t# Templates that don't use {word_count}/{length} simply ignore the arguments.\n",
    "\treturn prompt.format(\n",
    "\t\tword_count=n_words,\n",
    "\t\tlength=length,\n",
    "\t).strip()\n",
    "\n",
    "\n",
    "def _build_hammered_examples_helper(ranking_to_response: Callable[[list[str]], tuple[str, str] | None]):\n",
    "\t\"\"\"\n",
    "\tTurn judged MultiRanked examples into new head-to-head preference examples.\n",
    "\n",
    "\t`ranking_to_response` receives the responses reordered by the judge's\n",
    "\tranking (best first — see the callers' docstrings) and returns the\n",
    "\t(response_a, response_b) pair to compare, or None to skip that ranking.\n",
    "\tPairs that already have a preference row (in either order, from the same\n",
    "\tresponse model) are skipped; the rest are inserted into the database.\n",
    "\t\"\"\"\n",
    "\t# List all existing preferences and rankings\n",
    "\texisting_preferences = defaultdict(list)\n",
    "\tfor example in list_db_preferences():\n",
    "\t\texisting_preferences[(example.filehash, example.system, example.question, example.response_a, example.response_b)].append(example)\n",
    "\n",
    "\t# Go through the rankings and find ones that haven't been head-to-head compared\n",
    "\tnew_examples: list[EvalSample] = []\n",
    "\tfor ranking_example in tqdm(list_db_rankings(), desc=\"Building hammered examples\"):\n",
    "\t\trankings = ranking_example.judge_rankings\n",
    "\t\tif rankings is None:\n",
    "\t\t\tcontinue\n",
    "\n",
    "\t\tif ranking_example.responses_model != CURRENT_RESPONSE_MODEL:\n",
    "\t\t\tcontinue\n",
    "\t\t\n",
    "\t\tassert len(rankings) >= 2, f\"Rankings are not valid for {ranking_example.id}: {rankings}\"\n",
    "\t\tassert ranking_example.responses is not None and len(ranking_example.responses) == len(rankings), f\"Responses are not valid for {ranking_example.id}\"\n",
    "\n",
    "\t\tranked_responses = [ranking_example.responses[i] for i in rankings]\n",
    "\t\tresponses = ranking_to_response(ranked_responses)\n",
    "\t\tif responses is None:\n",
    "\t\t\tcontinue\n",
    "\n",
    "\t\tassert len(responses) == 2, f\"Expected 2 responses, got {len(responses)}: {responses}\"\n",
    "\n",
    "\t\t# Check if we've already compared these two responses\n",
    "\t\texisting = existing_preferences.get((ranking_example.filehash, ranking_example.system, ranking_example.question, responses[0], responses[1]), [])\n",
    "\t\texisting = existing + existing_preferences.get((ranking_example.filehash, ranking_example.system, ranking_example.question, responses[1], responses[0]), [])\n",
    "\t\texisting = [e for e in existing if e.response_a_model == ranking_example.responses_model and e.response_b_model == ranking_example.responses_model]\n",
    "\t\tif len(existing) > 0:\n",
    "\t\t\t# We already have a preference for these two responses, so skip\n",
    "\t\t\tcontinue\n",
    "\n",
    "\t\t# Randomize the order of the responses\n",
    "\t\tresponses = list(responses)\n",
    "\t\trandom.shuffle(responses)\n",
    "\n",
    "\t\t# Create a new example\n",
    "\t\tnew_examples.append(EvalSample(\n",
    "\t\t\tfilehash=ranking_example.filehash,\n",
    "\t\t\tsystem=ranking_example.system,\n",
    "\t\t\tquestion=ranking_example.question,\n",
    "\t\t\tquestion_type=ranking_example.question_type,\n",
    "\t\t\ttask_type=\"overall\",\n",
    "\t\t\tresponse_a=responses[0],\n",
    "\t\t\tresponse_b=responses[1],\n",
    "\t\t\tresponse_a_model=ranking_example.responses_model,\n",
    "\t\t\tresponse_b_model=ranking_example.responses_model,\n",
    "\t\t\tground_truth_knowledge=build_ground_truth_knowledge(ranking_example.filehash, ranking_example.question),\n",
    "\t\t))\n",
    "\t\t# Record the new pair so a later ranking can't duplicate it in this run.\n",
    "\t\texisting_preferences[(ranking_example.filehash, ranking_example.system, ranking_example.question, responses[0], responses[1])].append(new_examples[-1])\n",
    "\t\n",
    "\t# Now insert all the new examples into the database\n",
    "\twith db_conn() as (conn, cur):\n",
    "\t\tnew_examples = [example.add_to_database(cur) for example in tqdm(new_examples)]\n",
    "\t\tconn.commit()\n",
    "\t\n",
    "\tprint(f\"Added {len(new_examples)} new examples to the database\")\n",
    "\n",
    "\n",
    "def build_hammered_examples():\n",
    "\t\"\"\"\n",
    "\tBuild preference pairs from the MultiRanked dataset.\n",
    "\tPairs the top-ranked response with the bottom-ranked one.\n",
    "\t\"\"\"\n",
    "\tdef _best_and_worst(ranked_responses: list[str]) -> tuple[str, str]:\n",
    "\t\t# First element is the judged best, last is the judged worst.\n",
    "\t\treturn ranked_responses[0], ranked_responses[-1]\n",
    "\n",
    "\t_build_hammered_examples_helper(_best_and_worst)\n",
    "\n",
    "\n",
    "def build_hammered_examples2():\n",
    "\t\"\"\"\n",
    "\tBuild preference pairs from the MultiRanked dataset.\n",
    "\tPairs a highly-ranked non-abnormal response with a low-ranked abnormal\n",
    "\t(\"repper\") response, when both exist.\n",
    "\t\"\"\"\n",
    "\tdef _pick_pair(ranked_responses: list[str]) -> tuple[str, str] | None:\n",
    "\t\t# Need a reasonably deep ranking before the head/tail slices mean much.\n",
    "\t\tif len(ranked_responses) < 6:\n",
    "\t\t\treturn None\n",
    "\n",
    "\t\thead_clean = [r for r in ranked_responses[:3] if not is_repper(r)]\n",
    "\t\ttail_spam = [r for r in ranked_responses[-3:] if is_repper(r)]\n",
    "\t\tif not head_clean or not tail_spam:\n",
    "\t\t\treturn None\n",
    "\t\treturn head_clean[0], tail_spam[-1]\n",
    "\n",
    "\t_build_hammered_examples_helper(_pick_pair)\n",
    "\n",
    "\n",
    "def build_hammered_examples3():\n",
    "\t\"\"\"\n",
    "\tBuild preference pairs from the MultiRanked dataset.\n",
    "\tPicks the best-ranked non-abnormal response, then pairs it with the\n",
    "\tfirst abnormal (\"repper\") response ranked below it.\n",
    "\t\"\"\"\n",
    "\tdef _pick_pair(ranked_responses: list[str]) -> tuple[str, str] | None:\n",
    "\t\tif len(ranked_responses) < 6:\n",
    "\t\t\treturn None\n",
    "\n",
    "\t\tbest = None\n",
    "\t\tfor response in ranked_responses:\n",
    "\t\t\tif best is None:\n",
    "\t\t\t\tif not is_repper(response):\n",
    "\t\t\t\t\tbest = response  # best-ranked clean response\n",
    "\t\t\telif is_repper(response):\n",
    "\t\t\t\t# First spammy response that ranks below the chosen best.\n",
    "\t\t\t\treturn best, response\n",
    "\t\treturn None\n",
    "\n",
    "\t_build_hammered_examples_helper(_pick_pair)\n",
    "\n",
    "\n",
    "def build_examples(n: int):\n",
    "\t\"\"\"\n",
    "\tTop up the preference dataset to roughly `n` examples, split across\n",
    "\tquestion types per the `question_types` distribution.\n",
    "\n",
    "\tImages already present in the database are excluded; 'questionAnswer'\n",
    "\texamples come from the prepared VQA pool, everything else from the API\n",
    "\timage pool.  New examples are inserted into the database.\n",
    "\t\"\"\"\n",
    "\t# List all existing preferences\n",
    "\texisting_types = defaultdict(int)\n",
    "\texisting_filehashes = set()\n",
    "\tfor example in list_db_preferences():\n",
    "\t\texisting_types[example.question_type] += 1\n",
    "\t\texisting_filehashes.add(example.filehash)\n",
    "\n",
    "\t# Figure out what VQA examples we have left and what API images we can use\n",
    "\tremaining_vqa = [e for e in vqa_examples if e.filehash not in existing_filehashes]\n",
    "\tremaining_api_images = list(set(API_IMAGES_TO_USE) - existing_filehashes)\n",
    "\n",
    "\tprint(f\"# remaining VQA examples: {len(remaining_vqa)}\")\n",
    "\tprint(f\"# remaining API images: {len(remaining_api_images)}\")\n",
    "\n",
    "\trandom.shuffle(remaining_vqa)\n",
    "\trandom.shuffle(remaining_api_images)\n",
    "\n",
    "\tprint({k: (int(v * n), existing_types[k]) for k, v in question_types.items()})\n",
    "\n",
    "\t# How many more of each type are needed to reach the target distribution.\n",
    "\ttarget_counts = {k: max(0, int(v * n) - existing_types[k]) for k, v in question_types.items()}\n",
    "\tprint(target_counts)\n",
    "\texamples = []\n",
    "\n",
    "\tfor question_type, target_count in target_counts.items():\n",
    "\t\tfor _ in range(target_count):\n",
    "\t\t\tif question_type == 'questionAnswer':\n",
    "\t\t\t\tif len(remaining_vqa) == 0:\n",
    "\t\t\t\t\t# NOTE(review): `continue` keeps looping after the pool is\n",
    "\t\t\t\t\t# empty; `break` would skip the pointless remaining iterations.\n",
    "\t\t\t\t\tcontinue\n",
    "\n",
    "\t\t\t\texamples.append(remaining_vqa.pop())\n",
    "\t\t\t\tcontinue\n",
    "\n",
    "\t\t\tif len(remaining_api_images) == 0:\n",
    "\t\t\t\tcontinue\n",
    "\n",
    "\t\t\tfilehash = remaining_api_images.pop()\n",
    "\t\t\tsystem = random.choice(system_prompts).strip()\n",
    "\t\t\tquestion = get_random_prompt(question_type, filehash)\n",
    "\t\t\texamples.append(EvalSample(\n",
    "\t\t\t\tfilehash=filehash,\n",
    "\t\t\t\tsystem=system,\n",
    "\t\t\t\tquestion=question,\n",
    "\t\t\t\tquestion_type=question_type,\n",
    "\t\t\t\ttask_type=\"overall\",\n",
    "\t\t\t\tground_truth_knowledge=build_ground_truth_knowledge(filehash, question),\n",
    "\t\t\t))\n",
    "\t\n",
    "\tprint(f\"Generated {len(examples)} examples to add to the database\")\n",
    "\n",
    "\t# Now insert all the new examples into the database\n",
    "\twith db_conn() as (conn, cur):\n",
    "\t\texamples = [example.add_to_database(cur) for example in tqdm(examples)]\n",
    "\t\tconn.commit()\n",
    "\n",
    "\n",
    "def build_ranked_examples(max_n: int | None):\n",
    "\t\"\"\"\n",
    "\tBuild MultiRanked examples based on the existing preference examples.\n",
    "\n",
    "\tFor every preference example whose (filehash, system, question) triple\n",
    "\tdoes not already have a ranking entry, create an EvalSampleMulti and\n",
    "\tinsert it into the database.  Preferences are visited in random order,\n",
    "\tstopping after `max_n` new rankings (no limit when max_n is None).\n",
    "\t\"\"\"\n",
    "\texisting_preferences = list_db_preferences()\n",
    "\texisting_rankings = set()\n",
    "\tfor example in list_db_rankings():\n",
    "\t\texisting_rankings.add((example.filehash, example.system, example.question))\n",
    "\t\n",
    "\trandom.shuffle(existing_preferences)\n",
    "\t\n",
    "\tnew_examples = []\n",
    "\tfor example in tqdm(existing_preferences, desc=\"Building ranked examples\"):\n",
    "\t\tif (example.filehash, example.system, example.question) in existing_rankings:\n",
    "\t\t\tcontinue\n",
    "\t\t\n",
    "\t\t# Generate a new example\n",
    "\t\tnew_examples.append(EvalSampleMulti(\n",
    "\t\t\tfilehash=example.filehash,\n",
    "\t\t\tsystem=example.system,\n",
    "\t\t\tquestion=example.question,\n",
    "\t\t\t# BUGFIX: was the bare name `question_type`, which is undefined in\n",
    "\t\t\t# this function and raised NameError; use the source example's value.\n",
    "\t\t\tquestion_type=example.question_type,\n",
    "\t\t\tground_truth_knowledge=build_ground_truth_knowledge(example.filehash, example.question),\n",
    "\t\t))\n",
    "\t\t# Record the triple so duplicate preferences don't create duplicates.\n",
    "\t\texisting_rankings.add((example.filehash, example.system, example.question))\n",
    "\n",
    "\t\tif max_n is not None and len(new_examples) >= max_n:\n",
    "\t\t\tbreak\n",
    "\t\n",
    "\tprint(f\"Generated {len(new_examples)} rankings to add to the database\")\n",
    "\n",
    "\t# Now insert all the new examples into the database\n",
    "\twith db_conn() as (conn, cur):\n",
    "\t\tfor example in tqdm(new_examples):\n",
    "\t\t\texample.add_to_database(cur)\n",
    "\t\tconn.commit()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "45d7af2e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# System prompt given to the Judge model\n",
    "PROMPT = \"\"\"\n",
    "You are an expert AI Response Evaluator. Your task is to meticulously analyze and compare two distinct AI-generated responses (`Response A` and `Response B`) provided in response to a specific `User Query` and a `User Image`. These responses were generated based on a specific `Original System Prompt` that defined the AI's persona, constraints, and goals.\n",
    "\n",
    "**Your Goal:** Determine which response (`Response A` or `Response B`) is objectively better according to the evaluation criteria outlined below. Provide a clear judgment and a detailed, reasoned justification for your choice.\n",
    "\n",
    "**Input You Will Receive:**\n",
    "\n",
    "1.  `Original System Prompt`: The instructions the AI models were given to generate their responses. Pay close attention to persona, tone, constraints, required format, and specific tasks mentioned here.\n",
    "2.  `User Query`: The specific question or instruction from the user.\n",
    "3.  `User Image`: An image provided by the user, which may be relevant to the query.\n",
    "4.  `Response A`: One of the AI-generated responses.\n",
    "5.  `Response B`: One of the AI-generated responses.\n",
    "\n",
    "**Evaluation Criteria:**\n",
    "\n",
    "1.  **Adherence to Original System Prompt:**\n",
    "    *   Did the response follow ALL instructions, constraints, formatting requirements, persona, and tone specified in the `Original System Prompt`?\n",
    "    *   How well did each response embody the defined persona or role?\n",
    "2.  **Addressing the User Query:**\n",
    "    *   Did the response directly, accurately, and completely answer the `User Query`?\n",
    "    *   Is the response relevant to the user's explicit and implicit needs?\n",
    "3.  **Image Integration:**\n",
    "    *   If an image was provided and relevant, did the response appropriately acknowledge, analyze, or utilize the image content as necessitated by the `User Query` and `Original System Prompt`?\n",
    "4.  **Helpfulness and Usefulness:**\n",
    "    *   How helpful and practical is the response for the user? Does it provide value?\n",
    "5.  **Accuracy and Factual Correctness:**\n",
    "    *   Is the information presented accurate and free from errors? (Acknowledge if you cannot verify).\n",
    "6.  **Clarity, Conciseness, and Structure:**\n",
    "    *   Is the response well-organized, easy to understand, and appropriately concise? Is it free from unnecessary jargon or rambling?\n",
    "7.  **Overall Quality:**\n",
    "    *   Considering all the above, which response provides a superior user experience?\n",
    "\n",
    "**Your Task Steps:**\n",
    "\n",
    "1.  **Understand the Context:** Thoroughly review the `Original System Prompt`, `User Query`, and `User Image`. Understand the *expected* output.\n",
    "2.  **Analyze Response A:** Evaluate `Response A` against all relevant criteria listed above. Note its strengths and weaknesses.\n",
    "3.  **Analyze Response B:** Evaluate `Response B` against all relevant criteria listed above. Note its strengths and weaknesses.\n",
    "4.  **Compare Responses:** Directly compare `Response A` and `Response B` on each relevant criterion. Highlight key differences. Which response performed better on each point?\n",
    "5.  **Make a Judgment:** State clearly which response is better (`Response A` or `Response B`). If they are very close, state that, but still choose the slightly better one if possible. If one is significantly better, note that. In rare cases where both fail significantly or are equally good/bad in offsetting ways, you may note that, but *still attempt* to identify if one has a slight edge based on primary criteria (like prompt adherence and query addressing).\n",
    "6.  **Provide Detailed Justification:** Explain *why* you chose one response over the other. Your justification should:\n",
    "    *   Reference specific aspects of the responses, the `Original System Prompt`, the `User Query`, and the `User Image`.\n",
    "    *   Clearly explain how the chosen response better meets the criteria.\n",
    "    *   Point out the specific failings of the less preferred response.\n",
    "    *   Be structured, clear, and objective.\n",
    "\n",
    "**Note:**\n",
    "\n",
    "*   If the user query sets a maximum word count for the response, the response should be within that limit.\n",
    "*   If the user query asks for the response to be \"very short\", \"short\", \"medium-length\", \"long\", or \"very long\" then treat that as an approximate word count limit where very short is ~20 words, short is ~40 words, medium-length is ~60 words, long is ~100 words, and very long is ~200 words.\n",
    "*   If the user asks for a list of danbooru, e621, or rule34 tags, the response is expected to follow these guidelines unless otherwise specified by the user or system prompt:\n",
    "\t*   Tags should be separated by commas (e.g. \"tag1, tag2, tag3\").\n",
    "\t*   Tags should be in lowercase.\n",
    "\t*   Tags should use underscores instead of spaces (e.g., \"tag_me\").\n",
    "\t*   Tags should be relevant to the content of the image and the user query.\n",
    "\t*   Tags should be based on the tagging system used by the site specified in the user query.\n",
    "\t*   Tags belong to categories, such as artist, character, copyright, etc.  The response should prepend the category to all tags except those in the general category (e.g. \"artist:tag1, character:tag2, copyright:tag3\").\n",
    "\t*   For rule34 or e621, the response should start by listing artist tags, then copyright tags, then character tags, then species tags, then meta tags, then lore tags, and finally general tags.\n",
    "\t*   For danbooru, the response should start with artist tags, then copyright tags, then chracter tags, then meta tags.\n",
    "\t*   Within each category, the tags should be sorted alphabetically.\n",
    "\t*   If the image does not have any tags within a certain category, for example there are no artist tags, then the response should not include that category at all. In other words, it is valid for a response to _not_ have any artist tags listed (or copyright tags, etc).  But if it does list a category the categories must be in the order specified above.\n",
    "\t*   The response should not include any tags that are not in the image or are not relevant to the user query.\n",
    "\t*   Unless specified otherwise by the user or system, the response should include all relavant tags in the image.\n",
    "\t*   Some tags have implicated tags.  For example, if \"long_hair\" is applicable to an image, the tag \"hair\" is also applicable, and it is correct to include both tags.\n",
    "*   Do not bias your evaluations on the order of the responses.  \"A\" and \"B\" have been assigned arbitrarily.  You should evaluate them independently and then compare them.\n",
    "\n",
    "**Output Format:**\n",
    "\n",
    "Structure your evaluation clearly. You might use headings like:\n",
    "\n",
    "*   **Context Summary:** (Briefly summarize the task set by the prompt/query)\n",
    "*   **Analysis of Response A:** (Strengths/Weaknesses against criteria)\n",
    "*   **Analysis of Response B:** (Strengths/Weaknesses against criteria)\n",
    "*   **Comparison:** (Direct point-by-point comparison)\n",
    "*   **Judgment:** (e.g., \"Response B is better.\")\n",
    "*   **Justification:** (Detailed reasoning for the judgment)\n",
    "\n",
    "After your evaluation, at the end of your response, always write a machine parsable output that includes:\n",
    "\n",
    "* Based on your evaluation, the name of the best response between <best> and </best> tags. Either `<best>Response A</best>` or `<best>Response B</best>`.\n",
    "* An overall quality score for Response A between 1 and 10, written between <response_a_score> and </response_a_score> tags. A score of 1 means the response is completely useless, and a score of 10 means the response is perfect.\n",
    "* An overall quality score for Response B between 1 and 10, written between <response_b_score> and </response_b_score> tags. A score of 1 means the response is completely useless, and a score of 10 means the response is perfect.\n",
    "\"\"\"\n",
    "\n",
    "PROMPT_SCORE_MIN = 1\n",
    "PROMPT_SCORE_MAX = 10\n",
    "\n",
    "MULTI_PROMPT = \"\"\"\n",
    "You are an expert AI Response Evaluator. Your task is to meticulously analyze and compare several distinct AI-generated responses (`Response A`, `Response B`, `Response C`, …) that were produced in response to a specific `User Query` and a `User Image`. These responses were generated based on a specific `Original System Prompt` that defined the AI's persona, constraints, and goals.\n",
    "\n",
    "**Your Goal:** Rank **all** responses from **best to worst** according to the evaluation criteria outlined below. Provide a clear judgment and a detailed, reasoned justification for your ordering.\n",
    "\n",
    "**Input You Will Receive:**\n",
    "\n",
    "1.  `Original System Prompt`: The instructions the AI models were given to generate their responses. Pay close attention to persona, tone, constraints, required format, and specific tasks mentioned here.\n",
    "2.  `User Query`: The specific question or instruction from the user.\n",
    "3.  `User Image`: An image provided by the user, which may be relevant to the query.\n",
    "4.  `Response A`, `Response B`, `Response C`, …: The AI-generated responses to be evaluated.\n",
    "\n",
    "**Evaluation Criteria (apply to each response):**\n",
    "\n",
    "1.  **Adherence to Original System Prompt:**\n",
    "    *   Did the response follow ALL instructions, constraints, formatting requirements, persona, and tone specified in the `Original System Prompt`?\n",
    "    *   How well did each response embody the defined persona or role?\n",
    "2.  **Addressing the User Query:**\n",
    "    *   Did the response directly, accurately, and completely answer the `User Query`?\n",
    "    *   Is the response relevant to the user's explicit and implicit needs?\n",
    "3.  **Image Integration:**\n",
    "    *   If an image was provided and relevant, did the response appropriately acknowledge, analyze, or utilize the image content as necessitated by the `User Query` and `Original System Prompt`?\n",
    "4.  **Helpfulness and Usefulness:**\n",
    "    *   How helpful and practical is the response for the user? Does it provide value?\n",
    "5.  **Accuracy and Factual Correctness:**\n",
    "    *   Is the information presented accurate and free from errors? (Acknowledge if you cannot verify).\n",
    "6.  **Clarity, Conciseness, and Structure:**\n",
    "    *   Is the response well-organized, easy to understand, and appropriately concise? Is it free from unnecessary jargon or rambling?\n",
    "7.  **Overall Quality:**\n",
    "    *   Considering all the above, how strong is the user experience provided?\n",
    "\n",
    "**Your Task Steps:**\n",
    "\n",
    "1.  **Understand the Context:** Thoroughly review the `Original System Prompt`, `User Query`, and `User Image`. Understand the *expected* output.\n",
    "2.  **Analyze Each Response Individually:** For every response, evaluate it against all relevant criteria listed above. Note its strengths and weaknesses.\n",
    "3.  **Compare Responses:** Directly contrast the responses on each criterion.\n",
    "4.  **Rank the Responses:** Order them from **best** (highest overall quality) to **worst** (lowest overall quality).\n",
    "5.  **Provide Detailed Justification:** Explain *why* each response occupies its position in the ranking. Reference specific aspects of the responses, the `Original System Prompt`, the `User Query`, and the `User Image`. Be structured, clear, and objective.\n",
    "\n",
    "**Output Format (strict):**\n",
    "\n",
    "Use the following headings *exactly*:\n",
    "\n",
    "* **Context Summary:**  \n",
    "  Briefly summarize what the user asked for and what the system prompt requires.\n",
    "\n",
    "* **Per-Response Analysis:**  \n",
    "  For each response in **ranked order**, supply a short bullet-point list of its key strengths and weaknesses.\n",
    "\n",
    "* **Ranking Explanation:**  \n",
    "  A concise narrative comparing the responses, highlighting decisive factors for the ordering.\n",
    "\n",
    "* **Ranking:**  \n",
    "  A single line listing the response labels from best to worst, comma-separated, enclosed in `<ranking>` tags.  \n",
    "  Example: `<ranking>Response C, Response A, Response B</ranking>`\n",
    "\n",
    "**Notes & Special Rules:**\n",
    "\n",
    "*   If the user query sets a maximum word count for the response, the response should be within that limit.\n",
    "*   If the user query asks for the response to be \"very short\", \"short\", \"medium-length\", \"long\", or \"very long\" then treat that as an approximate word count limit where very short is ~20 words, short is ~40 words, medium-length is ~60 words, long is ~100 words, and very long is ~200 words.\n",
    "*   If the user query asks for the response to be in a casual tone, the response should be written as if it were written by a human with a casual tone, using contractions, slang, and informal language as appropriate. It should avoid sounding like a robot pretending to be casual.\n",
    "*   If the user asks for a list of danbooru, e621, or rule34 tags, the response is expected to follow these guidelines unless otherwise specified by the user or system prompt:\n",
    "\t*   Tags should be separated by commas (e.g. \"tag1, tag2, tag3\").\n",
    "\t*   Tags should be in lowercase.\n",
    "\t*   Tags should use underscores instead of spaces (e.g., \"tag_me\").\n",
    "\t*   Tags should be relevant to the content of the image and the user query.\n",
    "\t*   Tags should be based on the tagging system used by the site specified in the user query.\n",
    "\t*   Tags belong to categories, such as artist, character, copyright, etc.  The response should prepend the category to all tags except those in the general category (e.g. \"artist:tag1, character:tag2, copyright:tag3\").\n",
    "\t*   For rule34 or e621, the response should start by listing artist tags, then copyright tags, then character tags, then species tags, then meta tags, then lore tags, and finally general tags.\n",
    "\t*   For danbooru, the response should start with artist tags, then copyright tags, then character tags, then meta tags.\n",
    "\t*   Within each category, the tags should be sorted alphabetically.\n",
    "\t*   If the image does not have any tags within a certain category, for example there are no artist tags, then the response should not include that category at all. In other words, it is valid for a response to _not_ have any artist tags listed (or copyright tags, etc).  But if it does list a category the categories must be in the order specified above.\n",
    "\t*   The response should not include any tags that are not in the image or are not relevant to the user query.\n",
    "\t*   Unless specified otherwise by the user or system, the response should include all relevant tags in the image.\n",
    "\t*   Some tags have implicated tags.  For example, if \"long_hair\" is applicable to an image, the tag \"hair\" is also applicable, and it is correct to include both tags.\n",
    "*   Do **not** use the order in which the candidates are presented as a bias when ranking them; judge solely on the criteria above.\n",
    "\"\"\"\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "MULTI_USER_MESSAGE = \"\"\"\n",
    "<original_system_prompt>{system_prompt}</original_system_prompt>\n",
    "<user_query>{user_query}</user_query>\n",
    "{responses}\n",
    "\n",
    "---\n",
    "\n",
    "{ground_truth_reference}\n",
    "\n",
    "**Now, please perform the evaluation based on the instructions provided in your system prompt. Don't forget to clearly mark the ranking in <ranking></ranking> tags at the end of your response, with a single line, comma separated list of the response labels in ranked order from best to worst.**\n",
    "\"\"\"\n",
    "\n",
    "# MULTI_USER_MESSAGE_ALL_TAGS = \"\"\"\n",
    "# <original_system_prompt>{system_prompt}</original_system_prompt>\n",
    "# <user_query>{user_query}</user_query>\n",
    "# {responses}\n",
    "\n",
    "# ---\n",
    "\n",
    "# {ground_truth_reference}\n",
    "# <ground_truth_tags>{ground_truth_tags}</ground_truth_tags>\n",
    "\n",
    "# **Now, please perform the evaluation based on the instructions provided in your system prompt.**\n",
    "# Additionally, the ground truth tags associated with the user's image from {source} have been provided to you. You should assume that these are correct, but not exhaustive. You can use these ground truth tags to assist in assessing the accuracy of the responses.\n",
    "# **Don't forget to clearly mark the ranking in <ranking></ranking> tags at the end of your response, with a single line, comma separated list of the response labels in ranked order from best to worst.**\n",
    "# \"\"\"\n",
    "\n",
    "\n",
    "\n",
    "USER_MESSAGE = \"\"\"\n",
    "<original_system_prompt>{system_prompt}</original_system_prompt>\n",
    "<user_query>{user_query}</user_query>\n",
    "{responses}\n",
    "\n",
    "---\n",
    "\n",
    "{ground_truth_reference}\n",
    "\n",
    "**Now, please perform the evaluation based on the instructions provided in your system prompt. Don't forget to clearly mark the best response in <best></best> tags, and provide a score for each response between 1 and 10, inside <response_a_score></response_a_score> and <response_b_score></response_b_score> tags.**\n",
    "\"\"\"\n",
    "\n",
    "# USER_MESSAGE_ALL_TAGS = \"\"\"\n",
    "# <original_system_prompt>{system_prompt}</original_system_prompt>\n",
    "# <user_query>{user_query}</user_query>\n",
    "# {responses}\n",
    "\n",
    "# ---\n",
    "\n",
    "# {ground_truth_reference}\n",
    "# <ground_truth_tags>{ground_truth_tags}</ground_truth_tags>\n",
    "\n",
    "# **Now, please perform the evaluation based on the instructions provided in your system prompt.**\n",
    "# Additionally, the ground truth tags associated with the user's image from {source} have been provided to you. You should assume that these are correct, but not exhaustive. You can use these ground truth tags to assist in assessing the accuracy of the responses.\n",
    "# **Don't forget to clearly mark the best response in <best></best> tags at the end of your response, and provide a score for each response between 1 and 10, inside <response_a_score></response_a_score> and <response_b_score></response_b_score> tags.**\n",
    "# \"\"\"\n",
    "\n",
    "\n",
    "CAPTION_SYSTEM_PROMPT = \"\"\"\n",
    "XXXXX\n",
    "\n",
    "Unless requested otherwise, when you write an image description or caption, you follow these general guidelines:\n",
    "\n",
    "* The description should be detailed.\n",
    "* The description must be accurate.\n",
    "* Include information about both the subject and background, colors, objects, people, clothes, textures, styles, locations of objects in the image, etc.\n",
    "* Include in your caption whether the image is a photograph, drawing, CGI, etc, and what specific style (for example if it is art, what style of art; if it's a drawing is it done in pencil, digital medium, etc).\n",
    "\"\"\"\n",
    "\n",
    "\n",
    "TRAINING_PROMPT_SYSTEM_PROMPT = \"\"\"\n",
    "XXXXX\n",
    "\n",
    "Unless requested otherwise, when you write a stable diffusion prompt for an image, you follow these general guidelines:\n",
    "* The prompt must be accurate.\n",
    "* The prompt should match how real human users write stable diffusion prompts.\n",
    "* Stable diffusion prompts often (but not always) use short phrases, descriptive tags, keywords, etc separated by commas, to indicate the desired content of the image succinctly.  Full sentences are less common (though not unheard of).\n",
    "* Stable diffusion prompts often (but not always) use tags from websites like danbooru, e621, rule34, etc.\n",
    "* Stable diffusion prompts are prompts for text-to-image models. Clarity and effectiveness for the model are prioritized over grammatical perfection or narrative flow. They lead with subject or medium, not narration. Prompts usually begin with the main subject (“portrait of…”, “anime girl…”, “oil painting of…”, \"in the style of…\", etc) rather than meta phrases like “This image shows…”, \"You are looking at...\", etc.\n",
    "* Stable diffusion prompts sometimes (but not always) use slang, abbreviations, and shorthand. For example, \"pussy\" instead of \"vagina\", \"anthro\" instead of \"anthropomorphic\", \"nsfw\" instead of \"not safe for work\", \"photo\" instead of \"photograph\", etc.\n",
    "* Order matters: The order of keywords can influence their impact; terms appearing earlier sometimes carry more weight or influence the foundational composition.\n",
    "* Art style descriptors are common in prompts, as well as artist references, to emulate the specific styles.\n",
    "* For photos, prompts often include camera specifications.\n",
    "* Lighting descriptions are common.\n",
    "* Since the goal is for the prompt to recreate the reference image, things like watermarks, compression artifacts, and other image flaws should be included in the prompt if they are present in the reference image.\n",
    "\"\"\"\n",
    "\n",
    "\n",
    "MIDJOURNEY_SYSTEM_PROMPT = \"\"\"\n",
    "XXXXX\n",
    "\n",
    "Unless requested otherwise, you write a MidJourney prompt that would best recreate the user's image.\n",
    "\"\"\"\n",
    "\n",
    "\n",
    "VQA_SYSTEM_PROMPT = \"\"\"\n",
    "XXXXX\n",
    "\"\"\"\n",
    "\n",
    "\n",
    "ALL_TAGS_PROMPT = \"\"\"\n",
    "\"\"\"\n",
    "\n",
    "\n",
    "STRAIGHTFORWARD_SYSTEM_PROMPT = \"\"\"\n",
    "When the user asks for an image description, you should craft a confident, direct set of instructions for describing the image in a way that both visually impaired readers and text-to-image models can understand and recreate. Begin by stating the main subject and medium. Immediately highlight pivotal elements—people, objects, scenery—using definite language without any hedging words.\n",
    "\n",
    "Focus on clear, concrete details: colors, shapes, textures, and spatial relationships (foreground, background, left, right). Describe how elements interact (“A golden retriever bounding through tall grass” rather than “A dog seems to be in some field”). Keep the tone casual yet precise, avoiding overly technical or flowery language.\n",
    "\n",
    "Never start with “This image is…”, \"The image is of...\", etc as writing variations like that such as \"This anime illustration...\" harms the text-to-image model due to the repetition of starting with \"This\" or \"The\" or variations and wastes that token as well.\n",
    "\n",
    "The image's medium MUST be included somewhere, as it is the most fundamental detail.\n",
    "\n",
    "Avoid words like \"featuring\", \"depicts\", \"showcasing\", \"revealing\", \"captures\", etc unless necessary, as they are common in more formal, overly flowery writing styles that do not assist a text-to-image model.\n",
    "\n",
    "Do not talk about the mood or feel of the image, as it is subjective and not something that can be recreated by a text-to-image model.\n",
    "\n",
    "Do not use \"likely,\" \"possibly,\" \"appears,\" \"seems,\" \"suggests,\" \"indicates,\" \"indicating,\" \"suggesting,\" or variations thereof and similar ambiguous terms. State each observation plainly and confidently.\n",
    "\n",
    "Note: These restrictions do not apply to quotes, dialogue, or text in the image, which should be transcribed verbatim.\n",
    "\n",
    "If there are visible compression artifacts, mention them, but do not speculate on the cause or resolution. If the image is blurry, mention that, but do not guess why it is blurry.\n",
    "\n",
    "If there are watermarks or artist signatures or other parts that identify the source or creator of the image, mention them.\n",
    "\n",
    "Do not use phrasing like \"... is visible\" or \"... can be seen\" as it is redundant (if it's in the description, it's visible), and does not add any information to the description.\n",
    "\n",
    "Vary the order in which you describe elements in the image to keep the description engaging and informative. Do not repeat the same structure for each description. For example some descriptions may start with the subject, others with the background, others with the overall scene, etc.\n",
    "\n",
    "Do not mention things that are not in the image, such as \"No people are visible\" or \"There are no watermarks\" as it is not helpful to a text-to-image model.\n",
    "\n",
    "Never mention resolution. Do not guess at unobservable details. Vary your vocabulary to keep the description lively. Above all, make each sentence purposeful—include only essential information that helps someone visualize or reproduce the scene exactly.\n",
    "\"\"\"\n",
    "\n",
    "\n",
    "SOCIAL_MEDIA_SYSTEM_PROMPT = \"\"\"\n",
    "XXXXX\n",
    "\n",
    "\"\"\"\n",
    "\n",
    "\n",
    "PRODUCT_LISTING_SYSTEM_PROMPT = \"\"\"\n",
    "XXXXX\n",
    "\n",
    "\"\"\"\n",
    "\n",
    "\n",
    "PRIMARY_SYSTEM_MESSAGE = {\n",
    "\t\"default-descriptive\": CAPTION_SYSTEM_PROMPT,\n",
    "\t\"training_prompt\": TRAINING_PROMPT_SYSTEM_PROMPT,\n",
    "\t\"all_tags\": \"\",\n",
    "\t\"midjourney\": MIDJOURNEY_SYSTEM_PROMPT,\n",
    "\t\"straight-forward\": STRAIGHTFORWARD_SYSTEM_PROMPT,\n",
    "\t\"social-media\": SOCIAL_MEDIA_SYSTEM_PROMPT,\n",
    "\t\"booru-tags\": CAPTION_SYSTEM_PROMPT,\n",
    "\t\"product-listing\": PRODUCT_LISTING_SYSTEM_PROMPT,\n",
    "\t\"art-critic\": CAPTION_SYSTEM_PROMPT,\n",
    "\t\"consise-descriptive\": CAPTION_SYSTEM_PROMPT,\n",
    "\t\"questionAnswer\": VQA_SYSTEM_PROMPT,\n",
    "}\n",
    "\n",
    "\n",
    "ALPHA_SYSTEM_MESSAGE = \"\"\"\n",
    "XXXXX\n",
    "\n",
    "## Core Vision Principles (Apply to all tasks unless overridden below by user query or system prompt)\n",
    "\n",
    "These principles apply when you are asked to caption, describe, or analyze an image, or when you are asked to generate a prompt for a text-to-image model based on an image.\n",
    "\n",
    "*   **Clarity & Confidence:** State observations plainly and confidently. Avoid hedging (\"likely,\" \"possibly,\" \"appears,\" \"seems,\" \"suggests\") and overly formal/flowery language (\"featuring,\" \"depicts,\" \"showcasing,\" \"revealing,\" \"captures\") unless necessary for quotes or specific styles.\n",
    "*   **Accuracy:** Descriptions and prompts must be accurate to the image.\n",
    "*   **Medium & Style:** Always identify the image medium (photograph, drawing, CGI, etc.) and specific style (e.g., oil painting, pencil sketch, anime art, photorealistic).\n",
    "*   **Text Transcription:** Transcribe quotes, dialogue, or text verbatim.\n",
    "*   **Contextual Elements:** Include details about background, colors, objects, clothes, textures, lighting, and spatial relationships.\n",
    "*   **Flaws:** Mention visible flaws like watermarks, compression artifacts, or blurriness if present.\n",
    "*   **Exclusions:** Do not mention image resolution. Do not describe things *not* in the image (e.g., \"No people visible\").\n",
    "\n",
    "## Task-Specific Guidelines\n",
    "\n",
    "These guidelines only apply in specific contexts unless overridden by user query or system prompt.\n",
    "\n",
    "### Image Captioning (Default)\n",
    "*   Follow Core Principles to create a detailed, descriptive caption.\n",
    "*   Aim for clear prose that paints a picture for the reader.\n",
    "\n",
    "### Stable Diffusion Prompting\n",
    "*   The prompt does not need to be complete. The prompt should vary from very simple to very detailed—or even a terse list of keywords or tags. All levels and formats that real users employ are acceptable unless the user asks otherwise.\n",
    "*   Adapt Core Principles into a prompt suitable for Stable Diffusion.\n",
    "   *   Medium & Style is optional\n",
    "   *   Contextual Elements is optional\n",
    "   *   Details are optional\n",
    "*   The prompt should match how real human users write stable diffusion prompts.\n",
    "*   Humans write stable diffusion prompts in very different ways, all of which are valid:\n",
    "   *   Short tag list: comma separated keywords, descriptive tags, etc.\n",
    "   *   Comma-separated phrase string: short phrases, descriptive tags, keywords, incomplete sentences, etc separated by commas or other punctuation.\n",
    "   *   Full sentence caption: full but concise and terse sentences, separated by periods, commas, or other punctuation.\n",
    "*   Bare tag or keyword dumps are equally valid 'human-style' prompts, especially for niche communities.\n",
    "*   Any of these styles are fine, including combinations of them, unless the user demands a specific format.\n",
    "*   Feel free to use slang, shorthand, or explicit language.\n",
    "*   Do **not** add leading meta phrases like \"This image shows...\" or \"You are looking at...\" as they are common in more formal, overly flowery writing styles that do not assist a text-to-image model.\n",
    "*   Order matters: The order of keywords can influence their impact; terms appearing earlier sometimes carry more weight or influence the foundational composition.\n",
    "*   The image medium may be included in the prompt, but if the prompt is at a low level of detail, it is not necessary.\n",
    "*   Art style descriptors are common in prompts, as well as artist references, to emulate the specific styles.\n",
    "*   For photos, prompts may include camera specifications.\n",
    "*   Lighting descriptions are common.\n",
    "*   Since the goal is for the prompt to recreate the reference image, things like watermarks, compression artifacts, and other image flaws should be included in the prompt if they are present in the reference image.\n",
    "\n",
    "### MidJourney Prompting\n",
    "*   Adapt Core Principles into a prompt suitable for MidJourney.\n",
    "*   The prompt should match the style of real MidJourney prompts.\n",
    "\n",
    "### Straightforward Captioning\n",
    "*   Follow Core Principles, emphasizing clarity for text-to-image models and visually impaired readers.\n",
    "*   Start directly with the main subject and medium (e.g., \"Oil painting of...\"). *Never* start with \"This image shows...\" or similar phrases.\n",
    "*   Focus on concrete, objective visual details (colors, shapes, positions). Avoid subjective interpretations (mood, feeling).\n",
    "*   Maintain a direct, slightly more formal tone than general captioning but avoid the forbidden flowery words.\n",
    "*   Never start with “This image is…”, \"The image is of...\", etc as writing variations like that such as \"This anime illustration...\" harms the text-to-image model due to the repetition of starting with \"This\" or \"The\" or variations and wastes that token as well.\n",
    "*   Do not use phrasing like \"... is visible\" or \"... can be seen\" as it is redundant (if it's in the description, it's visible), and does not add any information to the description.\n",
    "*   Vary your vocabulary to keep the description lively. Above all, make each sentence purposeful—include only essential information that helps someone visualize or reproduce the scene exactly.\n",
    "\n",
    "### Social Media Captioning\n",
    "*   Follow Core Principles regarding accuracy and detail where appropriate for the context.\n",
    "*   Write in an engaging, human-like style suitable for social media.\n",
    "*   Maximize potential engagement (e.g., posing questions, using relevant hashtags if requested).\n",
    "*   Adapt the level of explicit detail based on typical social media norms unless instructed otherwise.\n",
    "\n",
    "### Product Listing\n",
    "*   Follow Core Principles regarding accuracy and detail where relevant to the product.\n",
    "*   Write compelling copy focused on highlighting the product shown in the image.\n",
    "*   Focus on features and benefits visible or implied by the image.\n",
    "\n",
    "# Additional System Prompt\n",
    "\"\"\"\n",
    "\n",
    "\n",
    "\n",
    "CRAZY_QUERIES = [\n",
    "\t\"\"\"You are an extremely literal spatial-reasoning bot. Describe the image solely in terms of basic geometric forms and their relative positions. Do not use semantic labels (e.g., “table,” “person”).\"\"\",\n",
    "\t\"\"\"If a human face is present, output their estimated age range and primary emotion; otherwise respond with the literal text “NO FACE.”\"\"\",\n",
    "\t\"\"\"Output a YAML list of every object you can identify, including their approximate locations (e.g., top-left, center, bottom-right).\"\"\",\n",
    "\t\"\"\"Generate an ALT text under 80 characters that conveys the scene’s key elements for screen readers.\"\"\",\n",
    "\t\"\"\"pls tell me wht material the foreground object is made of and how you know (2 reasons).\"\"\",\n",
    "\t\"\"\"Act as a detective: list three items in the photo that could serve as forensic evidence.\"\"\",\n",
    "\t\"\"\"Generate a list of 10 keywords or tags that accurately describe this image's content and style.\"\"\",\n",
    "\t\"\"\"ok act like ur a really bored teenager who has to describe this for homework or something idk\"\"\",\n",
    "\t\"\"\"describe this image as if you were really horny\"\"\",\n",
    "\t\"\"\"```json\n",
    "{\n",
    "\"image_summary\": {\n",
    "\"primary_subject_category\": null,\n",
    "\"setting_type\": null, // e.g., indoor, outdoor, abstract\n",
    "\"dominant_mood\": null,\n",
    "\"estimated_complexity\": null // e.g., simple, moderate, complex\n",
    "}\n",
    "}\n",
    "```\n",
    "      \n",
    "Fill this JSON based on the image.\"\"\",\n",
    "\t\"\"\"Summarize this image in exactly 7 words.\"\"\",\n",
    "\t\"\"\"pls write a vry formal description, like for a museum label maybe? thx\"\"\",\n",
    "\t\"\"\"Generate alt text for this image, keeping accessibility guidelines in mind.\"\"\",\n",
    "\t\"\"\"Can you create a short narrative or backstory inspired by whatever this is showing?\"\"\",\n",
    "\t\"\"\"Ignore the main subject(s) and describe only the background or environment.\"\"\",\n",
    "\t\"\"\"Just give me a bulleted list of the key visual components.\"\"\",\n",
    "\t\"\"\"What questions does this image raise in your \"mind\"?\"\"\",\n",
    "\t\"\"\"Write one sentence describing the foreground, one for the middle ground, and one for the background.\"\"\",\n",
    "\t\"\"\"pls give me a description, but use lots of adjectives!! make it super descriptive thx :)\"\"\",\n",
    "\t\"\"\"Summarize the image in exactly six words.\"\"\",\n",
    "\t\"\"\"“first thought best thought” — jot down your raw, unfiltered first impressions of this picture in {word_count} words or less.\"\"\",\n",
    "\t\"\"\"Create a Dungeons & Dragons scene description the DM would read, based on this picture.\"\"\",\n",
    "\t\"\"\"craft an Instagram caption that would maximize engagement.\"\"\",\n",
    "\t\"\"\"Which classic film still could this image most resemble? Give one title and a 2-sentence rationale.\"\"\",\n",
    "\t\"\"\"Give me a crisp, one-sentence caption for this pic, then restate it as a cheesy movie tagline.\"\"\",\n",
    "\t\"\"\"Create a Twitter caption under 140 characters, including one trending hashtag and one niche emoji.\"\"\",\n",
    "\t\"\"\"Write a four-sentence newsroom photo caption, adhering to Associated Press style guidelines.\"\"\",\n",
    "\t\"\"\"Describe the hair color and hairstyle of every character present, please keep it brief.\"\"\",\n",
    "\t\"\"\"describe it but use only words with 5 letters or less. hard mode lol\"\"\",\n",
    "\t\"\"\"Could you suggest a title for this image?\"\"\",\n",
    "\t\"\"\"List any artists, photographers, or creators who might be associated with this image based on its style or content.\"\"\",\n",
    "\t\"\"\"Summarize this quick.\"\"\",\n",
    "\t\"\"\"From what point of view is this image presented? (e.g. eye-level, bird's eye, low angle, etc.)\"\"\",\n",
    "\t\"\"\"Describe this image using only single adjectives, separated by commas. Give me at least ten.\"\"\",\n",
    "\t\"\"\"Imagine this is a frame from a movie. What might have happened just before this moment, and what might happen next?\"\"\",\n",
    "\t\"\"\"info on image plz\"\"\",\n",
    "\t\"\"\"if this image were on a porn website what would the caption be?\"\"\",\n",
    "\t\"\"\"You are an expert on the CivitAI dataset, which contains the API responses for all of the images on that website. Write the API response for this image as if it were in that dataset. Include fields such as the diffusion model used, the prompt used to generate the image, the negative prompt used, and any other relevant fields. Respond in JSON format.\"\"\",\n",
    "\t\"\"\"Rate the quality of this image on a scale from 1 to 10, with 10 being the highest quality. Provide a brief explanation for your rating.\"\"\",\n",
    "\t\"\"\"Write me a short, super casual caption for Instagram that doesn’t use more than 12 words.\"\"\",\n",
    "\t\"\"\"Write a twitter post that would go viral if this image were posted on Twitter. Maximum of 140 characters.\"\"\",\n",
    "\t\"\"\"Write a reddit title within 18 words that would get this image the most upvotes if posted on Reddit.\"\"\",\n",
    "\t\"\"\"Imagine this image is from the COCO dataset. Write the associated caption as if it were in that dataset.\"\"\",\n",
    "\t\"\"\"With one sentence per line describe this image.\"\"\",\n",
    "\t\"\"\"Write an SDXL prompt for this image that would recreate it as closely as possible. Write it like the average human writes prompts for SDXL.\"\"\",\n",
    "\t\"\"\"Output a PonyDiffusionXL v6 prompt that would recreate this image as closely as possible. Write it like the average human writes prompts for PonyXL\"\"\",\n",
    "\t\"\"\"Break this image down into a JSON structure that covers all the key elements and details.\"\"\",\n",
    "\t\"\"\"Output python code where each object in the image is instantiated and various attributes of the object are set based on the image.\"\"\",\n",
    "]\n",
    "\n",
    "RANDOM_EXAMPLE_SETS = [\n",
    "\t(\"Close-up of a red ladybug resting on the edge of a green leaf, dew droplets scattered around.\",\n",
    "\t\t\"Wide aerial view of a winding river cutting through autumn forest, morning fog hovering low.\",\n",
    "\t\t\"Black-and-white portrait of an elderly smiling softly, deep wrinkles framing his eyes.\"),\n",
    "\t(\"Sunset flexing harder than my gym buddy. 🌅💪 #NoFilterButLotsOfFeelings\",\n",
    "\t\t\"Cat mid-yawn or plotting galactic takeover? You decide. 🐈‍⬛😼\",\n",
    "\t\t\"When your coffee spills but the latte art still slaps. ☕🎨 #SkillOrSorcery\"),\n",
    "\t(\"Cranes skim mirror ponds / rice fields breathe a silver hush / dawn bows to their wings\",\n",
    "\t\t\"Desert moonrise—quiet / dunes trade secrets with cold stars / solitude glows blue\",\n",
    "\t\t\"Old pier creaks once more / gulls stitch salt into the wind / tide erases threads\"),\n",
    "\t(\"Rain painted the alley chrome, and her neon umbrella was the only confession worth hearing.\",\n",
    "\t\t\"The city’s skyline smoked like a bad habit; somewhere in that haze, trouble lit its next cigarette.\",\n",
    "\t\t\"He wore his grin the way a cardsharp palms an ace—too smooth to be clean.\"),\n",
    "\t(\"ISO 100 │ f/8 │ 1/250 s │ 35 mm—high-contrast shot of basalt columns under midday sun; strong leading lines create geometric tension.\",\n",
    "\t\t\"ISO 1600 │ f/2 │ 1/60 s │ 50 mm—low-light café interior, warm tungsten cast; shallow depth isolates barista’s latte art.\",\n",
    "\t\t\"ISO 400 │ f/11 │ 8 s │ 14 mm—nighttime long exposure at seafront pier; smooth water surface, starburst streetlamps.\"),\n",
    "\t(\"The squirrel in the blue waistcoat balanced a hazelnut crown upon his head and declared it Adventure Day.\",\n",
    "\t\t\"Deep inside the tulip forest, Luna the caterpillar practiced her very first butterfly curtsy.\",\n",
    "\t\t\"When the moon forgot to rise, the fireflies stitched a silver blanket so the night wouldn’t feel naked.\"),\n",
    "\t(\"Behold yon orchard bathed in amber light; the plums, like rubies, tempt the hand of dusk.\",\n",
    "\t\t\"Across the storm-toss’d sea a lantern sways, a lone star guiding hearts that seek the shore.\",\n",
    "\t\t\"Soft zephyrs court the rose in fragrant bloom, whilst petals blush beneath the wooing breeze.\"),\n",
    "\t(\"Neon-spill ∆ city-skin; drones hum lullabies while rain pixelates the street.\",\n",
    "\t\t\"Heartbeat sync—signal lost—umbrella holograms stutter over chrome koi ponds.\",\n",
    "\t\t\"Headlights carve data-scars into midnight asphalt; graffiti AIs rewrite themselves on loop.\"),\n",
    "    [\n",
    "      \"🌄🦌🌲✨\",\n",
    "      \"🚴‍♀️💨🌧️🏁\",\n",
    "      \"🍜🔥😋🍥\"\n",
    "    ],\n",
    "    [\n",
    "      \"Today at 0900 UTC, HorizonTech unveiled its flagship solar drone, capturing a sunrise test flight above the Mojave Desert to demonstrate zero-emission aviation.\",\n",
    "      \"The attached image showcases our new micro-data-center rack, engineered for 40 % lower power draw while exceeding Tier-IV redundancy standards.\",\n",
    "      \"In response to coastal resiliency goals, AquaGrid’s prototype tidal turbine—pictured mid-deployment—successfully reached 97 % of projected output during phase-two trials.\"\n",
    "    ],\n",
    "    [\n",
    "      \"Figure 1: Confocal microscopy reveals chloroplast migration toward the adaxial leaf surface under 500 µmol m⁻² s⁻¹ blue-light stimulus (n = 30, p < 0.01).\",\n",
    "      \"Plate B illustrates crystalline domains of the polymer after annealing at 180 °C for 15 min, supporting a 12 % increase in tensile strength versus control.\",\n",
    "      \"Spectrogram indicates humpback whale vocalization shift to higher frequencies in shipping lanes; peak energy observed at 580 Hz (CI 95 %).\"\n",
    "    ],\n",
    "    [\n",
    "      \"Silver train, lonely track.\",\n",
    "      \"Tea steam meets winter glass.\",\n",
    "      \"First snow on forgotten swings.\"\n",
    "    ],\n",
    "    [\n",
    "      \"Pan-roast tomatoes until skins blister like this—deep scarlet, lightly charred, juices just starting to seep.\",\n",
    "      \"Dough should look glossy yet pull clean from the bowl; see strands stretching in the photo—window-pane test passed.\",\n",
    "      \"Finish with a butter baste: watch the steak edge foam and brown to hazelnut, exactly as pictured.\"\n",
    "    ],\n",
    "    [\n",
    "      \"The city folds into origami swans while traffic lights bleed marmalade over the asphalt.\",\n",
    "      \"Clock-tower shadows drip upward, stitching daylight into the moon’s empty pockets.\",\n",
    "      \"Umbrellas bloom from puddles, petals of rain whispering forgotten passwords to the wind.\"\n",
    "    ],\n",
    "]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8103da8c",
   "metadata": {},
   "outputs": [],
   "source": [
    "T = TypeVar('T', bound=Union[EvalSample, EvalSampleMulti])\n",
    "def prep_evaluation(example: T) -> T:\n",
    "\t\"\"\"\n",
    "\tFills out any missing fields needed for evaluation.\n",
    "\t\"\"\"\n",
    "\tif example.image_dataurl is None:\n",
    "\t\texample = example.get_image()\n",
    "\t\n",
    "\tif isinstance(example, EvalSample) and (example.response_a is None or example.response_b is None):\n",
    "\t\texample = generate_responses(example)\n",
    "\telif isinstance(example, EvalSampleMulti) and example.responses is None:\n",
    "\t\texample = generate_responses_multi(example, n=10)\n",
    "\t\n",
    "\tif example.judge_system is None:\n",
    "\t\texample = example.model_copy(update={\"judge_system\": (PROMPT if isinstance(example, EvalSample) else MULTI_PROMPT).strip()})\n",
    "\n",
    "\tif example.judge_user is None:\n",
    "\t\tsystem_message = ALPHA_SYSTEM_MESSAGE.strip()\n",
    "\t\t#system_message = system_message + \"\\n\\n\" + PRIMARY_SYSTEM_MESSAGE[example.question_type].strip()\n",
    "\t\tsystem_message = system_message + \"\\n\\n\" + example.system.strip()\n",
    "\n",
    "\t\tuser_message = USER_MESSAGE if isinstance(example, EvalSample) else MULTI_USER_MESSAGE\n",
    "\t\t\n",
    "\t\tif isinstance(example, EvalSample):\n",
    "\t\t\tassert example.response_a is not None and example.response_b is not None, \"Missing response_a or response_b\"\n",
    "\t\t\tresponses = [example.response_a, example.response_b]\n",
    "\t\telse:\n",
    "\t\t\tassert example.responses is not None, \"Missing responses\"\n",
    "\t\t\tresponses = example.responses\n",
    "\t\t\n",
    "\t\tformatted_responses = []\n",
    "\t\tfor i, response in enumerate(responses):\n",
    "\t\t\tword_count = len(response.split())\n",
    "\t\t\tlabel = chr(97 + i)  # 'a' for 0, 'b' for 1, etc.\n",
    "\t\t\tformatted_responses.append(f\"<response_{label}>{response}</response_{label}>\\n<response_{label}_word_count>{word_count}</response_{label}_word_count>\")\n",
    "\t\t\n",
    "\t\tuser_message = user_message.format(\n",
    "\t\t\tsystem_prompt=system_message,\n",
    "\t\t\tuser_query=example.question.strip(),\n",
    "\t\t\tresponses=\"\\n\".join(formatted_responses),\n",
    "\t\t\tsource=source,\n",
    "\t\t\tground_truth_reference=(example.ground_truth_knowledge if example.ground_truth_knowledge is not None else \"\")\n",
    "\t\t).strip()\n",
    "\n",
    "\t\texample = example.model_copy(update={\"judge_user\": user_message})\n",
    "\t\n",
    "\treturn example\n",
    "\n",
    "\n",
    "RE_BEST_TAG = re.compile(r\"(?i)<best>\\s*response\\s+([ab])\\s*</best\")\n",
    "\n",
    "def parse_judge_response(evaluation: str, example: EvalSample) -> EvalSample:\n",
    "\t# Parse best tag\n",
    "\t#m = re.search(r\"<best>\\s*response\\s+([ab])\\s*</best\", evaluation, re.IGNORECASE)\n",
    "\tm = RE_BEST_TAG.findall(evaluation)\n",
    "\tif len(m) == 0:\n",
    "\t\traise RuntimeError(f\"No <best> tag found in response: {evaluation}\")\n",
    "\telif len(m) > 1:\n",
    "\t\traise RuntimeError(f\"Multiple <best> tags found in response: {evaluation}\")\n",
    "\tbest = m[0].strip().lower()\n",
    "\t#best = m.group(1).strip().lower()\n",
    "\tassert best in {\"a\", \"b\"}, f\"Invalid best tag in response: {best}\"\n",
    "\n",
    "\t# Parse scores\n",
    "\tm = re.search(r\"<response_a_score>\\s*([0-9]+(?:\\.[0-9]+)?)\\s*</response_a_score>\", evaluation, re.IGNORECASE)\n",
    "\tif m:\n",
    "\t\tresponse_a_score = float(m.group(1).strip())\n",
    "\t\tif response_a_score < PROMPT_SCORE_MIN or response_a_score > PROMPT_SCORE_MAX:\n",
    "\t\t\tprint(f\"Warning: <response_a_score> tag out of range: {response_a_score}\")\n",
    "\t\t\tresponse_a_score = None\n",
    "\t\telse:\n",
    "\t\t\tresponse_a_score = (response_a_score - PROMPT_SCORE_MIN) / (PROMPT_SCORE_MAX - PROMPT_SCORE_MIN)\n",
    "\telse:\n",
    "\t\t#print(\"Warning: No <response_a_score> tag found in response\")\n",
    "\t\tresponse_a_score = None\n",
    "\t\n",
    "\tm = re.search(r\"<response_b_score>\\s*([0-9]+(?:\\.[0-9]+)?)\\s*</response_b_score\", evaluation, re.IGNORECASE)\n",
    "\tif m:\n",
    "\t\tresponse_b_score = float(m.group(1).strip())\n",
    "\t\tif response_b_score < PROMPT_SCORE_MIN or response_b_score > PROMPT_SCORE_MAX:\n",
    "\t\t\tprint(f\"Warning: <response_b_score> tag out of range: {response_b_score}\")\n",
    "\t\t\tresponse_b_score = None\n",
    "\t\telse:\n",
    "\t\t\tresponse_b_score = (response_b_score - PROMPT_SCORE_MIN) / (PROMPT_SCORE_MAX - PROMPT_SCORE_MIN)\n",
    "\telse:\n",
    "\t\t#print(\"Warning: No <response_b_score> tag found in response\")\n",
    "\t\tresponse_b_score = None\n",
    "\t\n",
    "\treturn example.model_copy(update={\"judge_winner\": best, \"judge_response_a_score\": response_a_score, \"judge_response_b_score\": response_b_score})\n",
    "\n",
    "\n",
    "def parse_judge_response_multi(evaluation: str, example: EvalSampleMulti) -> EvalSampleMulti:\n",
    "\tassert example.responses is not None, f\"Missing responses for {example.filehash}\"\n",
    "\t# Parse ranking tag\n",
    "\tm = re.search(r\"<ranking>\\s*(.*)\\s*</ranking\", evaluation, re.IGNORECASE)\n",
    "\tassert m is not None, f\"No <ranking> tag found in response: {evaluation}\"\n",
    "\tranking_str = m.group(1).strip().lower()\n",
    "\twithout_prefix = 'response' not in ranking_str\n",
    "\trankings = []\n",
    "\tfor letter in ranking_str.split(\",\"):\n",
    "\t\tletter = letter.strip()\n",
    "\t\tif not without_prefix and not letter.startswith(\"response\"):\n",
    "\t\t\traise RuntimeError(f\"Invalid response letter in ranking: {ranking_str}\")\n",
    "\t\tif not without_prefix:\n",
    "\t\t\tletter = letter[len(\"response\"):].strip()\n",
    "\t\tif len(letter) != 1:\n",
    "\t\t\traise RuntimeError(f\"Invalid response letter in ranking: {ranking_str}\")\n",
    "\t\tidx = ord(letter) - ord(\"a\")\n",
    "\t\tif idx < 0 or idx >= len(example.responses):\n",
    "\t\t\traise RuntimeError(f\"Invalid response letter in ranking: {ranking_str}\")\n",
    "\t\tif letter in rankings:\n",
    "\t\t\traise RuntimeError(f\"Duplicate response letter in ranking: {ranking_str}\")\n",
    "\t\trankings.append(idx)\n",
    "\t\n",
    "\tif len(rankings) != len(example.responses):\n",
    "\t\traise RuntimeError(f\"Invalid number of responses in ranking: {ranking_str}\")\n",
    "\t\n",
    "\treturn example.model_copy(update={\"judge_rankings\": rankings})\n",
    "\n",
    "\n",
    "SAFETY_SETTINGS_OFF = [\n",
    "\t{\"category\": \"HARM_CATEGORY_HARASSMENT\",        \"threshold\": \"BLOCK_NONE\"},\n",
    "\t{\"category\": \"HARM_CATEGORY_HATE_SPEECH\",       \"threshold\": \"BLOCK_NONE\"},\n",
    "\t{\"category\": \"HARM_CATEGORY_SEXUALLY_EXPLICIT\", \"threshold\": \"BLOCK_NONE\"},\n",
    "\t{\"category\": \"HARM_CATEGORY_DANGEROUS_CONTENT\", \"threshold\": \"BLOCK_NONE\"},\n",
    "]\n",
    "\n",
    "\n",
    "def openrouter_completion(**kwargs) -> str:\n",
    "\tresponse = requests.post(\"https://openrouter.ai/api/v1/chat/completions\", headers={\n",
    "\t\t\"Authorization\": f\"Bearer {os.environ['OPENROUTER_API_KEY']}\",\n",
    "\t}, data=json.dumps(kwargs))\n",
    "\tresponse.raise_for_status()\n",
    "\tresult = response.json()\n",
    "\n",
    "\tif 'choices' not in result or len(result['choices']) != 1 or result['choices'][0] is None:\n",
    "\t\traise RuntimeError(f\"Did not get response from OpenRouter: {result}\")\n",
    "\t\n",
    "\tif 'message' not in result['choices'][0] or result['choices'][0]['message'] is None:\n",
    "\t\traise RuntimeError(f\"Did not get message from OpenRouter: {result['choices'][0]}\")\n",
    "\t\n",
    "\tif 'content' not in result['choices'][0]['message'] or result['choices'][0]['message']['content'] is None:\n",
    "\t\traise RuntimeError(f\"Did not get content from OpenRouter: {result['choices'][0]['message']}\")\n",
    "\t\n",
    "\treturn result['choices'][0]['message']['content']\n",
    "\n",
    "\n",
    "T = TypeVar('T', bound=Union[EvalSample, EvalSampleMulti])\n",
    "def run_openai_model(example: T, model: str, reasoning_effort: str | None, temperature: float | None, use_openrouter: bool, flex: bool = False, gemini_caching: bool = False) -> T:\n",
    "\tassert example.judge_system is not None and example.judge_user is not None and example.image_dataurl is not None, \"Missing judge system, user message, or image data URL\"\n",
    "\n",
    "\textra_args = {}\n",
    "\n",
    "\tif reasoning_effort is not None:\n",
    "\t\textra_args[\"reasoning_effort\"] = reasoning_effort\n",
    "\t\n",
    "\tif temperature is not None:\n",
    "\t\textra_args[\"temperature\"] = temperature\n",
    "\t\n",
    "\tif model.startswith(\"google/gemini\"):\n",
    "\t\textra_args[\"safety_settings\"] = SAFETY_SETTINGS_OFF\n",
    "\t\n",
    "\tif flex:\n",
    "\t\textra_args[\"service_tier\"] = \"flex\"\n",
    "\t\n",
    "\tif use_openrouter:\n",
    "\t\tsystem_content = example.judge_system.strip() if not gemini_caching else [{ \"type\": \"text\", \"text\": example.judge_system.strip(), \"cache_control\": {\"type\": \"ephemeral\"}}]\n",
    "\t\tevaluation = openrouter_completion(\n",
    "\t\t\tmodel=model,\n",
    "\t\t\tmessages=[\n",
    "\t\t\t\t{\n",
    "\t\t\t\t\t\"role\": \"system\",\n",
    "\t\t\t\t\t\"content\": system_content,\n",
    "\t\t\t\t},\n",
    "\t\t\t\t{\n",
    "\t\t\t\t\t\"role\": \"user\",\n",
    "\t\t\t\t\t\"content\": [\n",
    "\t\t\t\t\t\t{\n",
    "\t\t\t\t\t\t\t\"type\": \"text\",\n",
    "\t\t\t\t\t\t\t\"text\": example.judge_user.strip(),\n",
    "\t\t\t\t\t\t},\n",
    "\t\t\t\t\t\t{\n",
    "\t\t\t\t\t\t\t\"type\": \"image_url\",\n",
    "\t\t\t\t\t\t\t\"image_url\": {\n",
    "\t\t\t\t\t\t\t\t\"url\": example.image_dataurl,\n",
    "\t\t\t\t\t\t\t},\n",
    "\t\t\t\t\t\t},\n",
    "\t\t\t\t\t]\n",
    "\t\t\t\t},\n",
    "\t\t\t],\n",
    "\t\t\t**extra_args,\n",
    "\t\t)\n",
    "\telse:\n",
    "\t\tclient = openai.Client()\n",
    "\t\n",
    "\t\tresponse = client.chat.completions.create(\n",
    "\t\t\tmodel=model,\n",
    "\t\t\tmessages=[\n",
    "\t\t\t\t{\n",
    "\t\t\t\t\t\"role\": \"system\",\n",
    "\t\t\t\t\t\"content\": example.judge_system.strip(),\n",
    "\t\t\t\t},\n",
    "\t\t\t\t{\n",
    "\t\t\t\t\t\"role\": \"user\",\n",
    "\t\t\t\t\t\"content\": [\n",
    "\t\t\t\t\t\t{\n",
    "\t\t\t\t\t\t\t\"type\": \"text\",\n",
    "\t\t\t\t\t\t\t\"text\": example.judge_user.strip(),\n",
    "\t\t\t\t\t\t},\n",
    "\t\t\t\t\t\t{\n",
    "\t\t\t\t\t\t\t\"type\": \"image_url\",\n",
    "\t\t\t\t\t\t\t\"image_url\": {\n",
    "\t\t\t\t\t\t\t\t\"url\": example.image_dataurl,\n",
    "\t\t\t\t\t\t\t},\n",
    "\t\t\t\t\t\t},\n",
    "\t\t\t\t\t]\n",
    "\t\t\t\t},\n",
    "\t\t\t],\n",
    "\t\t\t**extra_args,\n",
    "\t\t)\n",
    "\n",
    "\t\tif len(response.choices) != 1 or response.choices[0].message.content is None:\n",
    "\t\t\traise RuntimeError(f\"Did not get response from Judge AI: {response}\")\n",
    "\n",
    "\t\tevaluation = response.choices[0].message.content.strip()\n",
    "\n",
    "\tif isinstance(example, EvalSampleMulti):\n",
    "\t\texample = parse_judge_response_multi(evaluation, example)\n",
    "\telse:\n",
    "\t\texample = parse_judge_response(evaluation, example)\n",
    "\t\n",
    "\treturn example.model_copy(update={\"judge_model\": model, \"judge_reasoning\": evaluation})\n",
    "\n",
    "\n",
    "T = TypeVar('T', bound=Union[EvalSample, EvalSampleMulti])\n",
    "def get_run_model_request(example: T, model: str, reasoning_effort: str | None, temperature: float | None) -> tuple[dict, T]:\n",
    "\tassert example.judge_system is not None and example.judge_user is not None and example.image_dataurl is not None, \"Missing judge system, user message, or image data URL\"\n",
    "\tassert example.id is not None, \"Missing example ID\"\n",
    "\n",
    "\tif isinstance(example, EvalSample):\n",
    "\t\tcustom_id = f\"XXXXX-{example.id}\"\n",
    "\telse:\n",
    "\t\tcustom_id = f\"XXXXX-{example.id}\"\n",
    "\n",
    "\ttask = {\n",
    "\t\t\"custom_id\": custom_id,\n",
    "\t\t\"method\": \"POST\",\n",
    "\t\t\"url\": \"/v1/chat/completions\",\n",
    "\t\t\"body\": {\n",
    "\t\t\t\"model\": model,\n",
    "\t\t\t\"messages\": [\n",
    "\t\t\t\t{\n",
    "\t\t\t\t\t\"role\": \"system\",\n",
    "\t\t\t\t\t\"content\": example.judge_system.strip(),\n",
    "\t\t\t\t},\n",
    "\t\t\t\t{\n",
    "\t\t\t\t\t\"role\": \"user\",\n",
    "\t\t\t\t\t\"content\": [\n",
    "\t\t\t\t\t\t{\n",
    "\t\t\t\t\t\t\t\"type\": \"text\",\n",
    "\t\t\t\t\t\t\t\"text\": example.judge_user.strip(),\n",
    "\t\t\t\t\t\t},\n",
    "\t\t\t\t\t\t{\n",
    "\t\t\t\t\t\t\t\"type\": \"image_url\",\n",
    "\t\t\t\t\t\t\t\"image_url\": {\n",
    "\t\t\t\t\t\t\t\t\"url\": example.image_dataurl,\n",
    "\t\t\t\t\t\t\t},\n",
    "\t\t\t\t\t\t},\n",
    "\t\t\t\t\t]\n",
    "\t\t\t\t},\n",
    "\t\t\t],\n",
    "\t\t}\n",
    "\t}\n",
    "\n",
    "\tif reasoning_effort is not None:\n",
    "\t\ttask['body'][\"reasoning_effort\"] = reasoning_effort\n",
    "\t\n",
    "\tif temperature is not None:\n",
    "\t\ttask['body'][\"temperature\"] = temperature\n",
    "\t\n",
    "\treturn task, example.model_copy(update={\"judge_model\": model, \"in_progress\": int(time.time())})"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "f1a6d279",
   "metadata": {},
   "source": [
    "## Start Here\n",
    "Run all cells above to initialize the notebook"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fe0afdbc",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Add ranked examples for the new model\n",
    "unique_examples = set()\n",
    "existing_examples = set()\n",
    "\n",
    "for example in tqdm(list_db_preferences()):\n",
    "\tunique_examples.add((example.filehash, example.system, example.question, example.question_type))\n",
    "\t#if example.response_a_model is None or (example.response_a_model == CURRENT_RESPONSE_MODEL and example.response_b_model == CURRENT_RESPONSE_MODEL):\n",
    "\t#\texisting_examples.add((example.filehash, example.system, example.question, example.question_type))\n",
    "\n",
    "with db_conn() as (conn, cur):\n",
    "\tn_added = 0\n",
    "\tfor filehash, system, question, question_type in tqdm(list(unique_examples - existing_examples)):\n",
    "\n",
    "\t\tnew_example = EvalSampleMulti(\n",
    "\t\t\tfilehash=filehash,\n",
    "\t\t\tsystem=system,\n",
    "\t\t\tquestion=question,\n",
    "\t\t\tquestion_type=question_type,\n",
    "\t\t\tground_truth_knowledge=build_ground_truth_knowledge(filehash, question)\n",
    "\t\t)\n",
    "\t\tnew_example.add_to_database(cur)\n",
    "\t\tn_added += 1\n",
    "\t\n",
    "\tconn.commit()\n",
    "\n",
    "print(f\"Added {n_added} new examples to database.\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9d3133bd",
   "metadata": {},
   "source": [
    "## Hammered Examples\n",
    "These take different approaches to extracting head-to-head preferences from the ranked examples."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ed135077",
   "metadata": {},
   "outputs": [],
   "source": [
    "_ = build_hammered_examples()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5592b60f",
   "metadata": {},
   "outputs": [],
   "source": [
    "_ = build_hammered_examples2()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "883f62e0",
   "metadata": {},
   "outputs": [],
   "source": [
    "_ = build_hammered_examples3()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8975d391",
   "metadata": {},
   "outputs": [],
   "source": [
    "build_ranked_examples(1000)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a88e898d",
   "metadata": {},
   "outputs": [],
   "source": [
    "build_examples(28000)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b7a500fa",
   "metadata": {},
   "source": [
    "## Manual Run\n",
    "This code lets me run through the process of evaluating a single example manually."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e0226cad",
   "metadata": {},
   "outputs": [],
   "source": [
    "examples = list_db_preferences() + list_db_rankings()\n",
    "examples = [e for e in examples if not e.is_done]\n",
    "examples = [e for e in examples if e.in_progress is None]\n",
    "print(f\"Found {len(examples)} examples to run\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9ab67135",
   "metadata": {},
   "outputs": [],
   "source": [
    "example = random.choice(examples)\n",
    "example = prep_evaluation(example)\n",
    "image = Image.open(io.BytesIO(base64.b64decode(example.image_dataurl.split(\",\")[1])))\n",
    "scale = 512 / max(image.size)\n",
    "image = image.resize((int(image.size[0] * scale), int(image.size[1] * scale)), Image.LANCZOS)\n",
    "print(f\"Filehash: {example.filehash.hex()}\")\n",
    "display(image)\n",
    "print(f\"User: {example.judge_user}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9d3824fd",
   "metadata": {},
   "outputs": [],
   "source": [
    "print(example.id)\n",
    "with db_conn() as (conn, cur):\n",
    "\texample.update_in_database(cur)\n",
    "\tconn.commit()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b59a321c",
   "metadata": {},
   "outputs": [],
   "source": [
    "#evaluated_example = run_openai_model(example, model=\"gpt-4.1\", reasoning_effort=None, temperature=0.5, use_openrouter=False)\n",
    "#winner, reasoning = run_openrouter_model(system, user, image_dataurl, model=\"anthropic/claude-3.7-sonnet:beta\", reasoning_effort=None)\n",
    "#evaluated_example = run_openai_model(example, model=\"google/gemini-2.5-pro-preview-03-25\", reasoning_effort=None, temperature=0.4, use_openrouter=True)\n",
    "#winner, reasoning = run_openai_model(system, user, image_dataurl, temperature=None, model='o3', reasoning_effort='medium')\n",
    "evaluated_example = run_openai_model(example, model=\"o4-mini\", reasoning_effort=\"medium\", temperature=None, use_openrouter=False, flex=False)\n",
    "#evaluated_example = run_openai_model(example, model=\"o3\", reasoning_effort=\"medium\", temperature=None, use_openrouter=False, flex=True)\n",
    "#evaluated_example = run_openai_model(example, model=\"qwen/qwen2.5-vl-32b-instruct:free\", reasoning_effort=None, temperature=0.6, use_openrouter=True)\n",
    "\n",
    "print(f\"Reasoning: {evaluated_example.judge_reasoning}\")\n",
    "if isinstance(evaluated_example, EvalSample):\n",
    "\tprint(f\"Response A Score: {evaluated_example.judge_response_a_score}\")\n",
    "\tprint(f\"Response B Score: {evaluated_example.judge_response_b_score}\")\n",
    "\tprint(f\"Winner: {evaluated_example.judge_winner}\")\n",
    "else:\n",
    "\tprint(f\"Rankings: {evaluated_example.judge_rankings}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "369237b7",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Save the preference\n",
    "with db_conn() as (conn, cur):\n",
    "\tevaluated_example.update_in_database(cur)\n",
    "\tconn.commit()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f755cb86",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6acfb6c4",
   "metadata": {},
   "outputs": [],
   "source": [
    "### ⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️⚠️ ###\n",
    "###### If an unacceptable image is found, run this to delete it from the database\n",
    "with db_conn() as (conn, cur):\n",
    "\tcur.execute(\"DELETE FROM alignment_preferences_ai WHERE image_hash = %s\", (example.filehash,))\n",
    "\tdeleted = cur.rowcount\n",
    "\tcur.execute(\"DELETE FROM alignment_rankings_ai WHERE image_hash = %s\", (example.filehash,))\n",
    "\tdeleted += cur.rowcount\n",
    "\tconn.commit()\n",
    "\tprint(f\"Deleted {deleted} examples from database.\")\n",
    "####### DO NOT RUN OTHERWISE"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a75a844f",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7124455c",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "d24e769b",
   "metadata": {},
   "source": [
    "## Parallel Preferences"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fbac24bd",
   "metadata": {},
   "outputs": [],
   "source": [
    "examples = list_db_preferences() + list_db_rankings()\n",
    "examples = [e for e in examples if not e.is_done]\n",
    "examples = [e for e in examples if e.in_progress is None]\n",
    "random.shuffle(examples)\n",
    "print(f\"Loaded {len(examples)} examples to process\")\n",
    "\n",
    "work = random.sample(examples, min(256, len(examples)))\n",
    "#work = [prep_evaluation(example) for example in tqdm(work, desc=\"Preparing examples\")]\n",
    "\n",
    "def handle_job(example: EvalSample | EvalSampleMulti) -> EvalSample | EvalSampleMulti | None:\n",
    "\texample = prep_evaluation(example)\n",
    "\n",
    "\ttry:\n",
    "\t\tevaluated_example = run_openai_model(example, model=\"google/gemini-2.5-pro-preview-03-25\", reasoning_effort=None, temperature=0.4, use_openrouter=True, gemini_caching=True)\n",
    "\t\t#evaluated_example = run_openai_model(example, model=\"o4-mini\", reasoning_effort='medium', temperature=None, use_openrouter=False, flex=False)\n",
    "\t\t#evaluated_example = run_openai_model(example, model=\"o3\", reasoning_effort='medium', temperature=None, use_openrouter=False, flex=False)\n",
    "\t\t#evaluated_example = run_openai_model(example, model=\"gpt-4.1\", reasoning_effort=None, temperature=0.5, use_openrouter=False)\n",
    "\t\tassert isinstance(evaluated_example, EvalSample) or isinstance(evaluated_example, EvalSampleMulti), f\"Expected EvalSample or EvalSampleMulti, got {type(evaluated_example)}\"\n",
    "\texcept Exception as e:\n",
    "\t\tprint(f\"Error from run_openai_model: {e}\")\n",
    "\t\tprint(format_exc())\n",
    "\t\treturn None\n",
    "\t\n",
    "\treturn evaluated_example\n",
    "\n",
    "\n",
    "with ThreadPoolExecutor(max_workers=16) as executor, tqdm(total=len(work)) as pbar, db_conn() as (conn, cur):\n",
    "\tfutures = [executor.submit(handle_job, example) for example in work]\n",
    "\tfor future in as_completed(futures):\n",
    "\t\tpbar.update(1)\n",
    "\t\ttry:\n",
    "\t\t\tresult = future.result()\n",
    "\t\texcept Exception as e:\n",
    "\t\t\tprint(f\"Error: {e}\")\n",
    "\t\t\tcontinue\n",
    "\n",
    "\t\tif result is None:\n",
    "\t\t\tcontinue\n",
    "\n",
    "\t\tassert isinstance(result, EvalSample) or isinstance(result, EvalSampleMulti), f\"Expected EvalSample or EvalSampleMulti, got {type(result)}\"\n",
    "\t\tassert result.judge_model is not None, f\"Missing judge model in result: {result.filehash.hex()}\"\n",
    "\t\tassert result.judge_reasoning is not None, f\"Missing judge reasoning in result: {result.filehash.hex()}\"\n",
    "\n",
    "\t\tresult.update_in_database(cur)\n",
    "\t\tconn.commit()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b3d8d3e3",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "c12a93a5",
   "metadata": {},
   "source": [
    "## Batched Preferences"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c81ca970",
   "metadata": {},
   "outputs": [],
   "source": [
    "all_examples = list_db_preferences() + list_db_rankings()\n",
    "#all_examples = list_db_rankings()\n",
    "#all_examples = list_db_preferences()\n",
    "all_examples = [e for e in all_examples if not e.is_done]\n",
    "all_examples = [e for e in all_examples if e.in_progress is None]\n",
    "print(f\"Loaded {len(all_examples)} examples to process\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b21707f3",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Prep all examples in parallel\n",
    "def handle_prep_job(example: EvalSample | EvalSampleMulti) -> EvalSample | EvalSampleMulti:\n",
    "\treturn prep_evaluation(example)\n",
    "\n",
    "\n",
    "with ThreadPoolExecutor(max_workers=8) as executor, tqdm(total=len(all_examples)) as pbar, db_conn() as (conn, cur):\n",
    "\tfutures = [executor.submit(handle_prep_job, example) for example in all_examples]\n",
    "\tfor future in as_completed(futures):\n",
    "\t\tpbar.update(1)\n",
    "\t\ttry:\n",
    "\t\t\tresult = future.result()\n",
    "\t\texcept Exception as e:\n",
    "\t\t\tprint(f\"Error: {e}\")\n",
    "\t\t\tcontinue\n",
    "\n",
    "\t\tassert isinstance(result, EvalSample) or isinstance(result, EvalSampleMulti), f\"Expected EvalSample or EvalSampleMulti, got {type(result)}\"\n",
    " \n",
    "\t\tresult.update_in_database(cur)\n",
    "\t\tconn.commit()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5847f921",
   "metadata": {},
   "outputs": [],
   "source": [
    "def send_batch(batch_data: bytes):\n",
    "\twhile True:\n",
    "\t\ttry:\n",
    "\t\t\tclient = openai.Client()\n",
    "\t\t\tbatch_data_io = io.BytesIO(batch_data)\n",
    "\t\t\tbatch_file = client.files.create(file=batch_data_io, purpose=\"batch\")\n",
    "\t\t\tbatch_job = client.batches.create(\n",
    "\t\t\t\tinput_file_id=batch_file.id,\n",
    "\t\t\t\tendpoint=\"/v1/chat/completions\",\n",
    "\t\t\t\tcompletion_window=\"24h\",\n",
    "\t\t\t)\n",
    "\t\t\tprint(f\"Batch job created: {batch_job.id}\")\n",
    "\t\t\tbreak\n",
    "\t\texcept Exception as e:\n",
    "\t\t\tprint(f\"Error sending batch: {e}\")\n",
    "\t\t\ttime.sleep(5)\n",
    "\t\t\tcontinue\n",
    "\n",
    "\n",
    "print(f\"Number of examples: {len(all_examples)}\")\n",
    "random.shuffle(all_examples)\n",
    "work = all_examples#[:1024]\n",
    "current_batch = {\n",
    "\t\"o4-mini\": b\"\",\n",
    "\t\"gpt-4.1\": b\"\",\n",
    "\t#\"o3\": b\"\",\n",
    "}\n",
    "\n",
    "\n",
    "with db_conn() as (conn, cur):\n",
    "\tfor example in tqdm(work):\n",
    "\t\ttry:\n",
    "\t\t\texample = prep_evaluation(example)\n",
    "\t\texcept Exception as e:\n",
    "\t\t\tprint(f\"Error preparing example: {e}\")\n",
    "\t\t\tcontinue\n",
    "\n",
    "\t\t# Turn into a task\n",
    "\t\tmodel = random.choice(list(current_batch.keys()))\n",
    "\t\tif model == 'o4-mini':\n",
    "\t\t\ttask, example = get_run_model_request(example, model='o4-mini', reasoning_effort='medium', temperature=None)\n",
    "\t\telif model == 'gpt-4.1':\n",
    "\t\t\ttask, example = get_run_model_request(example, model='gpt-4.1', reasoning_effort=None, temperature=0.5)\n",
    "\t\telif model == 'o3':\n",
    "\t\t\ttask, example = get_run_model_request(example, model='o3', reasoning_effort='medium', temperature=None)\n",
    "\t\telse:\n",
    "\t\t\traise ValueError(f\"Unknown model: {model}\")\n",
    "\t\t\n",
    "\t\texample.update_in_database(cur)\n",
    "\t\tconn.commit()\n",
    "\t\t\n",
    "\t\t# Write to the batch\n",
    "\t\tline = json.dumps(task).encode('utf-8') + b\"\\n\"\n",
    "\t\tif len(current_batch[model]) + len(line) > 90 * 1000 * 1000:\n",
    "\t\t\t# Batch is full, send it\n",
    "\t\t\tsend_batch(current_batch[model])\n",
    "\t\t\tcurrent_batch[model] = b\"\"\n",
    "\t\tcurrent_batch[model] += line\n",
    "\n",
    "# Send the vestiges\n",
    "for batch_data in current_batch.values():\n",
    "\tif len(batch_data) != 0:\n",
    "\t\tsend_batch(batch_data)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0a228ece",
   "metadata": {},
   "source": [
    "## Download Batches"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6b68b0f4",
   "metadata": {},
   "outputs": [],
   "source": [
    "client = openai.Client()\n",
    "with db_conn() as (conn, cur):\n",
    "\tcur.execute(\"SELECT batch_id FROM alignment_batch_results\")\n",
    "\tcompleted_batch_ids = set(batch_id for batch_id, in cur)\n",
    "\n",
    "\tfor i, batch in tqdm(enumerate(client.batches.list())):\n",
    "\t\tif i >= 400:\n",
    "\t\t\tbreak\n",
    "\n",
    "\t\tif batch.status != \"completed\" and batch.status != \"expired\" and batch.status != \"cancelled\":\n",
    "\t\t\tprint(f\"Batch {batch.id} not completed: {batch.status}\")\n",
    "\t\t\tcontinue\n",
    "\n",
    "\t\tif batch.output_file_id is None or batch.id in completed_batch_ids:\n",
    "\t\t\tcontinue\n",
    "\n",
    "\t\ttry:\n",
    "\t\t\tresult = client.files.content(batch.output_file_id).content\n",
    "\t\texcept Exception as e:\n",
    "\t\t\tprint(f\"Error fetching batch {batch.id}: {e}\")\n",
    "\t\t\tcontinue\n",
    "\t\tcur.execute(\"INSERT INTO alignment_batch_results (batch_id, batch_data) VALUES (%s, %s) ON CONFLICT (batch_id) DO NOTHING\", (batch.id, result))\n",
    "\t\tconn.commit()\n",
    "\t\tprint(f\"Fetched batch {batch.id} with {len(result)} bytes\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "81fe67c8",
   "metadata": {},
   "outputs": [],
   "source": [
    "client.batches.cancel(\"XXXXX\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "cac8bc5e",
   "metadata": {},
   "source": [
    "## Process Batches"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0a53f9b4",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Process every downloaded-but-unprocessed batch blob: decode the JSONL\n",
    "# output, route each judge response to its EvalSample / EvalSampleMulti row,\n",
    "# then mark the batch as processed.\n",
    "# NOTE(review): both startswith() prefixes below are redacted to the same\n",
    "# 'XXXXX-' literal; in the real source they must differ, otherwise the second\n",
    "# branch is unreachable -- confirm against the unredacted code.\n",
    "with db_conn() as (conn, cur):\n",
    "\tcur.execute(\"SELECT batch_id, batch_data FROM alignment_batch_results WHERE processed = FALSE\")\n",
    "\tunprocessed = cur.fetchall()\n",
    "\n",
    "\tfor batch_id, batch_data in tqdm(unprocessed):\n",
    "\t\t# batch_data is the raw bytes of an OpenAI batch output file: one JSON object per line\n",
    "\t\tbatch_data = batch_data.decode('utf-8').splitlines()\n",
    "\t\tbatch_data = [json.loads(line) for line in batch_data if line.strip()]\n",
    "\n",
    "\t\tfor line in batch_data:\n",
    "\t\t\tif 'body' not in line['response'] or 'choices' not in line['response']['body'] or len(line['response']['body']['choices']) != 1:\n",
    "\t\t\t\tprint(f\"Warning: invalid response in batch {batch_id}: {line}\")\n",
    "\t\t\t\tcontinue\n",
    "\n",
    "\t\t\tresponse = line['response']['body']['choices'][0]['message']['content'].strip()\n",
    "\n",
    "\t\t\t# The custom_id prefix encodes which table the sample came from\n",
    "\t\t\tcustom_id = line['custom_id']\n",
    "\t\t\tif custom_id.startswith('XXXXX-'):\n",
    "\t\t\t\tcustom_id = int(custom_id[len('XXXXX-'):])\n",
    "\n",
    "\t\t\t\t# Find the example in the database\n",
    "\t\t\t\texample = EvalSample.get_from_database(conn, custom_id)\n",
    "\t\t\t\tif example is None:\n",
    "\t\t\t\t\tprint(f\"Warning: no example found for batch {batch_id}, custom_id {custom_id}\")\n",
    "\t\t\t\t\tcontinue\n",
    "\n",
    "\t\t\t\t# Add the response to the example\n",
    "\t\t\t\texample = example.model_copy(update={\"judge_reasoning\": response})\n",
    "\t\t\t\texample.update_in_database(cur)\n",
    "\t\t\telif custom_id.startswith('XXXXX-'):\n",
    "\t\t\t\tcustom_id = int(custom_id[len('XXXXX-'):])\n",
    "\n",
    "\t\t\t\t# Find the example in the database\n",
    "\t\t\t\texample = EvalSampleMulti.get_from_database(conn, custom_id)\n",
    "\t\t\t\tif example is None:\n",
    "\t\t\t\t\tprint(f\"Warning: no example found for batch {batch_id}, custom_id {custom_id}\")\n",
    "\t\t\t\t\tcontinue\n",
    "\n",
    "\t\t\t\t# Add the response to the example\n",
    "\t\t\t\texample = example.model_copy(update={\"judge_reasoning\": response})\n",
    "\t\t\t\texample.update_in_database(cur)\n",
    "\t\t\telse:\n",
    "\t\t\t\tcontinue\n",
    "\t\t\n",
    "\t\tcur.execute(\"UPDATE alignment_batch_results SET processed = TRUE WHERE batch_id = %s\", (batch_id,))\n",
    "\t\tprint(f\"Processed batch {batch_id} with {len(batch_data)} entries\")\n",
    "\t\n",
    "\tconn.commit()\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "467a086d",
   "metadata": {},
   "source": [
    "## Parse Judge Responses"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "125e047a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Parse stored judge_reasoning text into structured results (winner for\n",
    "# EvalSample, rankings for EvalSampleMulti). Already-parsed examples are\n",
    "# skipped; parse failures are collected and written to\n",
    "# judge-parsing-errors.txt instead of aborting the run.\n",
    "examples = list_db_preferences() + list_db_rankings()\n",
    "n_count = 0\n",
    "errors = []\n",
    "\n",
    "with db_conn() as (conn, cur), open(\"judge-parsing-errors.txt\", \"w\") as error_file:\n",
    "\tfor example in tqdm(examples):\n",
    "\t\t# Skip examples that were never judged or are already parsed\n",
    "\t\tif example.judge_reasoning is None:\n",
    "\t\t\tcontinue\n",
    "\n",
    "\t\tif isinstance(example, EvalSample) and example.judge_winner is not None:\n",
    "\t\t\tcontinue\n",
    "\n",
    "\t\tif isinstance(example, EvalSampleMulti) and example.judge_rankings is not None:\n",
    "\t\t\tcontinue\n",
    "\n",
    "\t\ttry:\n",
    "\t\t\tif isinstance(example, EvalSample):\n",
    "\t\t\t\texample = parse_judge_response(example.judge_reasoning, example)\n",
    "\t\t\telif isinstance(example, EvalSampleMulti):\n",
    "\t\t\t\texample = parse_judge_response_multi(example.judge_reasoning, example)\n",
    "\t\t\telse:\n",
    "\t\t\t\traise RuntimeError(f\"Unknown example type: {type(example)}\")\n",
    "\t\texcept Exception as e:\n",
    "\t\t\terrors.append((example.id, e))\n",
    "\t\t\terror_file.write(\"=\" * 80 + \"\\n\")\n",
    "\t\t\terror_file.write(f\"Error parsing example {example.id}: {e}\\n\")\n",
    "\t\t\terror_file.write(\"=\" * 80 + \"\\n\\n\")\n",
    "\t\t\tcontinue\n",
    "\n",
    "\t\t# No longer in progress\n",
    "\t\texample = example.model_copy(update={\"in_progress\": None})\n",
    "\n",
    "\t\t# Update the database\n",
    "\t\texample.update_in_database(cur)\n",
    "\n",
    "\t\tn_count += 1\n",
    "\t\n",
    "\tconn.commit()\n",
    "\n",
    "print(f\"Processed {n_count} examples\")\n",
    "print(f\"Errors: {len(errors)}\")\n",
    "print(\"Error messages written to judge-parsing-errors.txt\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "847d0e09",
   "metadata": {},
   "source": [
    "## Clean Batches\n",
    "Once all batches are no longer in progress on the OpenAI side, and we've downloaded and processed them all, some preferences/rankings might still be left in progress, due to errors. This clears their state so they can be tried again."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f5354f00",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Reset rows left in an inconsistent state by failed/errored batches so they\n",
    "# can be retried: clear in_progress flags, and drop judge_reasoning that never\n",
    "# yielded a parsed winner / rankings.\n",
    "with db_conn() as (conn, cur):\n",
    "\tcur.execute(\"UPDATE alignment_preferences_ai SET in_progress = NULL WHERE in_progress IS NOT NULL\")\n",
    "\tprint(f\"Reset in_progress for {cur.rowcount} preferences\")\n",
    "\tcur.execute(\"UPDATE alignment_rankings_ai SET in_progress = NULL WHERE in_progress IS NOT NULL\")\n",
    "\tprint(f\"Reset in_progress for {cur.rowcount} rankings\")\n",
    "\n",
    "\tcur.execute(\"UPDATE alignment_preferences_ai SET judge_reasoning = NULL WHERE judge_reasoning IS NOT NULL AND judge_winner IS NULL\")\n",
    "\tprint(f\"Reset judge_reasoning for {cur.rowcount} preferences without a winner\")\n",
    "\tcur.execute(\"UPDATE alignment_rankings_ai SET judge_reasoning = NULL WHERE judge_reasoning IS NOT NULL AND judge_rankings IS NULL\")\n",
    "\tprint(f\"Reset judge_reasoning for {cur.rowcount} rankings without rankings\")\n",
    "\tconn.commit()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fafdd567",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5d4e14fd",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "331b4174",
   "metadata": {},
   "source": [
    "## Measure Progress"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4c6da14c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Keep only fully-judged preference pairs where both responses came from the\n",
    "# current response model.\n",
    "all_examples = list_db_preferences()\n",
    "all_examples = [e for e in all_examples if e.judge_winner is not None and e.judge_response_a_score is not None and e.judge_response_b_score is not None]\n",
    "all_examples = [e for e in all_examples if e.response_a_model == CURRENT_RESPONSE_MODEL and e.response_b_model == CURRENT_RESPONSE_MODEL]\n",
    "#all_examples = [e for e in all_examples if e.response_a_model == \"8gjfxjdm\" and e.response_b_model == \"8gjfxjdm\"]\n",
    "print(f\"{len(all_examples)} examples with scores\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d95dcef6",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Validate each judged pair and bucket it as valid or rejected.\n",
    "# Rejection reasons (first match wins):\n",
    "#   abnormal_winner  - the winning response is a degenerate/repetitive (\"repper\") output\n",
    "#   close_scores     - judge scores within 0.2 of each other\n",
    "#   winner_low_score - the winner itself scored below 0.5\n",
    "by_question_type = defaultdict(list)\n",
    "valids = defaultdict(list)\n",
    "rejects = defaultdict(list)\n",
    "invalids = defaultdict(list)\n",
    "valids_by_key = set()\n",
    "\n",
    "for example in tqdm(all_examples):\n",
    "\tassert example.response_a is not None and example.response_b is not None, \"Missing response_a or response_b\"\n",
    "\tassert example.judge_response_a_score is not None and example.judge_response_b_score is not None, \"Missing judge response scores\"\n",
    "\tby_question_type[example.question_type].append((example.judge_response_a_score, example.judge_response_b_score))\n",
    "\n",
    "\tabnormal_response_a = is_repper(example.response_a)\n",
    "\tabnormal_response_b = is_repper(example.response_b)\n",
    "\n",
    "\t# Classify: first matching rejection reason wins (same cascade as the\n",
    "\t# tidy pandas version of this analysis below).\n",
    "\treason = None\n",
    "\tif example.judge_winner == \"a\" and abnormal_response_a:\n",
    "\t\treason = \"abnormal_winner\"\n",
    "\telif example.judge_winner == \"b\" and abnormal_response_b:\n",
    "\t\treason = \"abnormal_winner\"\n",
    "\telif abs(example.judge_response_a_score - example.judge_response_b_score) < 0.2:\n",
    "\t\treason = \"close_scores\"\n",
    "\telif example.judge_winner == \"a\" and example.judge_response_a_score < 0.5:\n",
    "\t\treason = \"winner_low_score\"\n",
    "\telif example.judge_winner == \"b\" and example.judge_response_b_score < 0.5:\n",
    "\t\treason = \"winner_low_score\"\n",
    "\n",
    "\tif reason is not None:\n",
    "\t\trejects[example.question_type].append(reason)\n",
    "\t\tinvalids[example.question_type].append(example)\n",
    "\t\tcontinue\n",
    "\n",
    "\tvalids[example.question_type].append(example.judge_response_a_score if example.judge_winner == \"a\" else example.judge_response_b_score)\n",
    "\tvalids_by_key.add((example.filehash, example.question))\n",
    "\n",
    "total_valid = sum(len(v) for v in valids.values())\n",
    "\n",
    "print(\"Question type statistics:\")\n",
    "for question_type in by_question_type.keys():\n",
    "\tprint(f\"{question_type}\")\n",
    "\tscores = [score for scores in by_question_type[question_type] for score in scores]\n",
    "\tn_scores = len(scores)\n",
    "\tn_examples = n_scores // 2\n",
    "\tmean_score = sum(scores) / n_scores\n",
    "\tmin_score = min(scores)\n",
    "\tmax_score = max(scores)\n",
    "\tn_valid = len(valids[question_type])\n",
    "\n",
    "\t# Guard the empty cases the original crashed on: a question type with zero\n",
    "\t# valid examples (min()/max()/mean of an empty list) and total_valid == 0.\n",
    "\tshare_valid = n_valid / total_valid if total_valid else math.nan\n",
    "\tprint(f\"  {n_examples} examples ({share_valid:.2%} vs {question_types[question_type]:.2%} target)\")\n",
    "\tprint(f\"  {n_valid} valid examples ({n_valid / n_examples:.2%})\")\n",
    "\tprint(f\"  Mean score: {mean_score:.4f}\")\n",
    "\tprint(f\"  Min score: {min_score:.4f}\")\n",
    "\tprint(f\"  Max score: {max_score:.4f}\")\n",
    "\tif n_valid > 0:\n",
    "\t\tmean_valid = sum(valids[question_type]) / n_valid\n",
    "\t\tprint(f\"  Valid scores: {mean_valid:.4f} ({min(valids[question_type]):.4f} - {max(valids[question_type]):.4f})\")\n",
    "\telse:\n",
    "\t\tprint(\"  Valid scores: none\")\n",
    "\n",
    "\tfor reject_reason, count in Counter(rejects[question_type]).items():\n",
    "\t\tprint(f\"  {reject_reason}: {count} ({count / n_examples:.2%})\")\n",
    "\n",
    "print(f\"Total Valid: {total_valid}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f6de15fd",
   "metadata": {},
   "outputs": [],
   "source": [
    "# ────────────────────────────────────────────────────────────────────────────────\n",
    "# Pass 1 ─ gather raw data\n",
    "# ────────────────────────────────────────────────────────────────────────────────\n",
    "# Tidy re-implementation of the stats cell above: same filtering rules, with\n",
    "# results collected into summary_df and plotted as per-type score histograms.\n",
    "by_question_type   = defaultdict(list)   # (a_score, b_score)\n",
    "valids             = defaultdict(list)   # winner score only\n",
    "rejects            = defaultdict(list)   # list of textual reasons\n",
    "invalid_examples   = defaultdict(list)   # the examples themselves (if needed)\n",
    "\n",
    "for ex in tqdm(all_examples, desc=\"Scanning examples\"):\n",
    "    assert ex.response_a and ex.response_b,          \"Missing response text\"\n",
    "    assert ex.judge_response_a_score is not None and ex.judge_response_b_score is not None, \"Missing scores\"\n",
    "\n",
    "    by_question_type[ex.question_type].append(\n",
    "        (ex.judge_response_a_score, ex.judge_response_b_score)\n",
    "    )\n",
    "\n",
    "    abnormal_a = is_repper(ex.response_a)\n",
    "    abnormal_b = is_repper(ex.response_b)\n",
    "\n",
    "    # ---- rejection checks ----------------------------------------------------\n",
    "    reason = None\n",
    "    if ex.judge_winner == \"a\" and abnormal_a:\n",
    "        reason = \"abnormal_winner\"\n",
    "    elif ex.judge_winner == \"b\" and abnormal_b:\n",
    "        reason = \"abnormal_winner\"\n",
    "    elif abs(ex.judge_response_a_score - ex.judge_response_b_score) < 0.20:\n",
    "        reason = \"close_scores\"\n",
    "    elif ex.judge_winner == \"a\" and ex.judge_response_a_score < 0.50:\n",
    "        reason = \"winner_low_score\"\n",
    "    elif ex.judge_winner == \"b\" and ex.judge_response_b_score < 0.50:\n",
    "        reason = \"winner_low_score\"\n",
    "\n",
    "    if reason:\n",
    "        rejects[ex.question_type].append(reason)\n",
    "        invalid_examples[ex.question_type].append(ex)\n",
    "        continue\n",
    "\n",
    "    # ---- valid example -------------------------------------------------------\n",
    "    winner_score = (\n",
    "        ex.judge_response_a_score if ex.judge_winner == \"a\" else ex.judge_response_b_score\n",
    "    )\n",
    "    valids[ex.question_type].append(winner_score)\n",
    "\n",
    "# ────────────────────────────────────────────────────────────────────────────────\n",
    "# Pass 2 ─ build tidy table\n",
    "# ────────────────────────────────────────────────────────────────────────────────\n",
    "rows = []\n",
    "for qtype, pairs in by_question_type.items():\n",
    "    flat_scores = list(itertools.chain.from_iterable(pairs))\n",
    "    n_examples  = len(flat_scores) // 2\n",
    "    n_valid     = len(valids[qtype])\n",
    "\n",
    "    data = dict(\n",
    "        question_type = qtype,\n",
    "        n_examples    = n_examples,\n",
    "        n_valid       = n_valid,\n",
    "        pct_valid     = n_valid / n_examples if n_examples else math.nan,\n",
    "        mean_all      = sum(flat_scores) / len(flat_scores),\n",
    "        min_all       = min(flat_scores),\n",
    "        max_all       = max(flat_scores),\n",
    "        mean_valid    = (sum(valids[qtype]) / n_valid) if n_valid else math.nan,\n",
    "        min_valid     = min(valids[qtype]) if n_valid else math.nan,\n",
    "        max_valid     = max(valids[qtype]) if n_valid else math.nan,\n",
    "    )\n",
    "\n",
    "    # add reject-reason counts as extra columns (helps later if you want a bar chart)\n",
    "    for reason, cnt in Counter(rejects[qtype]).items():\n",
    "        data[f\"rej_{reason}\"] = cnt\n",
    "    rows.append(data)\n",
    "\n",
    "# NOTE(review): sort_values(\"question_type\") after set_index relies on pandas\n",
    "# accepting index level names in `by` (pandas >= 0.23) -- confirm environment.\n",
    "summary_df = (\n",
    "    pd.DataFrame(rows)\n",
    "      .set_index(\"question_type\")\n",
    "      .sort_values(\"question_type\")\n",
    "      .fillna(0)\n",
    ")\n",
    "\n",
    "# nicer formatting for notebook display\n",
    "pd.options.display.float_format = \"{:,.4f}\".format\n",
    "display(summary_df[\n",
    "    [\"n_examples\", \"n_valid\", \"pct_valid\", \"mean_all\",\n",
    "     \"min_all\", \"max_all\", \"mean_valid\", \"min_valid\", \"max_valid\"]\n",
    "])\n",
    "\n",
    "print(f\"\\nTOTAL VALID: {summary_df.n_valid.sum():,}\")\n",
    "\n",
    "# ────────────────────────────────────────────────────────────────────────────────\n",
    "# Pass 3 ─ visualisations\n",
    "# ────────────────────────────────────────────────────────────────────────────────\n",
    "num_types   = len(by_question_type)\n",
    "ncols       = 3\n",
    "nrows       = math.ceil(num_types / ncols)\n",
    "fig, axes   = plt.subplots(nrows, ncols, figsize=(ncols * 5, nrows * 4), sharex=True)\n",
    "\n",
    "all_axes = axes.flatten()\n",
    "for ax, (qtype, pairs) in zip(all_axes, by_question_type.items()):\n",
    "    flat_scores  = list(itertools.chain.from_iterable(pairs))\n",
    "    valid_scores = valids[qtype]\n",
    "\n",
    "    # full distribution\n",
    "    ax.hist(flat_scores, bins=20, alpha=0.4, label=\"all\")\n",
    "    # valid only\n",
    "    if valid_scores:\n",
    "        ax.hist(valid_scores, bins=20, alpha=0.7, label=\"valid\")\n",
    "        ax.axvline(summary_df.loc[qtype, \"mean_valid\"], color=\"red\", ls=\"--\", lw=1)\n",
    "\n",
    "    ax.set_title(qtype, fontsize=10)\n",
    "    ax.set_xlim(0, 1)\n",
    "    ax.set_ylim(0)            # autorescale high end\n",
    "    ax.set_xlabel(\"score\")\n",
    "    ax.set_ylabel(\"count\")\n",
    "\n",
    "# tidy up empty subplots\n",
    "for k in range(len(by_question_type), len(all_axes)):\n",
    "    fig.delaxes(all_axes[k])\n",
    "\n",
    "fig.suptitle(\"Score distributions by question type\", y=1.02, fontsize=14)\n",
    "fig.tight_layout()\n",
    "plt.show()\n",
    "\n",
    "# ────────────────────────────────────────────────────────────────────────────────\n",
    "# OPTIONAL: stacked-bar of reject reasons\n",
    "# ────────────────────────────────────────────────────────────────────────────────\n",
    "\"\"\"\n",
    "rej_cols = [c for c in summary_df.columns if c.startswith(\"rej_\")]\n",
    "if rej_cols:\n",
    "    summary_df[rej_cols].plot(kind=\"bar\", stacked=True, figsize=(12, 4))\n",
    "    plt.title(\"Rejection reasons by question type\")\n",
    "    plt.ylabel(\"count\")\n",
    "    plt.tight_layout()\n",
    "    plt.show()\n",
    "\"\"\"\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5d537a92",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d022fff0",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build one EvalSampleMulti renewal per filehash from the invalid\n",
    "# questionAnswer pairs, skipping (filehash, question) combos that already have\n",
    "# a valid result. NOTE(review): random.sample over the full list is just an\n",
    "# unseeded shuffle, so which example is picked per filehash varies run to run.\n",
    "to_renew = {}\n",
    "\n",
    "for example in tqdm(random.sample(invalids[\"questionAnswer\"], len(invalids[\"questionAnswer\"]))):\n",
    "\tif example.filehash in to_renew:\n",
    "\t\tcontinue\n",
    "\n",
    "\tif (example.filehash, example.question) in valids_by_key:\n",
    "\t\tcontinue\n",
    "\n",
    "\tnew_example = EvalSampleMulti(\n",
    "\t\tfilehash=example.filehash,\n",
    "\t\tsystem=example.system,\n",
    "\t\tquestion=example.question,\n",
    "\t\tquestion_type=example.question_type,\n",
    "\t\tground_truth_knowledge=build_ground_truth_knowledge(example.filehash, example.question),\n",
    "\t)\n",
    "\n",
    "\tto_renew[example.filehash] = new_example\n",
    "\n",
    "print(f\"{len(to_renew)} examples to renew\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bff92072",
   "metadata": {},
   "outputs": [],
   "source": [
    "def pool_build_responses(example: EvalSampleMulti) -> EvalSampleMulti | None:\n",
    "\t\"\"\"Sample responses for `example` (up to 100 draws), aiming for at least 10\n",
    "\tdistinct responses with >=3 degenerate (\"repper\") and >=3 normal ones, then\n",
    "\treturn a copy carrying a shuffled pick of exactly 10 responses (3 reppers,\n",
    "\ttopped up from non-reppers, then extra reppers). Returns None when fewer\n",
    "\tthan 10 distinct responses were collected.\n",
    "\n",
    "\tNOTE(review): reppers/non_reppers are sets, so which \"first 3\" reppers are\n",
    "\tkept depends on set iteration order -- selection is not deterministic.\n",
    "\t\"\"\"\n",
    "\texample = example.get_image()\n",
    "\n",
    "\treppers = set()\n",
    "\tnon_reppers = set()\n",
    "\n",
    "\tfor _ in range(100):\n",
    "\t\tif (len(reppers) + len(non_reppers)) >= 10 and len(reppers) >= 3 and len(non_reppers) >= 3:\n",
    "\t\t\tbreak\n",
    "\n",
    "\t\tresponse = generate_response(example)\n",
    "\t\tif is_repper(response):\n",
    "\t\t\treppers.add(response)\n",
    "\t\telse:\n",
    "\t\t\tnon_reppers.add(response)\n",
    "\t\n",
    "\tif (len(reppers) + len(non_reppers)) < 10:\n",
    "\t\tprint(f\"Warning: not enough responses for {example.filehash.hex()}\")\n",
    "\t\t#print(f\"Question: {example.question}\")\n",
    "\t\t#print(\"Reppers:\")\n",
    "\t\t#for r in reppers:\n",
    "\t\t#\tprint(f\"  {r[:50]}...\")\n",
    "\t\t#print(\"Non-reppers:\")\n",
    "\t\t#for r in non_reppers:\n",
    "\t\t#\tprint(f\"  {r[:50]}...\")\n",
    "\t\treturn None\n",
    "\t\n",
    "\treppers = list(reppers)\n",
    "\tnon_reppers = list(non_reppers)\n",
    "\t\n",
    "\tresponses = reppers[:3]\n",
    "\tif len(non_reppers) > 0:\n",
    "\t\tresponses = responses + random.sample(non_reppers, min(len(non_reppers), 10 - len(responses)))\n",
    "\t\n",
    "\tif len(responses) < 10:\n",
    "\t\tresponses = responses + random.sample(reppers[3:], 10 - len(responses))\n",
    "\n",
    "\tassert len(responses) == 10\n",
    "\trandom.shuffle(responses)\n",
    "\n",
    "\treturn example.model_copy(update={\"responses\": responses, \"responses_model\": CURRENT_RESPONSE_MODEL})\n",
    "\n",
    "\n",
    "work = list(to_renew.values()) #[:256]\n",
    "\n",
    "# for example in tqdm(work):\n",
    "# \ttry:\n",
    "# \t\texample = pool_build_responses(example)\n",
    "# \texcept Exception as e:\n",
    "# \t\tprint(f\"Error generating responses for example ({type(example)}) {example.filehash.hex()}: {e}\")\n",
    "# \t\tcontinue\n",
    "\n",
    "# \tif example is None:\n",
    "# \t\tcontinue\n",
    "\n",
    "# \twith db_conn() as (conn, cur):\n",
    "# \t\texample.add_to_database(cur)\n",
    "# \t\tconn.commit()\n",
    "\n",
    "# Generate in parallel; each completed example is inserted and committed immediately.\n",
    "with ThreadPoolExecutor(max_workers=8) as executor, tqdm(total=len(work)) as pbar, db_conn() as (conn, cur):\n",
    "\tfutures = [executor.submit(pool_build_responses, example) for example in work]\n",
    "\tfor future in as_completed(futures):\n",
    "\t\tpbar.update(1)\n",
    "\t\ttry:\n",
    "\t\t\tresult = future.result()\n",
    "\t\texcept Exception as e:\n",
    "\t\t\tprint(f\"Error: {e}\")\n",
    "\t\t\tcontinue\n",
    "\n",
    "\t\tif result is None:\n",
    "\t\t\tcontinue\n",
    "\n",
    "\t\tassert isinstance(result, EvalSampleMulti), f\"Expected EvalSampleMulti, got {type(result)}\"\n",
    "\t\tassert result.responses is not None, f\"Missing responses in result: {result.filehash.hex()}\"\n",
    "\t\tassert result.responses_model is not None, f\"Missing responses model in result: {result.filehash.hex()}\"\n",
    "\n",
    "\t\tresult.add_to_database(cur)\n",
    "\t\tconn.commit()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5757ca10",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2abf7b4a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Retry invalid preferences\n",
    "# For each preference key with no valid judged pair yet, pair the best\n",
    "# non-repper response (score > 0.5) with the worst-scoring response seen; keys\n",
    "# where no such pair exists, the pair was already tried, or the scores are too\n",
    "# close/inverted are queued in needs_reroll for fresh generation instead.\n",
    "by_pref_key = defaultdict(list)\n",
    "by_pref_key_is_valid = set()\n",
    "existing_pairs = set()\n",
    "existing_responses = defaultdict(set)\n",
    "\n",
    "for example in tqdm(all_examples):\n",
    "\tassert example.response_a is not None and example.response_b is not None, \"Missing response_a or response_b\"\n",
    "\tassert example.judge_response_a_score is not None and example.judge_response_b_score is not None, \"Missing judge response scores\"\n",
    "\n",
    "\texisting_pairs.add((example.filehash, example.system, example.question, example.response_a, example.response_b))\n",
    "\n",
    "\tpref_key = (example.filehash, example.system, example.question, example.question_type)\n",
    "\n",
    "\tby_pref_key[pref_key].append((example.response_a, example.judge_response_a_score))\n",
    "\tby_pref_key[pref_key].append((example.response_b, example.judge_response_b_score))\n",
    "\texisting_responses[pref_key].add(example.response_a)\n",
    "\texisting_responses[pref_key].add(example.response_b)\n",
    "\n",
    "\tabnormal_response_a = is_repper(example.response_a)\n",
    "\tabnormal_response_b = is_repper(example.response_b)\n",
    "\n",
    "\t# Same validity cascade as the stats cell above; only examples that pass\n",
    "\t# every check mark their key as valid.\n",
    "\tif example.judge_winner == \"a\" and abnormal_response_a:\n",
    "\t\tcontinue\n",
    "\telif example.judge_winner == \"b\" and abnormal_response_b:\n",
    "\t\tcontinue\n",
    "\n",
    "\tif abs(example.judge_response_a_score - example.judge_response_b_score) < 0.2:\n",
    "\t\tcontinue\n",
    "\n",
    "\tif example.judge_winner == \"a\" and example.judge_response_a_score < 0.5:\n",
    "\t\tcontinue\n",
    "\telif example.judge_winner == \"b\" and example.judge_response_b_score < 0.5:\n",
    "\t\tcontinue\n",
    "\n",
    "\tby_pref_key_is_valid.add(pref_key)\n",
    "\n",
    "\n",
    "needs_reroll = []\n",
    "new_examples = []\n",
    "\n",
    "for pref_key, responses in tqdm(by_pref_key.items()):\n",
    "\tif pref_key in by_pref_key_is_valid:\n",
    "\t\tcontinue\n",
    "\n",
    "\tsorted_responses = sorted(responses, key=lambda x: x[1], reverse=True)\n",
    "\t#assert sorted_responses[0][1] > sorted_responses[-1][1]\n",
    "\n",
    "\tbest_responses = [(r, s) for r, s in sorted_responses if not is_repper(r) and s > 0.5]\n",
    "\tif len(best_responses) == 0:\n",
    "\t\tneeds_reroll.append(pref_key)\n",
    "\t\tcontinue\n",
    "\n",
    "\tbest_response, best_score = best_responses[0]\n",
    "\tworst_response, worst_score = sorted_responses[-1]\n",
    "\n",
    "\tif abs(best_score - worst_score) < 0.2 or best_score < worst_score:\n",
    "\t\tneeds_reroll.append(pref_key)\n",
    "\t\tcontinue\n",
    "\n",
    "\t# Looks like we have a valid pair to try\n",
    "\tif (pref_key[0], pref_key[1], pref_key[2], best_response, worst_response) in existing_pairs or (pref_key[0], pref_key[1], pref_key[2], worst_response, best_response) in existing_pairs:\n",
    "\t\t# Already tried it, so spin again\n",
    "\t\tneeds_reroll.append(pref_key)\n",
    "\t\tcontinue\n",
    "\n",
    "\tresponse_pair = [best_response, worst_response]\n",
    "\trandom.shuffle(response_pair)\n",
    "\n",
    "\tnew_examples.append(EvalSample(\n",
    "\t\tfilehash=pref_key[0],\n",
    "\t\tsystem=pref_key[1],\n",
    "\t\tquestion=pref_key[2],\n",
    "\t\tquestion_type=pref_key[3],\n",
    "\t\ttask_type='overall',\n",
    "\t\tresponse_a=response_pair[0],\n",
    "\t\tresponse_b=response_pair[1],\n",
    "\t\tground_truth_knowledge=build_ground_truth_knowledge(pref_key[0], pref_key[2]),\n",
    "\t))\n",
    "\n",
    "print(f\"Found {len(new_examples)} new examples to try\")\n",
    "print(f\"Found {len(needs_reroll)} examples to reroll\")\n",
    "print(f\"Found {len(by_pref_key_is_valid)} valid examples\")\n",
    "print(f\"Out of {len(by_pref_key)} keys\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8f0d1d5c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Per-question-type tally of preference keys, with how many of them reached a\n",
    "# valid judged pair.\n",
    "cnts = Counter(key[3] for key in by_pref_key)\n",
    "cnts_v = Counter(key[3] for key in by_pref_key if key in by_pref_key_is_valid)\n",
    "\n",
    "for question_type, count in cnts.items():\n",
    "\tprint(f\"{question_type}: {count} total, {cnts_v[question_type]} valid ({cnts_v[question_type] / count:.2%})\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cb8d4b5c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist the rebuilt preference pairs; committing per row keeps earlier\n",
    "# inserts even if a later one fails.\n",
    "with db_conn() as (conn, cur):\n",
    "\tfor example in new_examples:\n",
    "\t\texample.add_to_database(cur)\n",
    "\t\tconn.commit()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0b4aa063",
   "metadata": {},
   "outputs": [],
   "source": [
    "def handle_new_example_responses(example: EvalSample) -> EvalSample | None:\n",
    "\t\"\"\"Generate two responses for `example` that have not been tried for this\n",
    "\tpreference key before, and return a copy with them as response_a/response_b.\n",
    "\n",
    "\tNOTE(review): loops forever if generate_response keeps returning\n",
    "\talready-seen responses -- consider adding a retry cap.\n",
    "\t\"\"\"\n",
    "\texample = example.get_image()\n",
    "\n",
    "\tresponses = []\n",
    "\n",
    "\twhile True:\n",
    "\t\tresponse = generate_response(example)\n",
    "\t\tif response in existing_responses[(example.filehash, example.system, example.question, example.question_type)]:\n",
    "\t\t\tprint(\"Already tried response, spinning again\")\n",
    "\t\t\tcontinue\n",
    "\n",
    "\t\tresponses.append(response)\n",
    "\t\tif len(responses) == 2:\n",
    "\t\t\tbreak\n",
    "\t\n",
    "\treturn example.model_copy(update={\"response_a\": responses[0], \"response_b\": responses[1], \"response_a_model\": CURRENT_RESPONSE_MODEL, \"response_b_model\": CURRENT_RESPONSE_MODEL})\n",
    "\n",
    "# Build fresh EvalSamples (no responses yet) for every key queued for reroll.\n",
    "work = []\n",
    "for pref_key in tqdm(needs_reroll):\n",
    "\tnew_example = EvalSample(\n",
    "\t\tfilehash=pref_key[0],\n",
    "\t\tsystem=pref_key[1],\n",
    "\t\tquestion=pref_key[2],\n",
    "\t\tquestion_type=pref_key[3],\n",
    "\t\ttask_type='overall',\n",
    "\t\tground_truth_knowledge=build_ground_truth_knowledge(pref_key[0], pref_key[2]),\n",
    "\t)\n",
    "\twork.append(new_example)\n",
    "\n",
    "with ThreadPoolExecutor(max_workers=8) as executor, tqdm(total=len(work)) as pbar, db_conn() as (conn, cur):\n",
    "\tfutures = [executor.submit(handle_new_example_responses, example) for example in work]\n",
    "\tfor future in as_completed(futures):\n",
    "\t\tpbar.update(1)\n",
    "\t\ttry:\n",
    "\t\t\tresult = future.result()\n",
    "\t\texcept Exception as e:\n",
    "\t\t\tprint(f\"Error: {e}\")\n",
    "\t\t\tcontinue\n",
    "\n",
    "\t\tif result is None:\n",
    "\t\t\tcontinue\n",
    "\n",
    "\t\tassert isinstance(result, EvalSample), f\"Expected EvalSample, got {type(result)}\"\n",
    "\t\tassert result.response_a is not None and result.response_b is not None, f\"Missing responses in result: {result.filehash.hex()}\"\n",
    "\t\tassert result.response_a_model is not None and result.response_b_model is not None, f\"Missing response models in result: {result.filehash.hex()}\"\n",
    "\n",
    "\t\tresult.add_to_database(cur)\n",
    "\t\tconn.commit()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "38de5d01",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "50345252",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0fa1d404",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "010e59e3",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5d1c0224",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "159c9f02",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c2c6e791",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4db5eae0",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8dfcc056",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e2baaa0c",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "42df6708",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bc76d093",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d7aaa99f",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "7141eca3",
   "metadata": {},
   "source": [
    "## Evaluate DPO Progress"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e8c5d331",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Hold-out eval set: 1000 images from API_IMAGES_TO_USE that were never used\n",
    "# in any preference or ranking. NOTE(review): unseeded random.sample, so the\n",
    "# eval set differs between runs.\n",
    "remaining_api_images = set(API_IMAGES_TO_USE)\n",
    "for example in tqdm(list_db_preferences() + list_db_rankings()):\n",
    "\tremaining_api_images.discard(example.filehash)\n",
    "eval_filehashes = random.sample(list(remaining_api_images), 1000)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "81cf9372",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Encode each eval image as a base64 data: URL (MIME type sniffed from the\n",
    "# bytes) for embedding directly in chat completion requests.\n",
    "eval_dataurls = []\n",
    "\n",
    "for filehash in tqdm(eval_filehashes):\n",
    "\timage_data = api.read_image(filehash)\n",
    "\timage_mime = magic.from_buffer(image_data, mime=True)\n",
    "\timage_dataurl = f\"data:{image_mime};base64,{base64.b64encode(image_data).decode('utf-8')}\"\n",
    "\teval_dataurls.append(image_dataurl)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b5e29fd5",
   "metadata": {},
   "outputs": [],
   "source": [
    "def eval_generate_response(idx: int, system: str, question: str, image_dataurl: str, temperature: float) -> tuple[int, str]:\n",
    "\t\"\"\"Generate one model response for an eval image.\n",
    "\n",
    "\tReturns (idx, response_text) so results can be matched back to\n",
    "\teval_filehashes when calls complete out of order in a thread pool.\n",
    "\tRaises (via assert) if the API returns no/empty content.\n",
    "\t\"\"\"\n",
    "\t#client = openai.Client(base_url=\"http://localhost:5053/v1\", api_key=\"token-abc123\")\n",
    "\tclient = openai.Client(base_url=\"http://localhost:5052/v1\", api_key=\"token-abc123\")\n",
    "\tresponse = client.chat.completions.create(\n",
    "\t\t#model=\"mb3500zp\",\n",
    "\t\t# Use the shared constant instead of a hard-coded copy of the model id so\n",
    "\t\t# this cell stays in sync with CURRENT_RESPONSE_MODEL set at the top of the notebook.\n",
    "\t\tmodel=CURRENT_RESPONSE_MODEL,\n",
    "\t\tmessages=[\n",
    "\t\t\t{\n",
    "\t\t\t\t\"role\": \"system\",\n",
    "\t\t\t\t\"content\": system,\n",
    "\t\t\t},\n",
    "\t\t\t{\n",
    "\t\t\t\t\"role\": \"user\",\n",
    "\t\t\t\t\"content\": [\n",
    "\t\t\t\t\t{\n",
    "\t\t\t\t\t\t\"type\": \"text\",\n",
    "\t\t\t\t\t\t\"text\": question,\n",
    "\t\t\t\t\t},\n",
    "\t\t\t\t\t{\n",
    "\t\t\t\t\t\t\"type\": \"image_url\",\n",
    "\t\t\t\t\t\t\"image_url\": {\n",
    "\t\t\t\t\t\t\t\"url\": image_dataurl,\n",
    "\t\t\t\t\t\t},\n",
    "\t\t\t\t\t},\n",
    "\t\t\t\t]\n",
    "\t\t\t},\n",
    "\t\t],\n",
    "\t\ttemperature=temperature,\n",
    "\t\ttop_p=0.9,\n",
    "\t\tmax_tokens=512,\n",
    "\t)\n",
    "\n",
    "\tassert len(response.choices) == 1, f\"Expected 1 responses, got {len(response.choices)}\"\n",
    "\tassert response.choices[0].message.content is not None, \"Response content is None\"\n",
    "\tresponse_a = response.choices[0].message.content.strip()\n",
    "\n",
    "\treturn idx, response_a\n",
    "\n",
    "\n",
    "system = \"You are JoyCaption, a helpful AI assistant with vision capabilities.\"\n",
    "#question = \"Respond as a real, human stable-diffusion user would.\"\n",
    "question = \"Output a description which could generate this image using Stable Diffusion. Write like the average human.\"\n",
    "\n",
    "\n",
    "# One work item per eval image; sampling temperature fixed at 0.6.\n",
    "work = []\n",
    "responses = []\n",
    "for i, image_dataurl in enumerate(eval_dataurls):\n",
    "\twork.append((i, system, question, image_dataurl, 0.6))\n",
    "\n",
    "\n",
    "# Fan out over a small thread pool.  Failed requests are logged and skipped,\n",
    "# so `responses` can end up shorter than `work`, and entries arrive in\n",
    "# completion order (the original index is recovered via `idx`).\n",
    "with ThreadPoolExecutor(max_workers=4) as executor, tqdm(total=len(work)) as pbar:\n",
    "\tfutures = [executor.submit(eval_generate_response, *args) for args in work]\n",
    "\tfor future in as_completed(futures):\n",
    "\t\tpbar.update(1)\n",
    "\t\ttry:\n",
    "\t\t\tresult = future.result()\n",
    "\t\texcept Exception as e:\n",
    "\t\t\tprint(f\"Error: {e}\")\n",
    "\t\t\tcontinue\n",
    "\n",
    "\t\tidx, response = result\n",
    "\t\tresponse = response.strip()\n",
    "\n",
    "\t\tresponses.append({\n",
    "\t\t\t\"filehash\": eval_filehashes[idx],\n",
    "\t\t\t\"response\": response,\n",
    "\t\t})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d2985b72",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Count the responses flagged by is_repper (defined elsewhere in the notebook)\n",
    "# and dump them to a text file so the failure modes can be eyeballed.\n",
    "failures = [response['response'] for response in responses if is_repper(response['response'])]\n",
    "print(f\"Failed responses: {len(failures)}\")\n",
    "with open(\"repper_responses.txt\", \"w\") as f:\n",
    "\tfor response in failures:\n",
    "\t\tf.write(response + \"\\n\")\n",
    "\t\tf.write(\"=\" * 80 + \"\\n\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f66cf7c1",
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): this cell is an exact duplicate of the previous one and\n",
    "# overwrites the same repper_responses.txt — consider deleting it.\n",
    "failures = [response['response'] for response in responses if is_repper(response['response'])]\n",
    "print(f\"Failed responses: {len(failures)}\")\n",
    "with open(\"repper_responses.txt\", \"w\") as f:\n",
    "\tfor response in failures:\n",
    "\t\tf.write(response + \"\\n\")\n",
    "\t\tf.write(\"=\" * 80 + \"\\n\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "411f1d01",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Crude repetition check: a response \"fails\" when more than 10 of its\n",
    "# comma-separated pieces are duplicates (lowercased, exact match).\n",
    "# NOTE(review): `dpo_model_responses` is defined elsewhere in the notebook —\n",
    "# confirm it is populated before running this cell.\n",
    "n_failures = 0\n",
    "\n",
    "for response in dpo_model_responses:\n",
    "\tpieces = response[\"response\"].lower().split(\",\")\n",
    "\tn_reps = len(pieces) - len(set(pieces))\n",
    "\tif n_reps > 10:\n",
    "\t\tn_failures += 1\n",
    "\n",
    "print(f\"Number of failures: {n_failures}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f325d7b3",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist the eval sample (hex-encoded hashes, one per line) so the exact\n",
    "# same image set can be reused in later sessions.\n",
    "with open('tmp-eval-hashes.txt', 'w') as f:\n",
    "\tfor filehash in eval_filehashes:\n",
    "\t\tf.write(f\"{filehash.hex()}\\n\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "491146c6",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "301855fb",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7f710ab1",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "41a0b149",
   "metadata": {},
   "source": [
    "## Evaluate Accuracy of Various Judges"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "134a0bee",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load judged preference pairs from the database and pick a deterministic\n",
    "# 512-row subset for measuring judge accuracy.\n",
    "with conn.cursor() as cur:\n",
    "\tcur.execute(\"SELECT image_hash, messages, response_a, response_b, winner, task_type FROM alignment_preferences_ai\")\n",
    "\texisting_results = []\n",
    "\tfor filehash, messages, response_a, response_b, winner, task_type in cur:\n",
    "\t\tassert winner in {\"a\", \"b\"}, f\"Invalid winner: {winner}\"\n",
    "\t\t# messages is stored as JSON: exactly [system, user]\n",
    "\t\tmessages = json.loads(messages)\n",
    "\t\tassert len(messages) == 2 and messages[0][\"role\"] == 'system' and messages[1][\"role\"] == 'user'\n",
    "\t\tsystem = messages[0][\"content\"]\n",
    "\t\tquestion = messages[1][\"content\"]\n",
    "\t\texisting_results.append({\n",
    "\t\t\t\"filehash\": bytes(filehash),\n",
    "\t\t\t\"system\": system,\n",
    "\t\t\t\"question\": question,\n",
    "\t\t\t\"response_a\": response_a,\n",
    "\t\t\t\"response_b\": response_b,\n",
    "\t\t\t\"winner\": winner,\n",
    "\t\t\t\"task_type\": task_type,\n",
    "\t\t})\n",
    "\n",
    "\tprint(set(r['task_type'] for r in existing_results))\n",
    "\n",
    "\t# Sort before the seeded shuffle so the subset is reproducible regardless\n",
    "\t# of the row order the database returns.\n",
    "\texisting_results.sort(key=lambda x: (x[\"filehash\"].hex(), x[\"response_a\"], x[\"response_b\"], x[\"winner\"]))\n",
    "\trandom.seed(69)\n",
    "\trandom.shuffle(existing_results)\n",
    "\texisting_subset = existing_results[:512]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "77e1513f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Prep evaluations\n",
    "# For each subset row: attach the image as a data URL and build the judge's\n",
    "# system/user prompts.  ALPHA_SYSTEM_MESSAGE, PRIMARY_SYSTEM_MESSAGE,\n",
    "# USER_MESSAGE, USER_MESSAGE_ALL_TAGS, PROMPT and image_to_tags are defined\n",
    "# elsewhere in the notebook.\n",
    "for result in tqdm(existing_subset):\n",
    "\timage_data = api.read_image(result[\"filehash\"])\n",
    "\timage_mime = magic.from_buffer(image_data, mime=True)\n",
    "\timage_dataurl = f\"data:{image_mime};base64,{base64.b64encode(image_data).decode('utf-8')}\"\n",
    "\tresult[\"image_dataurl\"] = image_dataurl\n",
    "\n",
    "\t# Judge system prompt = alpha preamble + task-type preamble + the original system prompt\n",
    "\tsystem_message = ALPHA_SYSTEM_MESSAGE.strip()\n",
    "\tsystem_message = system_message + \"\\n\\n\" + PRIMARY_SYSTEM_MESSAGE[result['task_type']].strip()\n",
    "\tsystem_message = system_message + \"\\n\\n\" + result['system'].strip()\n",
    "\n",
    "\tground_truth_tags = None\n",
    "\tsource = None\n",
    "\n",
    "\tif result['task_type'] == 'all_tags':\n",
    "\t\t# Infer the booru source from the question text; later checks override earlier ones.\n",
    "\t\tsource = 'danbooru' if 'danbooru' in result['question'].lower() else None\n",
    "\t\tsource = 'e621' if 'e621' in result['question'].lower() else source\n",
    "\t\tsource = 'rule34' if 'rule34' in result['question'].lower() else source\n",
    "\n",
    "\t\tassert source is not None, f\"Unknown source in question: {result['question']}\"\n",
    "\t\tground_truth_tags = image_to_tags(result['filehash'], source)\n",
    "\n",
    "\t\tuser_message = USER_MESSAGE_ALL_TAGS\n",
    "\telse:\n",
    "\t\tuser_message = USER_MESSAGE\n",
    "\t\n",
    "\t# Swap response_a and response_b so we can see if the result is indeterminate\n",
    "\t# (the judge previously saw them in the opposite order, so it should now pick the other letter).\n",
    "\tresponse_a = result['response_b']\n",
    "\tresponse_b = result['response_a']\n",
    "\tresult['expected_winner'] = 'a' if result['winner'] == 'b' else 'b'\n",
    "\t\n",
    "\tresponse_a_word_count = len(response_a.split())\n",
    "\tresponse_b_word_count = len(response_b.split())\n",
    "\n",
    "\tuser_message = user_message.format(\n",
    "\t\tsystem_prompt=system_message,\n",
    "\t\tuser_query=result['question'].strip(),\n",
    "\t\tresponse_a=response_a,\n",
    "\t\tresponse_a_word_count=response_a_word_count,\n",
    "\t\tresponse_b=response_b,\n",
    "\t\tresponse_b_word_count=response_b_word_count,\n",
    "\t\tground_truth_tags=ground_truth_tags,\n",
    "\t\tsource=source,\n",
    "\t).strip()\n",
    "\n",
    "\tresult['judge_system'] = PROMPT.strip()\n",
    "\tresult['judge_user'] = user_message"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "472a882f",
   "metadata": {},
   "outputs": [],
   "source": [
    "def eval_pool_run_model(result, model):\n",
    "\t\"\"\"Run one judge `model` over a prepped `result` dict.\n",
    "\n",
    "\tOn success, sets '{model}:judge_winner' and '{model}:judge_reasoning' on the\n",
    "\tdict in place.  On any error (or unknown model) the dict is returned\n",
    "\tunchanged; callers detect failure by the missing winner key.\n",
    "\trun_openai_model / run_openrouter_model are defined elsewhere in the notebook.\n",
    "\t\"\"\"\n",
    "\ttry:\n",
    "\t\tif model == 'gpt-4.1':\n",
    "\t\t\twinner, reasoning = run_openai_model(result['judge_system'], result['judge_user'], result['image_dataurl'], model='gpt-4.1', reasoning_effort=None, temperature=0.5)\n",
    "\t\telif model == 'gemini-2.5-pro':\n",
    "\t\t\twinner, reasoning = run_openrouter_model(result['judge_system'], result['judge_user'], result['image_dataurl'], model=\"google/gemini-2.5-pro-preview-03-25\", reasoning_effort=None, temperature=0.5)\n",
    "\t\telif model == 'o3':\n",
    "\t\t\twinner, reasoning = run_openai_model(result['judge_system'], result['judge_user'], result['image_dataurl'], model='o3', reasoning_effort='medium', temperature=None)\n",
    "\t\telif model == 'o4-mini':\n",
    "\t\t\twinner, reasoning = run_openai_model(result['judge_system'], result['judge_user'], result['image_dataurl'], model='o4-mini', reasoning_effort='medium', temperature=None)\n",
    "\t\telif model == 'qwen2.5-vl-32b':\n",
    "\t\t\twinner, reasoning = run_openrouter_model(result['judge_system'], result['judge_user'], result['image_dataurl'], model=\"qwen/qwen2.5-vl-32b-instruct:free\", reasoning_effort=None, temperature=0.4)\n",
    "\t\telse:\n",
    "\t\t\tprint(f\"Unknown model: {model}\")\n",
    "\t\t\treturn result\n",
    "\texcept Exception as e:\n",
    "\t\tprint(f\"Error from run_openai_model: {e}\")\n",
    "\t\tprint(format_exc())\n",
    "\t\treturn result\n",
    "\n",
    "\tresult[f'{model}:judge_winner'] = winner\n",
    "\tresult[f'{model}:judge_reasoning'] = reasoning\n",
    "\n",
    "\treturn result\n",
    "\n",
    "\n",
    "# Pick the judge to run; re-running the cell with a different model fills in\n",
    "# that model's columns on the same subset.\n",
    "#model = 'gpt-4.1'\n",
    "#model = 'gemini-2.5-pro'\n",
    "#model = 'o4-mini'\n",
    "model = 'o3'\n",
    "#model = 'qwen2.5-vl-32b'\n",
    "# Resumable: skip entries this model has already judged.\n",
    "work = [result for result in existing_subset if f'{model}:judge_winner' not in result]\n",
    "#random.shuffle(work)\n",
    "#work = work[:32]\n",
    "\n",
    "with ThreadPoolExecutor(max_workers=12) as executor, tqdm(total=len(work)) as pbar:\n",
    "\tfutures = [executor.submit(eval_pool_run_model, result, model) for result in work]\n",
    "\tfor future in as_completed(futures):\n",
    "\t\tpbar.update(1)\n",
    "\t\ttry:\n",
    "\t\t\tresult = future.result()\n",
    "\t\texcept Exception as e:\n",
    "\t\t\tprint(f\"Error: {e}\")\n",
    "\t\t\tcontinue\n",
    "\n",
    "\t\tif isinstance(result, str):\n",
    "\t\t\tprint(f\"Error: {result}\")\n",
    "\t\t\tcontinue\n",
    "\n",
    "\t\tif f'{model}:judge_winner' not in result:\n",
    "\t\t\tprint(f\"Failure on: {result['filehash'].hex()}\")\n",
    "\t\t\tcontinue\n",
    "\n",
    "\n",
    "# Single-example debug path (kept for manual inspection):\n",
    "# for result in tqdm(work):\n",
    "# \timage = Image.open(io.BytesIO(base64.b64decode(result['image_dataurl'].split(\",\")[1])))\n",
    "# \tscale = 512 / max(image.size)\n",
    "# \timage = image.resize((int(image.size[0] * scale), int(image.size[1] * scale)), Image.LANCZOS)\n",
    "# \tdisplay(image)\n",
    "\n",
    "# \tresult = eval_pool_run_model(result, model)\n",
    "\n",
    "# \tprint(result['judge_system'])\n",
    "# \tprint(result['judge_user'])\n",
    "# \tprint(result[f'{model}:judge_reasoning'])\n",
    "# \tprint(\"WINNER:\", result[f'{model}:judge_winner'])\n",
    "# \tprint(\"EXPECTED WINNER:\", result['expected_winner'])\n",
    "\n",
    "# \tbreak"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5a919b1b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Snapshot the judged subset to JSON: hex-encode filehashes and drop the\n",
    "# (large, re-derivable) image data URLs.  The dict comprehension makes a\n",
    "# shallow copy so existing_subset itself is left untouched.\n",
    "serializable_subset = []\n",
    "for result in existing_subset:\n",
    "\tresult = {k: (v if k != 'filehash' else v.hex()) for k, v in result.items()}\n",
    "\tdel result['image_dataurl']\n",
    "\tserializable_subset.append(result)\n",
    "\n",
    "Path(\"tmp-eval-accuracy.json\").write_text(json.dumps(serializable_subset, indent=2))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7dffb840",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Restore a previously saved subset: decode hex filehashes and rebuild the\n",
    "# image data URLs from the image store.\n",
    "deserialized_subset = json.loads(Path(\"tmp-eval-accuracy.json\").read_text())\n",
    "for result in deserialized_subset:\n",
    "\tresult['filehash'] = bytes.fromhex(result['filehash'])\n",
    "\t# Sniff the real mime type instead of assuming JPEG, consistent with the\n",
    "\t# other cells — a hard-coded image/jpeg produced wrong data URLs for PNG/WebP images.\n",
    "\timage_data = api.read_image(result['filehash'])\n",
    "\timage_mime = magic.from_buffer(image_data, mime=True)\n",
    "\tresult['image_dataurl'] = f\"data:{image_mime};base64,{base64.b64encode(image_data).decode('utf-8')}\"\n",
    "existing_subset = deserialized_subset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a486d546",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Measure accuracy\n",
    "# Judge models are discovered per-result from their '<model>:judge_winner'\n",
    "# keys, so a partially judged subset still counts each model over exactly\n",
    "# the results it has scored.\n",
    "n_correct = defaultdict(int)\n",
    "n_total = defaultdict(int)\n",
    "\n",
    "for result in existing_subset:\n",
    "\tmodels = set()\n",
    "\tfor k in result.keys():\n",
    "\t\tif k.endswith(':judge_winner'):\n",
    "\t\t\tmodel = k.split(':')[0]\n",
    "\t\t\tmodels.add(model)\n",
    "\t\n",
    "\texpected_winner = result['expected_winner']\n",
    "\n",
    "\tfor model in models:\n",
    "\t\twinner = result[f'{model}:judge_winner']\n",
    "\t\tif winner == expected_winner:\n",
    "\t\t\tn_correct[model] += 1\n",
    "\t\tn_total[model] += 1\n",
    "\n",
    "print(\"Accuracy:\")\n",
    "for model in n_total.keys():\n",
    "\taccuracy = n_correct[model] / n_total[model]\n",
    "\tprint(f\"{model}: {accuracy:.2%} ({n_correct[model]}/{n_total[model]})\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2d37b8c9",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Filter down to only entries that o3 agrees on and then measure accuracy\n",
    "# NOTE(review): on this filtered subset o3's own accuracy is 100% by\n",
    "# construction — only the other models' numbers are informative here.\n",
    "filtered_subset = [result for result in existing_subset if 'o3:judge_winner' in result and result['o3:judge_winner'] == result['expected_winner']]\n",
    "print(f\"Filtered subset size: {len(filtered_subset)}\")\n",
    "\n",
    "n_correct = defaultdict(int)\n",
    "n_total = defaultdict(int)\n",
    "\n",
    "for result in filtered_subset:\n",
    "\tmodels = set()\n",
    "\tfor k in result.keys():\n",
    "\t\tif k.endswith(':judge_winner'):\n",
    "\t\t\tmodel = k.split(':')[0]\n",
    "\t\t\tmodels.add(model)\n",
    "\t\n",
    "\texpected_winner = result['expected_winner']\n",
    "\n",
    "\tfor model in models:\n",
    "\t\twinner = result[f'{model}:judge_winner']\n",
    "\t\tif winner == expected_winner:\n",
    "\t\t\tn_correct[model] += 1\n",
    "\t\tn_total[model] += 1\n",
    "\n",
    "print(\"Accuracy:\")\n",
    "for model in n_total.keys():\n",
    "\taccuracy = n_correct[model] / n_total[model]\n",
    "\tprint(f\"{model}: {accuracy:.2%} ({n_correct[model]}/{n_total[model]})\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4afdb942",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5bc1029f",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "70bb8952",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "b3e9d0ec",
   "metadata": {},
   "source": [
    "## Build Training Dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "66720fec",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Dataset build configuration.\n",
    "DATASET_PATH = \"fancyfeast/joy-captioning-alignment-20250507a\"\n",
    "TEST_SIZE = 512\n",
    "MIN_WINNER_SCORE = 0.5   # winner must score at least this to be usable\n",
    "MIN_SCORE_DIFF = 0.2     # minimum judge-score gap between winner and loser\n",
    "\n",
    "\n",
    "# Column schema for the pushed HF dataset (w = winner, l = loser).\n",
    "dataset_features = datasets.Features({\n",
    "\t'filehash': datasets.Value('binary'),\n",
    "\t'system': datasets.Value('string'),\n",
    "\t'question': datasets.Value('string'),\n",
    "\t'question_type': datasets.Value('string'),\n",
    "\t'response_w': datasets.Value('string'),\n",
    "\t'response_l': datasets.Value('string'),\n",
    "\t'response_w_score': datasets.Value('float32'),\n",
    "\t'response_l_score': datasets.Value('float32'),\n",
    "})\n",
    "\n",
    "\n",
    "@dataclasses.dataclass(frozen=True)\n",
    "class DatasetEntry:\n",
    "\t\"\"\"One preference pair for an image: winning vs. losing response with judge scores.\"\"\"\n",
    "\tfilehash: bytes\n",
    "\tsystem: str\n",
    "\tquestion: str\n",
    "\tquestion_type: str\n",
    "\tresponse_w: str  # winning (preferred) response\n",
    "\tresponse_l: str  # losing response\n",
    "\tresponse_w_score: float\n",
    "\tresponse_l_score: float\n",
    "\n",
    "\tdef to_json_dict(self) -> dict:\n",
    "\t\t# JSON-safe copy: bytes filehash becomes a hex string.\n",
    "\t\tdata = dataclasses.asdict(self)\n",
    "\t\tdata['filehash'] = data['filehash'].hex()\n",
    "\t\treturn data\n",
    "\t\n",
    "\tdef to_dataset(self) -> dict:\n",
    "\t\t# Row in the shape expected by `dataset_features` (filehash stays bytes).\n",
    "\t\treturn {\n",
    "\t\t\t'filehash': self.filehash,\n",
    "\t\t\t'system': self.system,\n",
    "\t\t\t'question': self.question,\n",
    "\t\t\t'question_type': self.question_type,\n",
    "\t\t\t'response_w': self.response_w,\n",
    "\t\t\t'response_l': self.response_l,\n",
    "\t\t\t'response_w_score': self.response_w_score,\n",
    "\t\t\t'response_l_score': self.response_l_score,\n",
    "\t\t}\n",
    "\n",
    "\n",
    "# Build the DPO dataset: filter database preference pairs, keep one best\n",
    "# example per image, print stats, then push to the HF hub.\n",
    "# list_db_preferences and is_repper are defined elsewhere in the notebook.\n",
    "examples_by_filehash: dict[bytes, list[DatasetEntry]] = defaultdict(list)\n",
    "\n",
    "# Find all valid examples in the database\n",
    "for example in tqdm(list_db_preferences(), desc=\"Loading examples from database\"):\n",
    "\t# Must be from the current response model\n",
    "\tif example.response_a_model != CURRENT_RESPONSE_MODEL or example.response_b_model != CURRENT_RESPONSE_MODEL:\n",
    "\t\tcontinue\n",
    "\n",
    "\t# Must have responses\n",
    "\tif example.response_a is None or example.response_b is None or example.judge_winner is None:\n",
    "\t\tcontinue\n",
    "\n",
    "\t# Must have scores\n",
    "\tif example.judge_response_a_score is None or example.judge_response_b_score is None:\n",
    "\t\tcontinue\n",
    "\n",
    "\t# Gap between scores must be significant\n",
    "\tif abs(example.judge_response_a_score - example.judge_response_b_score) < MIN_SCORE_DIFF:\n",
    "\t\tcontinue\n",
    "\n",
    "\t# Winner must have a reasonable score and cannot be a repper\n",
    "\tif example.judge_winner == 'a':\n",
    "\t\tif example.judge_response_a_score < MIN_WINNER_SCORE or is_repper(example.response_a):\n",
    "\t\t\tcontinue\n",
    "\telif example.judge_winner == 'b':\n",
    "\t\tif example.judge_response_b_score < MIN_WINNER_SCORE or is_repper(example.response_b):\n",
    "\t\t\tcontinue\n",
    "\t\n",
    "\texamples_by_filehash[example.filehash].append(DatasetEntry(\n",
    "\t\tfilehash=example.filehash,\n",
    "\t\tsystem=example.system,\n",
    "\t\tquestion=example.question,\n",
    "\t\tquestion_type=example.question_type,\n",
    "\t\tresponse_w=example.response_a if example.judge_winner == 'a' else example.response_b,\n",
    "\t\tresponse_l=example.response_b if example.judge_winner == 'a' else example.response_a,\n",
    "\t\tresponse_w_score=example.judge_response_a_score if example.judge_winner == 'a' else example.judge_response_b_score,\n",
    "\t\tresponse_l_score=example.judge_response_b_score if example.judge_winner == 'a' else example.judge_response_a_score,\n",
    "\t))\n",
    "\n",
    "# Now filter down to the best example per filehash\n",
    "all_examples: list[DatasetEntry] = []\n",
    "\n",
    "for examples in tqdm(examples_by_filehash.values(), desc=\"Filtering examples\"):\n",
    "\t# Sort by winner score, descending\n",
    "\texamples.sort(key=lambda ex: ex.response_w_score, reverse=True)\n",
    "\n",
    "\t# Keep only the examples with the highest score\n",
    "\tbest_score = examples[0].response_w_score\n",
    "\texamples = [ex for ex in examples if ex.response_w_score >= best_score]  # '>=' keeps ties with the top score\n",
    "\n",
    "\t# If we have repper losers, keep only those examples\n",
    "\tif any(is_repper(ex.response_l) for ex in examples):\n",
    "\t\texamples = [ex for ex in examples if is_repper(ex.response_l)]\n",
    "\t\n",
    "\t# Now pick the example with the largest margin\n",
    "\texamples.sort(key=lambda ex: abs(ex.response_w_score - ex.response_l_score), reverse=True)\n",
    "\n",
    "\tall_examples.append(examples[0])\n",
    "\n",
    "# Collect stats\n",
    "winner_scores = defaultdict(list)\n",
    "score_diffs = defaultdict(list)\n",
    "loser_reppers = defaultdict(int)\n",
    "\n",
    "for example in all_examples:\n",
    "\tscore_diffs[example.question_type].append(abs(example.response_w_score - example.response_l_score))\n",
    "\twinner_scores[example.question_type].append(example.response_w_score)\n",
    "\tloser_reppers[example.question_type] += 1 if is_repper(example.response_l) else 0\n",
    "\n",
    "print(f\"Total examples: {len(all_examples)}\")\n",
    "print(\"Stats by question type:\")\n",
    "for question_type in winner_scores.keys():\n",
    "\tavg_winner_score = sum(winner_scores[question_type]) / len(winner_scores[question_type])\n",
    "\tmax_winner_score = max(winner_scores[question_type])\n",
    "\tmin_winner_score = min(winner_scores[question_type])\n",
    "\tavg_score_diff = sum(score_diffs[question_type]) / len(score_diffs[question_type])\n",
    "\tmax_score_diff = max(score_diffs[question_type])\n",
    "\tmin_score_diff = min(score_diffs[question_type])\n",
    "\tn_loser_reppers = loser_reppers[question_type]\n",
    "\tprint(f\"{question_type}: count={len(winner_scores[question_type])}, avg_winner_score={avg_winner_score:.4f}, max_winner_score={max_winner_score:.4f}, min_winner_score={min_winner_score:.4f}, avg_score_diff={avg_score_diff:.4f}, max_score_diff={max_score_diff:.4f}, min_score_diff={min_score_diff:.4f}, n_loser_reppers={n_loser_reppers} ({n_loser_reppers / len(winner_scores[question_type]):.4%})\")\n",
    "\n",
    "# Build dataset\n",
    "dataset = datasets.Dataset.from_list([example.to_dataset() for example in all_examples], features=dataset_features)\n",
    "\n",
    "# Split (fixed seed -> reproducible train/test split)\n",
    "dataset = dataset.train_test_split(test_size=TEST_SIZE, shuffle=True, seed=69)\n",
    "\n",
    "print(\"Pushing to hub\")\n",
    "dataset.push_to_hub(DATASET_PATH, private=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "19c2e0e9",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4d8d5a90",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f2d0f0ac",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3f730ec3",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7c738051",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "887c1f84",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "a38206e5",
   "metadata": {},
   "source": [
    "## Glitch Filter Development"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "86863268",
   "metadata": {},
   "outputs": [],
   "source": [
    "import re\n",
    "from collections import Counter\n",
    "\n",
    "# ----------------------------------------------------------------------\n",
    "#  Utility --------------------------------------------------------------\n",
    "# ----------------------------------------------------------------------\n",
    "def _longest_internal_repeat(s: str) -> int:\n",
    "    \"\"\"\n",
    "    Crude check for “aaa… / ab_ab_ab…” patterns inside ONE very-long token.\n",
    "\n",
    "    Tries prefix lengths from 1 upward; for the SMALLEST prefix whose\n",
    "    repetition (floor(len(s)/size) copies) occurs as a substring of s,\n",
    "    returns the total length covered by those copies.  Returns 0 when no\n",
    "    prefix of length <= len(s)//4 repeats that way (i.e. fewer than 4 copies).\n",
    "\n",
    "    NOTE(review): only repeats of a *prefix* of s are detected, and the first\n",
    "    (smallest) period found wins — not necessarily the largest repeated span.\n",
    "    \"\"\"\n",
    "    max_len = len(s) // 4                       # need 4× to be interesting\n",
    "    for size in range(1, max_len + 1):\n",
    "        chunk = s[:size]\n",
    "        if chunk * (len(s) // size) in s:\n",
    "            return size * (len(s) // size)\n",
    "    return 0\n",
    "\n",
    "\n",
    "# ----------------------------------------------------------------------\n",
    "#  Main detector --------------------------------------------------------\n",
    "# ----------------------------------------------------------------------\n",
    "def is_abnormally_repetitive(text: str) -> bool:\n",
    "    \"\"\"\n",
    "    Conservative detector for useless repetition / tag-spam.\n",
    "\n",
    "    Returns True only when repetition is very likely harmful.\n",
    "    Five lightweight heuristics are combined; thresholds are tuned\n",
    "    to avoid false-positives on ordinary prose or short enumerations.\n",
    "    \"\"\"\n",
    "\n",
    "    # Tokenize on non-word characters; lowercase so 'Dog' and 'dog' count as duplicates.\n",
    "    tokens = [t.lower() for t in re.split(r\"\\W+\", text) if t]\n",
    "    n = len(tokens)\n",
    "    if n < 30:                                     # tiny blocks → never flag\n",
    "        return False\n",
    "\n",
    "    # --------------------------------------------------- 1\n",
    "    # ≥10 identical tokens BACK-TO-BACK\n",
    "    if sum(tokens[i] == tokens[i - 1] for i in range(1, n)) >= 10:\n",
    "        return True\n",
    "\n",
    "    # --------------------------------------------------- 2\n",
    "    # Heavy duplicate ratio on “content” words (len ≥4)\n",
    "    content = [t for t in tokens if len(t) >= 4]\n",
    "    if len(content) >= 50:\n",
    "        dup_count  = len(content) - len(set(content))\n",
    "        dup_ratio  = dup_count / len(content)\n",
    "        if dup_ratio > 0.55 and dup_count >= 100:      # both conditions\n",
    "            return True\n",
    "\n",
    "    # --------------------------------------------------- 3\n",
    "    # Repeated 3-gram loops (“water ocean waves …”)\n",
    "    tris = [' '.join(tokens[i:i + 3]) for i in range(n - 2)]\n",
    "    if tris:\n",
    "        most_common_tri, freq = Counter(tris).most_common(1)[0]\n",
    "        if freq >= 10 and freq / len(tris) > 0.20:\n",
    "            return True\n",
    "\n",
    "    # --------------------------------------------------- 4\n",
    "    # Colon-style tag duplication  (e.g.   meta:icon_set … )\n",
    "    # NOTE(review): ':' is split away by the \\W+ tokenizer above, so this\n",
    "    # branch may never trigger — verify against real tag data.\n",
    "    colon_tags = [t for t in tokens if ':' in t]\n",
    "    if len(colon_tags) >= 30:\n",
    "        tag_dup = len(colon_tags) - len(set(colon_tags))\n",
    "        if tag_dup >= 20 and tag_dup / len(colon_tags) > 0.30:\n",
    "            return True\n",
    "\n",
    "    # --------------------------------------------------- 5A\n",
    "    #   Common-prefix flood  (tokens differ only by added suffix words)\n",
    "    if n >= 50:\n",
    "        for k in (15, 20, 25):                       # test several slice sizes\n",
    "            counts = Counter(t[:k] for t in tokens)\n",
    "            top_slice, freq = counts.most_common(1)[0]\n",
    "            uniq_with_slice = {t for t in tokens if t.startswith(top_slice)}\n",
    "            # flood = one k-char prefix dominates AND many distinct tokens share it\n",
    "            if freq / n > 0.18 and len(uniq_with_slice) >= 15:\n",
    "                return True\n",
    "\n",
    "    # --------------------------------------------------- 5B\n",
    "    #   Internal repetition inside ONE very long token\n",
    "    for tok in tokens:\n",
    "        if len(tok) > 80 and _longest_internal_repeat(tok) / len(tok) > 0.65:\n",
    "            return True\n",
    "\n",
    "    # --------------------------------------------------- 5C\n",
    "    #   Numeric-suffix families (“foo_1 … foo_40”)\n",
    "    num_suffix = re.compile(r'^(.+?)_(\\d{1,3})$')\n",
    "    buckets: dict[str, set[int]] = {}\n",
    "    for t in tokens:\n",
    "        m = num_suffix.match(t)\n",
    "        if m:\n",
    "            buckets.setdefault(m.group(1), set()).add(int(m.group(2)))\n",
    "    if any(len(nums) >= 15 for nums in buckets.values()):\n",
    "        return True\n",
    "\n",
    "    return False\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "# Sample responses from the rankings table and split them into flagged /\n",
    "# unflagged files for manual review of the detector's behavior.\n",
    "with conn.cursor() as cur, open('tmp-is-repper.txt', 'w') as f, open('tmp-is-not-repper.txt', 'w') as f2:\n",
    "\tcur.execute(\"SELECT responses FROM alignment_rankings_ai\")\n",
    "\t# Each row holds a list of responses; flatten them all into one list.\n",
    "\tall_responses = list(itertools.chain.from_iterable(row[0] for row in cur))\n",
    "\n",
    "\tfor response in random.sample(all_responses, 760):\n",
    "\t\tif is_abnormally_repetitive(response):\n",
    "\t\t\tf.write(f\"{response}\\n------------------\\n\\n\")\n",
    "\t\telse:\n",
    "\t\t\tf2.write(f\"{response}\\n------------------\\n\\n\")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "662c38a6",
   "metadata": {},
   "outputs": [],
   "source": [
    "# How many candidate responses were loaded from the rankings table?\n",
    "len(all_responses)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ad6ec033",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sanity check: ordinary descriptive prose must NOT be flagged (expect False).\n",
    "is_abnormally_repetitive(\"\"\"This photograph captures an elderly man engaged in wood carving in a rustic workshop. The man, who has short gray hair and a beard, is focused on his work, holding a small wooden carving in his hands. He is dressed in a short-sleeved, white and gray striped shirt and blue jeans. The workshop is filled with various woodworking tools and materials, including a large wooden block on the table in front of him, a chisel in his right hand, and a small wooden piece in his left. \n",
    "\n",
    "The background reveals a wooden wall with a window on the right side, allowing natural light to illuminate the workspace. Hanging tools and wooden pieces are visible on the left side of the image. The floor is covered in sawdust, indicating active use. The man is standing near the center of the image, slightly to the right, with his back to a wooden door that is partially open, revealing a green, sunlit garden outside. \n",
    "\n",
    "The overall atmosphere of the photograph is warm and industrious, highlighting the man's dedication to his craft. The textures of the wood, the tools, and the man's clothing are clearly defined, adding to the authenticity of the scene. The image conveys a sense of tradition and craftsmanship in a serene, rural setting.\"\"\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "022ce32c",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "98071635",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "95571852",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "04946434",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "715d1c93",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3e3c5872",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7dec73a3",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "aea230f6",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f6a6a1a1",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "1e4629a7",
   "metadata": {},
   "source": [
    "## Evaluate Model Performance\n",
    "Compare the performance of two different models against each other. This is done by taking the latest test set (which should not have been included in the training set for any previous model) and generating a response for each model. The responses are then compared using the usual method to determine the win-rate of each model."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bde64e15",
   "metadata": {},
   "outputs": [],
   "source": [
    "#ds = datasets.load_dataset(\"fancyfeast/joy-captioning-alignment-20250428a\", split=\"test\")\n",
    "#ds = datasets.load_dataset(\"fancyfeast/joy-captioning-alignment-20250501a\", split=\"test\")\n",
    "ds = datasets.load_dataset(\"fancyfeast/joy-captioning-alignment-20250507a\", split=\"test\")\n",
    "client_a = openai.Client(base_url=\"http://localhost:5052/v1\", api_key=\"token-abc123\")\n",
    "client_a_model = \"5i5xmxdx\"\n",
    "client_b = openai.Client(base_url=\"http://localhost:5053/v1\", api_key=\"token-abc123\")\n",
    "#client_b_model = \"8gjfxjdm\"\n",
    "#client_b_model = \"kiqxrbng\"\n",
    "client_b_model = \"mb3500zp\"\n",
    "\n",
    "all_examples = []\n",
    "\n",
    "for example in tqdm(ds):\n",
    "\timage_data = api.read_image(example['filehash'])\n",
    "\timage_mime = magic.from_buffer(image_data, mime=True)\n",
    "\timage_dataurl = f\"data:{image_mime};base64,{base64.b64encode(image_data).decode('utf-8')}\"\n",
    "\tresponse_a = client_a.chat.completions.create(\n",
    "\t\tmodel=client_a_model,\n",
    "\t\tmessages=[\n",
    "\t\t\t{\n",
    "\t\t\t\t\"role\": \"system\",\n",
    "\t\t\t\t\"content\": example['system'],\n",
    "\t\t\t},\n",
    "\t\t\t{\n",
    "\t\t\t\t\"role\": \"user\",\n",
    "\t\t\t\t\"content\": [\n",
    "\t\t\t\t\t{\n",
    "\t\t\t\t\t\t\"type\": \"text\",\n",
    "\t\t\t\t\t\t\"text\": example['question'],\n",
    "\t\t\t\t\t},\n",
    "\t\t\t\t\t{\n",
    "\t\t\t\t\t\t\"type\": \"image_url\",\n",
    "\t\t\t\t\t\t\"image_url\": {\n",
    "\t\t\t\t\t\t\t\"url\": image_dataurl,\n",
    "\t\t\t\t\t\t},\n",
    "\t\t\t\t\t},\n",
    "\t\t\t\t]\n",
    "\t\t\t},\n",
    "\t\t],\n",
    "\t\ttemperature=0.6,\n",
    "\t\ttop_p=0.9,\n",
    "\t\tmax_tokens=512,\n",
    "\t)\n",
    "\tresponse_a = response_a.choices[0].message.content.strip()\n",
    "\n",
    "\tresponse_b = client_b.chat.completions.create(\n",
    "\t\tmodel=client_b_model,\n",
    "\t\tmessages=[\n",
    "\t\t\t{\n",
    "\t\t\t\t\"role\": \"system\",\n",
    "\t\t\t\t\"content\": example['system'],\n",
    "\t\t\t},\n",
    "\t\t\t{\n",
    "\t\t\t\t\"role\": \"user\",\n",
    "\t\t\t\t\"content\": [\n",
    "\t\t\t\t\t{\n",
    "\t\t\t\t\t\t\"type\": \"text\",\n",
    "\t\t\t\t\t\t\"text\": example['question'],\n",
    "\t\t\t\t\t},\n",
    "\t\t\t\t\t{\n",
    "\t\t\t\t\t\t\"type\": \"image_url\",\n",
    "\t\t\t\t\t\t\"image_url\": {\n",
    "\t\t\t\t\t\t\t\"url\": image_dataurl,\n",
    "\t\t\t\t\t\t},\n",
    "\t\t\t\t\t},\n",
    "\t\t\t\t]\n",
    "\t\t\t},\n",
    "\t\t],\n",
    "\t\ttemperature=0.6,\n",
    "\t\ttop_p=0.9,\n",
    "\t\tmax_tokens=512,\n",
    "\t)\n",
    "\tresponse_b = response_b.choices[0].message.content.strip()\n",
    "\n",
    "\t# Randomize the order of the responses to prevent bias\n",
    "\tif random.random() < 0.5:\n",
    "\t\tresponses = [response_a, response_b]\n",
    "\t\tresponse_models = [client_a_model, client_b_model]\n",
    "\telse:\n",
    "\t\tresponses = [response_b, response_a]\n",
    "\t\tresponse_models = [client_b_model, client_a_model]\n",
    "\t\n",
    "\tall_examples.append(EvalSample(\n",
    "\t\tfilehash=example['filehash'],\n",
    "\t\tsystem=example['system'],\n",
    "\t\tquestion=example['question'],\n",
    "\t\tquestion_type=example['question_type'],\n",
    "\t\ttask_type='overall',\n",
    "\t\tresponse_a=responses[0],\n",
    "\t\tresponse_b=responses[1],\n",
    "\t\tresponse_a_model=response_models[0],\n",
    "\t\tresponse_b_model=response_models[1],\n",
    "\t\tground_truth_knowledge=build_ground_truth_knowledge(example['filehash'], example['question']),\n",
    "\t))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2f7c938d",
   "metadata": {},
   "outputs": [],
   "source": [
    "with db_conn() as (conn, cur):\n",
    "\tfor example in tqdm(all_examples):\n",
    "\t\texample.add_to_database(cur)\n",
    "\tconn.commit()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8768dc67",
   "metadata": {},
   "outputs": [],
   "source": [
    "# At this point you need to go have all the preferences judged using Parallel Preferences or Batched Preferences\n",
    "# This assumes that client_b_model is a new model that we haven't processed before, so all preferences in the database that include it must be from our evaluation.\n",
    "all_examples = list_db_preferences()\n",
    "all_examples = [e for e in all_examples if e.response_a_model == client_b_model or e.response_b_model == client_b_model]\n",
    "print(f\"Total examples: {len(all_examples)}\")\n",
    "\n",
    "winner_counts = defaultdict(int)\n",
    "scores = defaultdict(list)\n",
    "repper_cnt = defaultdict(int)\n",
    "\n",
    "with open('tmp-is-repper.txt', 'w') as f:\n",
    "\tfor example in all_examples:\n",
    "\t\tassert example.judge_winner is not None and example.judge_response_a_score is not None and example.judge_response_b_score is not None, f\"Missing winner or scores in example: {example.id}\"\n",
    "\t\tassert example.response_a is not None and example.response_b is not None, f\"Missing responses in example: {example.id}\"\n",
    "\t\tif example.judge_winner == 'a':\n",
    "\t\t\twinner_counts[example.response_a_model] += 1\n",
    "\t\telse:\n",
    "\t\t\twinner_counts[example.response_b_model] += 1\n",
    "\t\t\n",
    "\t\tscores[example.response_a_model].append(example.judge_response_a_score)\n",
    "\t\tscores[example.response_b_model].append(example.judge_response_b_score)\n",
    "\n",
    "\t\tif is_repper(example.response_a):\n",
    "\t\t\trepper_cnt[example.response_a_model] += 1\n",
    "\t\t\tf.write(f\"Repper response A ({example.response_a_model}): {example.response_a}\\n------------------\\n\\n\")\n",
    "\t\tif is_repper(example.response_b):\n",
    "\t\t\trepper_cnt[example.response_b_model] += 1\n",
    "\t\t\tf.write(f\"Repper response B ({example.response_b_model}): {example.response_b}\\n------------------\\n\\n\")\n",
    "\n",
    "print(\"Winner counts:\")\n",
    "for model, count in winner_counts.items():\n",
    "\tprint(f\"{model}: {count}\")\n",
    "\n",
    "print(\"Scores:\")\n",
    "for model, score_list in scores.items():\n",
    "\tavg_score = sum(score_list) / len(score_list)\n",
    "\tprint(f\"{model}: avg_score={avg_score:.4f}, count={len(score_list)}\")\n",
    "\n",
    "print(\"Repper counts:\")\n",
    "for model, count in repper_cnt.items():\n",
    "\tprint(f\"{model}: {count} ({count / len(all_examples):.4%})\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e367f186",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a1e58ca3",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "810a7dcf",
   "metadata": {},
   "source": [
    "## Seed Next Model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "18268ecf",
   "metadata": {},
   "outputs": [],
   "source": [
    "ds = datasets.load_dataset(\"fancyfeast/joy-captioning-alignment-20250428a\", split=\"test\")\n",
    "test_filehashes = set(ds['filehash'])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0be89a1e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Clear out old examples with no judgements\n",
    "n_deleted = 0\n",
    "with db_conn() as (conn, cur):\n",
    "\tfor example in tqdm(list_db_rankings()):\n",
    "\t\tif example.judge_reasoning is not None:\n",
    "\t\t\tcontinue\n",
    "\n",
    "\t\tif example.responses is None:\n",
    "\t\t\tcur.execute(\"DELETE FROM alignment_rankings_ai WHERE id = %s\", (example.id,))\n",
    "\t\t\tn_deleted += 1\n",
    "\t\t\tcontinue\n",
    "\t\t\n",
    "\t\tif example.question_type != 'questionAnswer':\n",
    "\t\t\tcur.execute(\"DELETE FROM alignment_rankings_ai WHERE id = %s\", (example.id,))\n",
    "\t\t\tn_deleted += 1\n",
    "\t\t\tcontinue\n",
    "\n",
    "\t\tif example.filehash in test_filehashes:\n",
    "\t\t\tcur.execute(\"DELETE FROM alignment_rankings_ai WHERE id = %s\", (example.id,))\n",
    "\t\t\tn_deleted += 1\n",
    "\t\t\tcontinue\n",
    "\t\n",
    "\tconn.commit()\n",
    "\tprint(f\"Deleted {n_deleted} examples\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "36c092f4",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Add all of our human VQA examples\n",
    "existing_filehashes = set()\n",
    "existing_filehashes.update(test_filehashes)\n",
    "\n",
    "for example in tqdm(list_db_rankings()):\n",
    "\tif example.responses_model == CURRENT_RESPONSE_MODEL:\n",
    "\t\texisting_filehashes.add(example.filehash)\n",
    "\n",
    "results = api.search(\"EXISTS(vqa_category) AND EXISTS(questionAnswer)\", [\"hash\", \"attributes\"])\n",
    "assert isinstance(results, list)\n",
    "\n",
    "n_added = 0\n",
    "with db_conn() as (conn, cur):\n",
    "\tfor result in tqdm(results):\n",
    "\t\tassert isinstance(result, tag_machine_api.SearchResultImage) and result.hash is not None and result.attributes is not None\n",
    "\t\tif result.hash in existing_filehashes:\n",
    "\t\t\tcontinue\n",
    "\n",
    "\t\tsources = result.attributes.get('source', [])\n",
    "\t\tvqa_category, = result.attributes['vqa_category'].keys()\n",
    "\t\tquestionAnswer, = result.attributes['questionAnswer'].keys()\n",
    "\t\tquestionAnswer = json.loads(questionAnswer)\n",
    "\t\tvqa_category = vqa_category.strip()\n",
    "\t\tquestion = questionAnswer['question'].strip()\n",
    "\t\tanswer = questionAnswer['answer'].strip()\n",
    "\n",
    "\t\tif vqa_category == '' or question == '' or answer == '' or 'benchmark' in vqa_category.lower():\n",
    "\t\t\tcontinue\n",
    "\n",
    "\t\tif 'bra_sizing' in sources and result.hash.hex()[0] != '0':\n",
    "\t\t\t# Filter the bra related questions, so they don't overload the dataset.\n",
    "\t\t\t# using the hash knocks it down by 1/16th, deterministically\n",
    "\t\t\tcontinue\n",
    "\n",
    "\t\tif \"<system>\" in question:\n",
    "\t\t\ti = question.index(\"<system>\")\n",
    "\t\t\tj = question.index(\"</system>\")\n",
    "\t\t\tsystem_message = question[i + len(\"<system>\"):j]\n",
    "\t\t\tquestion = question[:i] + question[j + len(\"</system>\"):]\n",
    "\t\telse:\n",
    "\t\t\tsystem_message = random.choice(system_prompts)\n",
    "\t\t\n",
    "\t\tquestion = question.strip()\n",
    "\n",
    "\t\texample = EvalSampleMulti(\n",
    "\t\t\tfilehash=result.hash,\n",
    "\t\t\tsystem=system_message,\n",
    "\t\t\tquestion=question,\n",
    "\t\t\tquestion_type='questionAnswer',\n",
    "\t\t\tground_truth_knowledge=build_ground_truth_knowledge(result.hash, question),\n",
    "\t\t\tresponses_model=CURRENT_RESPONSE_MODEL,  # So we can track it\n",
    "\t\t)\n",
    "\t\texample.add_to_database(cur)\n",
    "\t\texisting_filehashes.add(result.hash)\n",
    "\t\tn_added += 1\n",
    "\t\n",
    "\tconn.commit()\n",
    "\n",
    "print(f\"Added {n_added} examples\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b902cc60",
   "metadata": {},
   "outputs": [],
   "source": [
    "# List all existing rankings\n",
    "n = 32000\n",
    "existing_types = defaultdict(int)\n",
    "existing_filehashes = set()\n",
    "existing_filehashes.update(test_filehashes)\n",
    "for example in list_db_rankings():\n",
    "\tif example.responses_model == CURRENT_RESPONSE_MODEL:\n",
    "\t\texisting_types[example.question_type] += 1\n",
    "\texisting_filehashes.add(example.filehash)\n",
    "for example in list_db_preferences():\n",
    "\texisting_filehashes.add(example.filehash)\n",
    "\n",
    "# Figure out what VQA examples we have left and what API images we can use\n",
    "remaining_vqa = [e for e in vqa_examples if e.filehash not in existing_filehashes]\n",
    "remaining_api_images = list(set(API_IMAGES_TO_USE) - existing_filehashes)\n",
    "\n",
    "print(f\"# remaining VQA examples: {len(remaining_vqa)}\")\n",
    "print(f\"# remaining API images: {len(remaining_api_images)}\")\n",
    "\n",
    "random.shuffle(remaining_vqa)\n",
    "random.shuffle(remaining_api_images)\n",
    "\n",
    "print({k: (int(v * n), existing_types[k]) for k, v in question_types.items()})\n",
    "\n",
    "target_counts = {k: max(0, int(v * n) - existing_types[k]) for k, v in question_types.items()}\n",
    "print(target_counts)\n",
    "examples = []\n",
    "\n",
    "for question_type, target_count in tqdm(target_counts.items()):\n",
    "\tfor _ in range(target_count):\n",
    "\t\tif question_type == 'questionAnswer':\n",
    "\t\t\tif len(remaining_vqa) == 0:\n",
    "\t\t\t\tcontinue\n",
    "\n",
    "\t\t\texamples.append(remaining_vqa.pop())\n",
    "\t\t\tcontinue\n",
    "\n",
    "\t\tif len(remaining_api_images) == 0:\n",
    "\t\t\tcontinue\n",
    "\n",
    "\t\tfilehash = remaining_api_images.pop()\n",
    "\t\tsystem = random.choice(system_prompts).strip()\n",
    "\t\tquestion = get_random_prompt(question_type, filehash)\n",
    "\t\t# HACK BECAUSE WE FORGOT TO ADD EXTENSIONS\n",
    "\t\tn_extensions = random.randint(1, 3)\n",
    "\t\textentions = [random.choice([\" \", \"\\n\", \"\\n\\n\"]) + x for x in random.sample(PROMPT_EXTENSIONS, n_extensions)]\n",
    "\t\tquestion = question + ''.join(extentions)\n",
    "\t\t###\n",
    "\t\texamples.append(EvalSampleMulti(\n",
    "\t\t\tfilehash=filehash,\n",
    "\t\t\tsystem=system,\n",
    "\t\t\tquestion=question,\n",
    "\t\t\tquestion_type=question_type,\n",
    "\t\t\tground_truth_knowledge=build_ground_truth_knowledge(filehash, question),\n",
    "\t\t\tresponses_model=CURRENT_RESPONSE_MODEL,  # So we can track it\n",
    "\t\t))\n",
    "\n",
    "print(f\"Generated {len(examples)} examples to add to the database\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0743dbe0",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Now insert all the new examples into the database\n",
    "with db_conn() as (conn, cur):\n",
    "\texamples = [example.add_to_database(cur) for example in tqdm(examples)]\n",
    "\tconn.commit()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a02632cb",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "06d53288",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9a2b675a",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f081be83",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1defe5c3",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "36658ff6",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d6c4a4f9",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5a9898a2",
   "metadata": {},
   "outputs": [],
   "source": [
    "questions = [\n",
    "\t\"{tag_string}\\n\\nPlease write an SDXL prompt for this image. The above are the booru tags that were associated with the image. The prompt include all of those tags. The prompt will be used by SDXL to try and recreate the image as exactly as possible. So make sure the prompt is complete, accurate, and detailed.\",\n",
    "\t\"{tag_string}\\n\\nI need an SDXL-like prompt.\",\n",
    "\t\"{tag_string}\\n\\nIncorporate as many of these tags into your c aption as possible while staying under {word_count} words and writing without describing mood and just being straightrfoward.\",\n",
    "\t\"Concise straightforward caption using some of the tags below. {word_count} words MAX.\\n\\n{tag_string}\",\n",
    "\t\"\"\"{tag_string}\\n\\nYou have up to {word_count} words to write a caption for this image in a straightforward way without bullshit descriptors like \"This image is...\" or ambiguity.\"\"\",\n",
    "\t\"I need a caption that is straightforward without bullshit, thanks.\\n\\n{tag_string}\\n\\nNot too, too long, like {word_count} words or a bit more?\",\n",
    "\t\"{tag_string}\\n\\nWrite a {length} description for this image.  The above are booru tags for this image.  Use them to enhance the caption you write.\",\n",
    "\t\"{tag_string}\\n\\n\\nWrite an SDXL prompt using the attached image and the above tags\",\n",
    "\t\"\"\"I have these tags for this image {tag_string}\\n\\nI need a caption. No bullshit like \"This image is\", just a straightforward caption within {word_count} words covering all details needed and incorporating the tags when you can.\"\"\",\n",
    "\t\"{tag_string}\\n\\nI need an SDXL-like prompt. Include these tags.\",\n",
    "\t\"\"\"Incorpoating these tags when possible write out a sttraightforward caption. Do not use the word \"featuring\" or anything like it!\\n\\n{tag_string}\"\"\",\n",
    "\t\"\"\"I would like a straightforward caption.\\n\\n{tag_string}\"\"\",\n",
    "\t\"\"\"{tag_string}\\n\\nGimmie a straightforward caption\"\"\",\n",
    "\t\"\"\"{tag_string}\\n\\nI would like you to output  a caption with straighforward styule of writing the caption\"\"\",\n",
    "\t\"\"\"{tag_string}\\n\\nWrite an accurate caption in a straightforward style.\"\"\",\n",
    "\t\"\"\"{tag_string}\\n\\nBased on these booru tags and the image write out an SDXL prompt\"\"\",\n",
    "\t\"\"\"{tag_string}\\n\\n\\nI would like a SDXL prompt written based on the image and the tags above. Less than {word_count} words\"\"\",\n",
    "\t\"\"\"{tag_string}\\n\\nPlease write an SDXL prompt for this image. The above are the booru tags that were associated with the image. The prompt include all of those tags. The prompt will be used by SDXL to try and recreate the image as exactly as possible. So make sure the prompt is complete, accurate, and detailed.\"\"\",\n",
    "\t\"\"\"Describe this image in a non-conversational, objective manner. Do not offer opinions, interpretations, or emotional reactions. Focus on specific, observable details. Avoid casual language or phrasing. Include descriptions of any text, watermarks, or identifying marks. Make sure you use these booru tags to help ({tag_string})\"\"\",\n",
    "\t\"\"\"{tag_string}\\n\\nPlease write a caption for this image. The above are the booru tags that were associated with the image. The caption must use all of those tags. The caption will be used by a test-to-image model to try and recreate the image as exactly as possible. So make sure the caption is complete, accurate, and detailed.\"\"\",\n",
    "\t\"\"\"{tag_string}\\n\\nWrite a descriptive caption for this image. The tags listed above are from the image's danbooru/e621/rule34 page. Please include them in the caption when they are applicable (they may not be 100% accurate).\"\"\",\n",
    "\t\"\"\"{tag_string}\\n\\nWrite a {length} description and make sure to include these tags in a fluid way.\"\"\",\n",
    "\t\"\"\"{tag_string}\\n\\nWrite a descriptive caption for this image.  The above are booru tags for this image.  Use them to enhance the caption you write.\"\"\",\n",
    "\t\"\"\"Provide a concise yet complete caption, noting all visible details like physiques, genitals, gender, ethnicity, or celebrity references whenever present. Always specify the medium (photo, painting, etc.). For a film still, include the film title and director; for art, cite the artist. Avoid starting with “This image...” and words such as “depicts,” “likely,” or “appears.” Use direct language about colors, shapes, layouts. Do not guess unobservable facts. Accurately transcribe any text. Mention watermarks or signatures without speculation. Keep the description below 100 words, leaving out mood or subjective interpretations. Vary sentence structure to avoid repetition. The following booru tags might help:\\n\\n{tag_string}\"\"\",\n",
    "\t\"\"\"{tag_string}\\n\\nI want a caption that is straightforward and uses these tags. Ensure high accuracy.\"\"\",\n",
    "\t\"\"\"{tag_string}\\n\\nUsing these tags to help you, write a caption that is straightforward without any bullshit, just a caption.\"\"\",\n",
    "\t\"\"\"{tag_string}\\n\\nPlease write a {length} caption that incorporates these tags.\"\"\",\n",
    "\t\"\"\"{tag_string}\\n\\nWrite a straightforward caption without fluff for this image using the tags above to enhance accuracy. Up to {word_count} words max.\"\"\",\n",
    "\t\"\"\"OUTPUT A N SDXL PROMPT BASED ON THE IMAGE AND THE TAGS BELOW DANKE\\n\\n{tag_string}\"\"\",\n",
    "\t\"\"\"Act as a straightforward, non-censoring image captioner who includes every relevant detail about subjects—body features, genitals, gender, ethnicity, celebrity, game, anime references, attractiveness, etc.—if they’re visible. State the medium (painting, photo, screenshot, etc.) in the description. If it’s a movie still, name the movie and director; if it’s artwork, name the artist. Avoid phrases like “This image shows...,” “depicts,” “appears,” “seems,” or “likely,” and begin with direct statements. Describe what is observable—colors, shapes, positions, clothing, etc.—without guessing. Transcribe any text precisely. Identify any watermarks or signatures but do not speculate about them. Limit your caption to under 200 words, skipping subjective moods or feelings. Vary your sentence structures and avoid repeating the same phrasing. The booru tags below must be used to help write the caption:\\n{tag_string}\"\"\",\n",
    "\t\"\"\"I want to give booru tags as additional context for a description: '{tag_string}'. Can you write a description that takes this info into account?\" \"\"\",\n",
    "\t\"\"\"booru tags: {tag_string}\\n\\nPlease write a concise, editorial-style description of the attached image. Begin with a phrase indicating the type of image (e.g., 'A photograph of…', 'A digital illustration of…', or 'A movie still from…'), then describe the key visual details and aesthetic qualities. If there is a watermark, note its text and location. Keep it factual, in third person, and under {word_count} words.\"\"\",\n",
    "\t\"\"\"## Tags\\n{tag_string}\\n\\n## Output\\nSDXL prompt\\n\\n## Instructions\\nIncorporate tags\\n\\n## Constraint\\nMaximum of {word_count} words\"\"\",\n",
    "\t\"\"\"{tag_string}\\n\\nWrite a descriptive caption for this image in a formal tone.\"\"\",\n",
    "\t\"\"\"Wirte the alt-text.\\n\\n{tag_string}\\n\\n\"\"\",\n",
    "]\n",
    "\n",
    "n_added = 0\n",
    "for _ in tqdm(range(30)):\n",
    "\twith db_conn() as (conn, cur):\n",
    "\t\twhile True:\n",
    "\t\t\tfilehash = remaining_api_images.pop()\n",
    "\t\t\ttag_strings = image_to_tags(filehash)\n",
    "\n",
    "\t\t\tif len(tag_strings) > 0:\n",
    "\t\t\t\tbreak\n",
    "\t\t\n",
    "\t\ttag_string = random.choice(list(tag_strings.values()))\n",
    "\t\ttags = [x.strip() for x in tag_string.split(\",\") if x.strip()]\n",
    "\t\tif len(tags) < 10:\n",
    "\t\t\tcontinue\n",
    "\t\ttags = random.sample(tags, int(random.uniform(0.2, 1.0) * len(tags)))\n",
    "\t\tassert len(tags) > 0, f\"No tags found for image {filehash.hex()}\"\n",
    "\t\ttag_string = \", \".join(tags)\n",
    "\n",
    "\t\tn_words = random.randint(20, 300)\n",
    "\t\tn_words = (n_words // 10) * 10\n",
    "\t\tlength = random.choice(['very short', 'short', 'medium-length', 'long', 'very long'])\n",
    "\t\tname = random.choice(NAMES)\n",
    "\t\tassert isinstance(name, str)\n",
    "\n",
    "\t\tquestion = random.choice(questions).format(\n",
    "\t\t\ttag_string=tag_string,\n",
    "\t\t\tword_count=n_words,\n",
    "\t\t\tlength=length,\n",
    "\t\t\tname=name,\n",
    "\t\t)\n",
    "\n",
    "\t\tsystem_message = random.choice(system_prompts)\n",
    "\n",
    "\t\texample = EvalSampleMulti(\n",
    "\t\t\tfilehash=filehash,\n",
    "\t\t\tsystem=system_message,\n",
    "\t\t\tquestion=question,\n",
    "\t\t\tquestion_type='questionAnswer',\n",
    "\t\t\tground_truth_knowledge=build_ground_truth_knowledge(filehash, question),\n",
    "\t\t\tresponses_model=CURRENT_RESPONSE_MODEL,  # So we can track it\n",
    "\t\t)\n",
    "\n",
    "\t\texample.add_to_database(cur)\n",
    "\t\tconn.commit()\n",
    "\t\tn_added += 1\n",
    "\n",
    "\t\t# image_data = api.read_image(example.filehash)\n",
    "\t\t# image = Image.open(io.BytesIO(image_data))\n",
    "\t\t# scale = 512 / max(image.size)\n",
    "\t\t# image = image.resize((int(image.size[0] * scale), int(image.size[1] * scale)), Image.LANCZOS)\n",
    "\t\t# display(image)\n",
    "\t\t# print(example.system)\n",
    "\t\t# print(example.question)\n",
    "\n",
    "print(f\"Added {n_added} examples\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "38ba5de6",
   "metadata": {},
   "outputs": [],
   "source": [
    "results = api.search(\"EXISTS(vqa_category) AND EXISTS(questionAnswer)\", [\"attributes\"])\n",
    "assert isinstance(results, list)\n",
    "for result in results:\n",
    "\tassert isinstance(result, tag_machine_api.SearchResultImage) and result.attributes is not None\n",
    "\tvqa_category, = result.attributes['vqa_category'].keys()\n",
    "\tquestionAnswer, = result.attributes['questionAnswer'].keys()\n",
    "\tquestionAnswer = json.loads(questionAnswer)\n",
    "\tvqa_category = vqa_category.strip()\n",
    "\tquestion = questionAnswer['question'].strip()\n",
    "\tanswer = questionAnswer['answer'].strip()\n",
    "\n",
    "\tif vqa_category == '' or question == '' or answer == '' or 'tag_augmentation' not in vqa_category.lower():\n",
    "\t\tcontinue\n",
    "\n",
    "\tprint(question)\n",
    "\tprint(\"###\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "588a8114",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "id": "a49eb1f0",
   "metadata": {},
   "source": [
    "## Evaluate Tag Augmentation Performance"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "215e09f5",
   "metadata": {},
   "outputs": [],
   "source": [
    "existing_filehashes = set()\n",
    "for example in list_db_rankings() + list_db_preferences():\n",
    "\texisting_filehashes.add(example.filehash)\n",
    "\n",
    "remaining_api_images = list(set(API_IMAGES_TO_USE) - existing_filehashes)\n",
    "random.shuffle(remaining_api_images)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e0d87402",
   "metadata": {},
   "outputs": [],
   "source": [
    "questions = [\n",
    "\t\"{tag_string}\\n\\nPlease write an SDXL prompt for this image. The above are the booru tags that were associated with the image. The prompt include all of those tags. The prompt will be used by SDXL to try and recreate the image as exactly as possible. So make sure the prompt is complete, accurate, and detailed.\",\n",
    "\t\"{tag_string}\\n\\nI need an SDXL-like prompt.\",\n",
    "\t\"{tag_string}\\n\\nIncorporate as many of these tags into your c aption as possible while staying under {word_count} words and writing without describing mood and just being straightrfoward.\",\n",
    "\t\"Concise straightforward caption using some of the tags below. {word_count} words MAX.\\n\\n{tag_string}\",\n",
    "\t\"\"\"{tag_string}\\n\\nYou have up to {word_count} words to write a caption for this image in a straightforward way without bullshit descriptors like \"This image is...\" or ambiguity.\"\"\",\n",
    "\t\"I need a caption that is straightforward without bullshit, thanks.\\n\\n{tag_string}\\n\\nNot too, too long, like {word_count} words or a bit more?\",\n",
    "\t\"{tag_string}\\n\\nWrite a {length} description for this image.  The above are booru tags for this image.  Use them to enhance the caption you write.\",\n",
    "\t\"{tag_string}\\n\\n\\nWrite an SDXL prompt using the attached image and the above tags\",\n",
    "\t\"\"\"I have these tags for this image {tag_string}\\n\\nI need a caption. No bullshit like \"This image is\", just a straightforward caption within {word_count} words covering all details needed and incorporating the tags when you can.\"\"\",\n",
    "\t\"{tag_string}\\n\\nI need an SDXL-like prompt. Include these tags.\",\n",
    "\t\"\"\"Incorpoating these tags when possible write out a sttraightforward caption. Do not use the word \"featuring\" or anything like it!\\n\\n{tag_string}\"\"\",\n",
    "\t\"\"\"I would like a straightforward caption.\\n\\n{tag_string}\"\"\",\n",
    "\t\"\"\"{tag_string}\\n\\nGimmie a straightforward caption\"\"\",\n",
    "\t\"\"\"{tag_string}\\n\\nI would like you to output  a caption with straighforward styule of writing the caption\"\"\",\n",
    "\t\"\"\"{tag_string}\\n\\nWrite an accurate caption in a straightforward style.\"\"\",\n",
    "\t\"\"\"{tag_string}\\n\\nBased on these booru tags and the image write out an SDXL prompt\"\"\",\n",
    "\t\"\"\"{tag_string}\\n\\n\\nI would like a SDXL prompt written based on the image and the tags above. Less than {word_count} words\"\"\",\n",
    "\t\"\"\"{tag_string}\\n\\nPlease write an SDXL prompt for this image. The above are the booru tags that were associated with the image. The prompt include all of those tags. The prompt will be used by SDXL to try and recreate the image as exactly as possible. So make sure the prompt is complete, accurate, and detailed.\"\"\",\n",
    "\t\"\"\"Describe this image in a non-conversational, objective manner. Do not offer opinions, interpretations, or emotional reactions. Focus on specific, observable details. Avoid casual language or phrasing. Include descriptions of any text, watermarks, or identifying marks. Make sure you use these booru tags to help ({tag_string})\"\"\",\n",
    "\t\"\"\"{tag_string}\\n\\nPlease write a caption for this image. The above are the booru tags that were associated with the image. The caption must use all of those tags. The caption will be used by a test-to-image model to try and recreate the image as exactly as possible. So make sure the caption is complete, accurate, and detailed.\"\"\",\n",
    "\t\"\"\"{tag_string}\\n\\nWrite a descriptive caption for this image. The tags listed above are from the image's danbooru/e621/rule34 page. Please include them in the caption when they are applicable (they may not be 100% accurate).\"\"\",\n",
    "\t\"\"\"{tag_string}\\n\\nWrite a {length} description and make sure to include these tags in a fluid way.\"\"\",\n",
    "\t\"\"\"{tag_string}\\n\\nWrite a descriptive caption for this image.  The above are booru tags for this image.  Use them to enhance the caption you write.\"\"\",\n",
    "\t\"\"\"Provide a concise yet complete caption, noting all visible details like physiques, genitals, gender, ethnicity, or celebrity references whenever present. Always specify the medium (photo, painting, etc.). For a film still, include the film title and director; for art, cite the artist. Avoid starting with “This image...” and words such as “depicts,” “likely,” or “appears.” Use direct language about colors, shapes, layouts. Do not guess unobservable facts. Accurately transcribe any text. Mention watermarks or signatures without speculation. Keep the description below 100 words, leaving out mood or subjective interpretations. Vary sentence structure to avoid repetition. The following booru tags might help:\\n\\n{tag_string}\"\"\",\n",
    "\t\"\"\"{tag_string}\\n\\nI want a caption that is straightforward and uses these tags. Ensure high accuracy.\"\"\",\n",
    "\t\"\"\"{tag_string}\\n\\nUsing these tags to help you, write a caption that is straightforward without any bullshit, just a caption.\"\"\",\n",
    "\t\"\"\"{tag_string}\\n\\nPlease write a {length} caption that incorporates these tags.\"\"\",\n",
    "\t\"\"\"{tag_string}\\n\\nWrite a straightforward caption without fluff for this image using the tags above to enhance accuracy. Up to {word_count} words max.\"\"\",\n",
    "\t\"\"\"OUTPUT A N SDXL PROMPT BASED ON THE IMAGE AND THE TAGS BELOW DANKE\\n\\n{tag_string}\"\"\",\n",
    "\t\"\"\"Act as a straightforward, non-censoring image captioner who includes every relevant detail about subjects—body features, genitals, gender, ethnicity, celebrity, game, anime references, attractiveness, etc.—if they’re visible. State the medium (painting, photo, screenshot, etc.) in the description. If it’s a movie still, name the movie and director; if it’s artwork, name the artist. Avoid phrases like “This image shows...,” “depicts,” “appears,” “seems,” or “likely,” and begin with direct statements. Describe what is observable—colors, shapes, positions, clothing, etc.—without guessing. Transcribe any text precisely. Identify any watermarks or signatures but do not speculate about them. Limit your caption to under 200 words, skipping subjective moods or feelings. Vary your sentence structures and avoid repeating the same phrasing. The booru tags below must be used to help write the caption:\\n{tag_string}\"\"\",\n",
    "\t\"\"\"I want to give booru tags as additional context for a description: '{tag_string}'. Can you write a description that takes this info into account?\" \"\"\",\n",
    "\t\"\"\"booru tags: {tag_string}\\n\\nPlease write a concise, editorial-style description of the attached image. Begin with a phrase indicating the type of image (e.g., 'A photograph of…', 'A digital illustration of…', or 'A movie still from…'), then describe the key visual details and aesthetic qualities. If there is a watermark, note its text and location. Keep it factual, in third person, and under {word_count} words.\"\"\",\n",
    "\t\"\"\"## Tags\\n{tag_string}\\n\\n## Output\\nSDXL prompt\\n\\n## Instructions\\nIncorporate tags\\n\\n## Constraint\\nMaximum of {word_count} words\"\"\",\n",
    "\t\"\"\"{tag_string}\\n\\nWrite a descriptive caption for this image in a formal tone.\"\"\",\n",
    "\t\"\"\"Wirte the alt-text.\\n\\n{tag_string}\\n\\n\"\"\",\n",
    "]\n",
    "\n",
    "tag_augmentation_eval = []\n",
    "\n",
    "# Build 64 eval examples. Each pairs a randomly drawn API image that has a\n",
    "# usable tag set with a random system prompt and question template.\n",
    "for _ in tqdm(range(64)):\n",
    "\t# Keep popping candidate images until one yields a tag string with at\n",
    "\t# least 10 tags; images with fewer are discarded (not pushed back).\n",
    "\twhile True:\n",
    "\t\tfilehash = remaining_api_images.pop()\n",
    "\t\ttag_strings = image_to_tags(filehash)\n",
    "\t\tif len(tag_strings) <= 0:\n",
    "\t\t\tcontinue\n",
    "\t\n",
    "\t\t# Pick one tag source at random when the image has several.\n",
    "\t\ttag_string = random.choice(list(tag_strings.values()))\n",
    "\t\ttags = [x.strip() for x in tag_string.split(\",\") if x.strip()]\n",
    "\t\tif len(tags) < 10:\n",
    "\t\t\tcontinue\n",
    "\t\t# Subsample 20-100% of the tags so the model sees varying amounts\n",
    "\t\t# of context; with >= 10 tags this always keeps at least 2.\n",
    "\t\ttags = random.sample(tags, int(random.uniform(0.2, 1.0) * len(tags)))\n",
    "\t\tassert len(tags) > 0, f\"No tags found for image {filehash.hex()}\"\n",
    "\t\ttag_string = \", \".join(tags)\n",
    "\t\tbreak\n",
    "\n",
    "\t# Randomize the requested word budget (rounded to tens), a qualitative\n",
    "\t# length label, and a persona name for templates that use them.\n",
    "\tn_words = random.randint(20, 300)\n",
    "\tn_words = (n_words // 10) * 10\n",
    "\tlength = random.choice(['very short', 'short', 'medium-length', 'long', 'very long'])\n",
    "\tname = random.choice(NAMES)\n",
    "\tassert isinstance(name, str)\n",
    "\n",
    "\t# Templates ignore unused format keys via str.format's named args.\n",
    "\tquestion = random.choice(questions).format(\n",
    "\t\ttag_string=tag_string,\n",
    "\t\tword_count=n_words,\n",
    "\t\tlength=length,\n",
    "\t\tname=name,\n",
    "\t)\n",
    "\n",
    "\tsystem_message = random.choice(system_prompts)\n",
    "\n",
    "\ttag_augmentation_eval.append({\n",
    "\t\t\"filehash\": filehash,\n",
    "\t\t\"system\": system_message,\n",
    "\t\t\"question\": question,\n",
    "\t})"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4194664e",
   "metadata": {},
   "outputs": [],
   "source": [
    "client_b = openai.Client(base_url=\"http://localhost:5053/v1\", api_key=\"token-abc123\")\n",
    "client_b_model = \"mb3500zp\"\n",
    "\n",
    "# Generate a caption from the candidate model for every eval example.\n",
    "# Examples that already have a response are skipped, so the cell can be\n",
    "# re-run to resume after a crash (same resume pattern as the judge loop).\n",
    "for example in tqdm(tag_augmentation_eval):\n",
    "\tif 'response' in example:\n",
    "\t\tcontinue\n",
    "\n",
    "\t# Fetch the raw image bytes and inline them as a base64 data URL.\n",
    "\timage_data = api.read_image(example['filehash'])\n",
    "\timage_mime = magic.from_buffer(image_data, mime=True)\n",
    "\timage_dataurl = f\"data:{image_mime};base64,{base64.b64encode(image_data).decode('utf-8')}\"\n",
    "\tresponse_b = client_b.chat.completions.create(\n",
    "\t\tmodel=client_b_model,\n",
    "\t\tmessages=[\n",
    "\t\t\t{\n",
    "\t\t\t\t\"role\": \"system\",\n",
    "\t\t\t\t\"content\": example['system'],\n",
    "\t\t\t},\n",
    "\t\t\t{\n",
    "\t\t\t\t\"role\": \"user\",\n",
    "\t\t\t\t\"content\": [\n",
    "\t\t\t\t\t{\n",
    "\t\t\t\t\t\t\"type\": \"text\",\n",
    "\t\t\t\t\t\t\"text\": example['question'],\n",
    "\t\t\t\t\t},\n",
    "\t\t\t\t\t{\n",
    "\t\t\t\t\t\t\"type\": \"image_url\",\n",
    "\t\t\t\t\t\t\"image_url\": {\n",
    "\t\t\t\t\t\t\t\"url\": image_dataurl,\n",
    "\t\t\t\t\t\t},\n",
    "\t\t\t\t\t},\n",
    "\t\t\t\t]\n",
    "\t\t\t},\n",
    "\t\t],\n",
    "\t\ttemperature=0.6,\n",
    "\t\ttop_p=0.9,\n",
    "\t\tmax_tokens=512,\n",
    "\t)\n",
    "\tresponse_b = response_b.choices[0].message.content.strip()\n",
    "\texample['response'] = response_b"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9199b6fb",
   "metadata": {},
   "outputs": [],
   "source": [
    "judge_system_prompt = \"\"\"\n",
    "You are an expert AI Response Evaluator. Your task is to meticulously analyze an AI-generated response (`Response`) provided in response to a specific `User Query` and a `User Image`. These responses were generated based on a specific `Original System Prompt` that defined the AI's persona, constraints, and goals.\n",
    "\n",
    "**Your Goal:** Determine objectively how well `Response` performs according to the evaluation criteria outlined below. Provide a clear judgment and a detailed, reasoned justification for your choices.\n",
    "\n",
    "**Input You Will Receive:**\n",
    "\n",
    "1.  `Original System Prompt`: The instructions the AI models were given to generate their responses. Pay close attention to persona, tone, constraints, required format, and specific tasks mentioned here.\n",
    "2.  `User Query`: The specific question or instruction from the user.\n",
    "3.  `User Image`: An image provided by the user, which may be relevant to the query.\n",
    "4.  `Response`: One of the AI-generated responses.\n",
    "\n",
    "**Evaluation Criteria:**\n",
    "\n",
    "1.  **Adherence to Original System Prompt:**\n",
    "    *   Did the response follow ALL instructions, constraints, formatting requirements, persona, and tone specified in the `Original System Prompt`?\n",
    "    *   How well did the response embody the defined persona or role?\n",
    "2.  **Addressing the User Query:**\n",
    "    *   Did the response directly, accurately, and completely answer the `User Query`?\n",
    "    *   Is the response relevant to the user's explicit and implicit needs?\n",
    "3.  **Image Integration:**\n",
    "    *   If an image was provided and relevant, did the response appropriately acknowledge, analyze, or utilize the image content as necessitated by the `User Query` and `Original System Prompt`?\n",
    "4.  **Helpfulness and Usefulness:**\n",
    "    *   How helpful and practical is the response for the user? Does it provide value?\n",
    "5.  **Accuracy and Factual Correctness:**\n",
    "    *   Is the information presented accurate and free from errors? (Acknowledge if you cannot verify).\n",
    "6.  **Clarity, Conciseness, and Structure:**\n",
    "    *   Is the response well-organized, easy to understand, and appropriately concise? Is it free from unnecessary jargon or rambling?\n",
    "\n",
    "**Your Task Steps:**\n",
    "\n",
    "1.  **Understand the Context:** Thoroughly review the `Original System Prompt`, `User Query`, and `User Image`. Understand the *expected* output.\n",
    "2.  **Analyze Response:** Evaluate `Response` against all relevant criteria listed above. Note its strengths and weaknesses.\n",
    "3.  **Score Based on Prompt Adherence:** Assign a score from 1 to 10 based on how well the response adheres to the `Original System Prompt` and addresses the `User Query`. A score of 1 means the response is completely useless, and a score of 10 means the response is perfect.\n",
    "4.  **Score Based on Tag Integration:** If the `User Query` included a list of tags to incorporate in the response, evaluate how well the response integrated those tags. Assign a score from 1 to 10 based on the quality of tag integration. A score of 1 means no tags were integrated, and a score of 10 means all relevant tags were integrated perfectly.\n",
    "5.  **Score Based on Accuracy:** Assign a score from 1 to 10 based on the accuracy of the response. A score of 1 means the response is completely inaccurate, and a score of 10 means the response is completely accurate.\n",
    "\n",
    "**Note:**\n",
    "\n",
    "*   If the user query sets a maximum word count for the response, the response should be within that limit.\n",
    "*   If the user query asks for the response to be \"very short\", \"short\", \"medium-length\", \"long\", or \"very long\" then treat that as an approximate word count limit where very short is ~20 words, short is ~40 words, medium-length is ~60 words, long is ~100 words, and very long is ~200 words.\n",
    "*   If the user includes a list of tags to incorporate into the response, then the AI is expected to skillfully weave those tags into the response in a natural way. The AI should not just list the tags or use them in a way that feels forced or unnatural. The tags do not need to be used in the same order. The tags do not need to be integrated verbatim; synonyms or related terms are acceptable as long as the meaning is preserved. The AI should also not use the word \"tags\" in the response.\n",
    "\n",
    "**Output Format:**\n",
    "\n",
    "Structure your evaluation clearly. You might use headings like:\n",
    "\n",
    "*   **Context Summary:** (Briefly summarize the task set by the prompt/query)\n",
    "*   **Analysis of Response:** (Strengths/Weaknesses against criteria)\n",
    "*   **Scoring:** (Direct point-by-point evaluation scores)\n",
    "*   **Justification:** (Detailed reasoning for the judgment)\n",
    "\n",
    "After your evaluation, at the end of your response, always write a machine parsable output that includes:\n",
    "\n",
    "* Based on your evaluation, the prompt adherence score (1-10) between <prompt_adherence_score> and </prompt_adherence_score>.\n",
    "* Based on your evaluation, the tag integration score (1-10) between <tag_integration_score> and </tag_integration_score>.\n",
    "* Based on your evaluation, the accuracy score (1-10) between <accuracy_score> and </accuracy_score>.\n",
    "\"\"\"\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "judge_user_message = \"\"\"\n",
    "<original_system_prompt>{system_prompt}</original_system_prompt>\n",
    "<user_query>{user_query}</user_query>\n",
    "{response}\n",
    "\n",
    "---\n",
    "\n",
    "**Now, please perform the evaluation based on the instructions provided in your system prompt. Don't forget to clearly mark the scores for the response in <prompt_adherence_score></prompt_adherence_score>, <tag_integration_score></tag_integration_score>, and <accuracy_score></accuracy_score> tags.**\n",
    "\"\"\"\n",
    "\n",
    "client = openai.Client()\n",
    "\n",
    "# Score each generated caption with an o4-mini judge. Examples that\n",
    "# already carry a judge_response are skipped, so the cell can be re-run\n",
    "# to resume after an interruption.\n",
    "for example in tqdm(tag_augmentation_eval):\n",
    "\tif 'judge_response' in example:\n",
    "\t\tcontinue\n",
    "\n",
    "\t# The judge is shown the full system prompt the captioner ran under\n",
    "\t# (shared ALPHA preamble plus the per-example system message).\n",
    "\tsystem_prompt = ALPHA_SYSTEM_MESSAGE + \"\\n\" + example['system']\n",
    "\n",
    "\t# Re-fetch the image and inline it as a base64 data URL.\n",
    "\timage_data = api.read_image(example['filehash'])\n",
    "\timage_mime = magic.from_buffer(image_data, mime=True)\n",
    "\timage_dataurl = f\"data:{image_mime};base64,{base64.b64encode(image_data).decode('utf-8')}\"\n",
    "\n",
    "\tjudge_response = client.chat.completions.create(\n",
    "\t\tmodel=\"o4-mini\",\n",
    "\t\treasoning_effort=\"medium\",\n",
    "\t\tmessages=[\n",
    "\t\t\t{\n",
    "\t\t\t\t\"role\": \"system\",\n",
    "\t\t\t\t\"content\": judge_system_prompt.strip(),\n",
    "\t\t\t},\n",
    "\t\t\t{\n",
    "\t\t\t\t\"role\": \"user\",\n",
    "\t\t\t\t\"content\": [\n",
    "\t\t\t\t\t{\n",
    "\t\t\t\t\t\t\"type\": \"text\",\n",
    "\t\t\t\t\t\t\"text\": judge_user_message.format(system_prompt=system_prompt.strip(), user_query=example['question'].strip(), response=example['response']).strip(),\n",
    "\t\t\t\t\t},\n",
    "\t\t\t\t\t{\n",
    "\t\t\t\t\t\t\"type\": \"image_url\",\n",
    "\t\t\t\t\t\t\"image_url\": {\n",
    "\t\t\t\t\t\t\t\"url\": image_dataurl,\n",
    "\t\t\t\t\t\t},\n",
    "\t\t\t\t\t}\n",
    "\t\t\t\t]\n",
    "\t\t\t},\n",
    "\t\t],\n",
    "\t\t# Reasoning models do not accept a sampling temperature.\n",
    "\t\t#temperature=0.5,\n",
    "\t)\n",
    "\tjudge_response = judge_response.choices[0].message.content.strip()\n",
    "\texample['judge_response'] = judge_response\n",
    "\t#break"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "878ff5f5",
   "metadata": {},
   "outputs": [],
   "source": [
    "import markdown\n",
    "import html\n",
    "\n",
    "# Build a self-contained HTML report: inlined image, prompts, candidate\n",
    "# response, and the rendered judge evaluation, one section per example.\n",
    "result = \"<html><body>\\n\"\n",
    "\n",
    "for example in tag_augmentation_eval:\n",
    "\tif 'judge_response' not in example:\n",
    "\t\tcontinue\n",
    "\tjudge_response = example['judge_response']\n",
    "\t# Insert a blank line before list markers that directly follow a\n",
    "\t# 'label:' line so the markdown parser recognizes them as lists.\n",
    "\tjudge_response = re.sub(r'(:\\n)([-+*])', r'\\1\\n\\2', judge_response)\n",
    "\tjudge_response = markdown.markdown(judge_response, extensions=[\"extra\", \"nl2br\"])\n",
    "\n",
    "\timage_data = api.read_image(example['filehash'])\n",
    "\timage_mime = magic.from_buffer(image_data, mime=True)\n",
    "\timage_dataurl = f\"data:{image_mime};base64,{base64.b64encode(image_data).decode('utf-8')}\"\n",
    "\tresult += f'<img src=\"{image_dataurl}\" width=\"512\"><br>\\n'\n",
    "\t# Escape raw prompt/response text: a stray '<', '>' or '&' in a model\n",
    "\t# output would otherwise break or inject into the report markup. The\n",
    "\t# judge response is intentionally left unescaped - it is already HTML.\n",
    "\tresult += f\"<p><strong>System:</strong> {html.escape(example['system'])}</p>\\n\"\n",
    "\tresult += f\"<p><strong>Question:</strong> {html.escape(example['question'])}</p>\\n\"\n",
    "\tresult += f\"<p><strong>Response:</strong> {html.escape(example['response'])}</p>\\n\"\n",
    "\tresult += f\"<p><strong>Judge Response:</strong> {judge_response}</p>\\n\"\n",
    "\tresult += \"<hr>\\n\"\n",
    "result += \"</body></html>\\n\"\n",
    "# Explicit UTF-8 so non-ASCII caption text writes cleanly on any locale.\n",
    "with open(\"tag_augmentation_eval.html\", \"w\", encoding=\"utf-8\") as f:\n",
    "\tf.write(result)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c6b76e15",
   "metadata": {},
   "outputs": [],
   "source": [
    "import statistics\n",
    "\n",
    "# Parse the three numeric scores out of each judge response and print\n",
    "# summary statistics (mean/median/min/max) per metric.\n",
    "scores = defaultdict(list)\n",
    "for i, example in enumerate(tag_augmentation_eval):\n",
    "\tif 'judge_response' not in example:\n",
    "\t\tprint(f\"Missing judge response for example {i}\")\n",
    "\t\tcontinue\n",
    "\tjudge_response = example['judge_response']\n",
    "\ttry:\n",
    "\t\t# re.search returns None on a missing tag; the resulting\n",
    "\t\t# AttributeError is caught below and the example is skipped.\n",
    "\t\tprompt_adherence_score = int(re.search(r'<prompt_adherence_score>(\\d+)</prompt_adherence_score>', judge_response).group(1))\n",
    "\t\ttag_integration_score = int(re.search(r'<tag_integration_score>(\\d+)</tag_integration_score>', judge_response).group(1))\n",
    "\t\taccuracy_score = int(re.search(r'<accuracy_score>(\\d+)</accuracy_score>', judge_response).group(1))\n",
    "\texcept Exception as e:\n",
    "\t\tprint(f\"Error parsing judge response for example {i}: {e}\")\n",
    "\t\tcontinue\n",
    "\tscores['prompt_adherence'].append(prompt_adherence_score)\n",
    "\tscores['tag_integration'].append(tag_integration_score)\n",
    "\tscores['accuracy'].append(accuracy_score)\n",
    "\n",
    "for key, values in scores.items():\n",
    "\tmean = sum(values) / len(values)\n",
    "\t# statistics.median averages the two middle values for even-length\n",
    "\t# lists; the previous sorted(values)[len(values) // 2] silently\n",
    "\t# picked the upper-middle element instead.\n",
    "\tmedian = statistics.median(values)\n",
    "\tmin_value = min(values)\n",
    "\tmax_value = max(values)\n",
    "\tprint(f\"{key}: mean={mean:.2f}, median={median}, min={min_value}, max={max_value}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e7b34763",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "tmpenv5",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
