{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6540c2aa",
   "metadata": {},
   "outputs": [],
   "source": [
    "import asyncio\n",
    "from pprint import pprint\n",
    "\n",
    "import pandas as pd\n",
    "from sklearn.metrics import confusion_matrix\n",
    "from tensorzero import AsyncTensorZeroGateway, DICLOptimizationConfig\n",
    "from tqdm.asyncio import tqdm"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6b76a0f5",
   "metadata": {},
   "source": [
    "## Load the SMS spam classification dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "af258b1d",
   "metadata": {},
   "outputs": [],
   "source": [
     "def load_data():\n",
     "    \"\"\"Load the SMS spam dataset and split it into training and validation sets.\n",
     "\n",
     "    Reads `data/clean_data.csv`, prints dataset statistics, and splits rows on\n",
     "    the `is_train` column (1 = training, 0 = validation).\n",
     "\n",
     "    Returns:\n",
     "        A `(train_df, val_df)` tuple of DataFrames.\n",
     "    \"\"\"\n",
     "    df = pd.read_csv(\"data/clean_data.csv\")\n",
     "\n",
     "    # Print dataset statistics\n",
     "    print(\"Training Samples: \", df[df[\"is_train\"] == 1].shape[0])\n",
     "    print(\"Validation Samples: \", df[df[\"is_train\"] == 0].shape[0])\n",
     "    print(f\"Spam {df[df['class'] == 1].shape[0] / df.shape[0]:.2%}\")\n",
     "\n",
     "    # Split dataset into training and validation sets\n",
     "    train_df, val_df = df[df[\"is_train\"] == 1], df[df[\"is_train\"] == 0]\n",
     "\n",
     "    return train_df, val_df\n",
     "\n",
     "\n",
     "train_df, val_df = load_data()\n",
     "# Preview the first few training samples (bare expression → rich display)\n",
     "train_df.head(5)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8d9d6837",
   "metadata": {},
   "source": [
    "## Set up the TensorZero Gateway client"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "095affdd",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Build an embedded (in-process) TensorZero gateway backed by the local ClickHouse\n",
     "# instance. NOTE(review): the ClickHouse credentials here are the local dev defaults;\n",
     "# use environment variables for any non-local deployment.\n",
     "t0 = await AsyncTensorZeroGateway.build_embedded(\n",
     "    clickhouse_url=\"http://chuser:chpassword@localhost:8123/tensorzero\",\n",
     "    config_file=\"config/tensorzero.toml\",\n",
     ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "14f894a4",
   "metadata": {},
   "source": [
    "## Building a dataset for optimization\n",
    "\n",
    "Let's convert the SMS spam dataset to the TensorZero format.\n",
     "For educational purposes, let's store the dataset in TensorZero and query it back later.\n",
    "\n",
    "Alternatively, you could use historical inferences and feedback to build samples for optimization.\n",
    "See the documentation for `t0.experimental_list_inferences` for more information.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c31f6f7e",
   "metadata": {},
   "outputs": [],
   "source": [
     "def df_to_tensorzero_datapoints(df):\n",
     "    \"\"\"Convert a DataFrame of SMS messages into TensorZero datapoint dicts.\n",
     "\n",
     "    Each row's `text` becomes a single user message, and its `class` column\n",
     "    (1 = spam) becomes the boolean `spam` output label for the\n",
     "    `classify_spam` function.\n",
     "    \"\"\"\n",
     "    datapoints = []\n",
     "    for _, row in df.iterrows():\n",
     "        datapoints.append(\n",
     "            {\n",
     "                \"function_name\": \"classify_spam\",\n",
     "                \"input\": {\n",
     "                    \"messages\": [\n",
     "                        {\n",
     "                            \"role\": \"user\",\n",
     "                            \"content\": row[\"text\"],\n",
     "                        }\n",
     "                    ]\n",
     "                },\n",
     "                \"output\": {\"spam\": row[\"class\"] == 1},\n",
     "            }\n",
     "        )\n",
     "    return datapoints\n",
     "\n",
     "\n",
     "async def create_datapoints(t0, df):\n",
     "    \"\"\"Convert `df` to TensorZero datapoints and store them in the `spam_train` dataset.\"\"\"\n",
     "    # Convert our DataFrame into a list of TensorZero datapoints\n",
     "    datapoints = df_to_tensorzero_datapoints(df)\n",
     "\n",
     "    # Print the first datapoint for sanity checking\n",
     "    pprint(datapoints[0])\n",
     "\n",
     "    # Insert the datapoints into the TensorZero dataset\n",
     "    await t0.create_datapoints(dataset_name=\"spam_train\", datapoints=datapoints)\n",
     "\n",
     "\n",
     "# Convert the training dataset to TensorZero datapoints and store them in TensorZero\n",
     "await create_datapoints(t0, train_df)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "54ac17c9",
   "metadata": {},
   "source": [
    "## Launch the dynamic in-context learning optimization workflow\n",
    "\n",
    "Let's load the stored datapoints from TensorZero."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "3509bb38",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Fetch the stored datapoints back from TensorZero; the generous limit ensures\n",
     "# we retrieve the full `spam_train` dataset in one call\n",
     "stored_datapoints = await t0.list_datapoints(dataset_name=\"spam_train\", limit=100_000)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9a2b9915",
   "metadata": {},
   "source": [
    "Let's render these datapoints. The stored datapoint is a variant-agnostic representation of the datapoint.\n",
    "Rendering makes the datapoints ready for the optimization workflow.\n",
    "The `experimental_render_samples` function applies templates and schemas, loads input files from object storage, and so on.\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "126a1ee4",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Render the variant-agnostic datapoints with the `baseline` variant of\n",
     "# `classify_spam` (applies templates/schemas) so they are ready for optimization\n",
     "train_samples = await t0.experimental_render_samples(\n",
     "    stored_samples=stored_datapoints,\n",
     "    variants={\"classify_spam\": \"baseline\"},\n",
     ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c2a52419",
   "metadata": {},
   "source": [
    "Finally, let's launch the optimization workflow."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5f3fce44",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Launch the DICL optimization job: embed the training samples with the given\n",
     "# embedding model and create a `dicl` variant for the `classify_spam` function\n",
     "job_handle = await t0.experimental_launch_optimization(\n",
     "    train_samples=train_samples,\n",
     "    optimization_config=DICLOptimizationConfig(\n",
     "        embedding_model=\"openai::text-embedding-3-small\",\n",
     "        function_name=\"classify_spam\",\n",
     "        variant_name=\"dicl\",\n",
     "        # Keep any previously created variants instead of replacing them\n",
     "        append_to_existing_variants=True,\n",
     "    ),\n",
     ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "19a4c0bf",
   "metadata": {},
   "source": [
    "## Compare the baseline and the DICL variants"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "39edf399",
   "metadata": {},
   "source": [
    "Let's define a function that runs inference and parses the classification result."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f2c0d4d7",
   "metadata": {},
   "outputs": [],
   "source": [
     "async def infer_spam(t0, text, variant_name=None):\n",
     "    \"\"\"Classify a message with the `classify_spam` function and return the verdict.\n",
     "\n",
     "    Args:\n",
     "        t0: TensorZero gateway client.\n",
     "        text: The SMS message to classify.\n",
     "        variant_name: Optional variant to pin (e.g. \"baseline\" or \"dicl\");\n",
     "            None lets the gateway choose.\n",
     "\n",
     "    Returns:\n",
     "        True if the message is classified as spam, False otherwise.\n",
     "\n",
     "    Raises:\n",
     "        AssertionError: If the output failed to parse or `spam` is not a bool.\n",
     "    \"\"\"\n",
     "    result = await t0.inference(\n",
     "        function_name=\"classify_spam\",\n",
     "        variant_name=variant_name,\n",
     "        input={\n",
     "            \"messages\": [\n",
     "                {\n",
     "                    \"role\": \"user\",\n",
     "                    \"content\": text,\n",
     "                }\n",
     "            ]\n",
     "        },\n",
     "        # Cache results so repeated evaluation runs are cheap and deterministic\n",
     "        cache_options={\"enabled\": \"on\"},\n",
     "    )\n",
     "\n",
     "    assert result.output.parsed is not None\n",
     "    is_spam = result.output.parsed.get(\"spam\")\n",
     "    assert isinstance(is_spam, bool)\n",
     "\n",
     "    return is_spam"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2163ca7a",
   "metadata": {},
   "source": [
    "Let's create a semaphore to limit the number of concurrent inference requests to the API.\n",
    "Adjust this value based on your API rate limit."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c7885c35",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Cap concurrent inference requests; tune to your provider's rate limit\n",
     "semaphore = asyncio.Semaphore(50)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "bd4491eb",
   "metadata": {},
   "source": [
    "Let's define a function that evaluates a variant's performance on an entire dataset."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9430d675",
   "metadata": {},
   "outputs": [],
   "source": [
    "async def process_row(row, variant_name=None):\n",
    "    async with semaphore:\n",
    "        predicted_is_spam = await infer_spam(t0, row[\"text\"], variant_name=variant_name)\n",
    "        real_is_spam = bool(row[\"class\"])\n",
    "\n",
    "    return (predicted_is_spam, real_is_spam)\n",
    "\n",
    "\n",
    "async def evaluate_variant(df, variant_name):\n",
    "    results = await tqdm.gather(*[process_row(row, variant_name) for _, row in df.iterrows()])\n",
    "\n",
    "    tn, fp, fn, tp = (\n",
    "        confusion_matrix(\n",
    "            y_pred=[x[0] for x in results],\n",
    "            y_true=[x[1] for x in results],\n",
    "        )\n",
    "        .ravel()\n",
    "        .tolist()\n",
    "    )\n",
    "\n",
    "    print(f\"True Positives: {tp}\")\n",
    "    print(f\"True Negatives: {tn}\")\n",
    "    print(f\"False Positives: {fp}\")\n",
    "    print(f\"False Negatives: {fn}\")\n",
    "    print(f\"F1 Score: {2 * tp / (2 * tp + fp + fn):.2f}\")\n",
    "    print(f\"Precision: {tp / (tp + fp):.2f}\")\n",
    "    print(f\"Recall: {tp / (tp + fn):.2f}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1185dbc4",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Evaluate the baseline variant on the held-out validation set\n",
     "await evaluate_variant(val_df, \"baseline\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6f026fde",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Evaluate the DICL variant on the same validation set for a fair comparison\n",
     "await evaluate_variant(val_df, \"dicl\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e07fba34",
   "metadata": {},
   "source": [
    "At the time of writing, the DICL variant materially outperforms the baseline variant:\n",
    "\n",
    "- False Positives: 37 → 21\n",
    "- False Negatives: 7 → 3\n",
    "- F1 Score: 0.85 → 0.91\n",
    "- Precision: 77% → 86%\n",
    "- Recall: 95% → 97%\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": ".venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
