{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import json\n",
    "import pandas as pd\n",
    "from glob import glob\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Found 117 files\n"
     ]
    }
   ],
   "source": [
    "# Discover every per-run results file under results/ (recursive).\n",
    "files = glob('results/**/api-results.json', recursive=True)\n",
    "print(f'Found {len(files)} files')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# One row per result file; metadata is encoded in the directory layout:\n",
    "#   results/<dataset>/<model>/<mode>/api-results.json\n",
    "df = pd.DataFrame({'path': files})\n",
    "\n",
    "def load_json(path):\n",
    "    \"\"\"Deserialize a single JSON results file.\"\"\"\n",
    "    with open(path) as f:\n",
    "        return json.load(f)\n",
    "\n",
    "def parse_path(path):\n",
    "    \"\"\"Extract dataset/model/mode from a results file path.\"\"\"\n",
    "    parts = path.split('/')\n",
    "    return {'dataset': parts[1], 'model': parts[2], 'mode': parts[3]}\n",
    "\n",
    "df['data'] = df['path'].apply(load_json)\n",
    "meta = df['path'].apply(parse_path).apply(pd.Series)\n",
    "df = pd.concat([df, meta], axis=1)\n",
    "\n",
    "df['accuracy'] = df['data'].apply(lambda d: d['accuracy'])\n",
    "df = df[df['dataset'] == 'level-1-given-desc']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<style type=\"text/css\">\n",
       "#T_a0114_row0_col2, #T_a0114_row1_col0, #T_a0114_row2_col2, #T_a0114_row3_col0, #T_a0114_row4_col2, #T_a0114_row5_col0, #T_a0114_row6_col2, #T_a0114_row7_col0, #T_a0114_row8_col0, #T_a0114_row9_col1, #T_a0114_row10_col0, #T_a0114_row11_col1, #T_a0114_row12_col1, #T_a0114_row13_col1, #T_a0114_row14_col0, #T_a0114_row15_col0, #T_a0114_row16_col1 {\n",
       "  background-color: lightgreen;\n",
       "}\n",
       "</style>\n",
       "<table id=\"T_a0114\">\n",
       "  <thead>\n",
       "    <tr>\n",
       "      <th class=\"blank\" >&nbsp;</th>\n",
       "      <th class=\"blank level0\" >&nbsp;</th>\n",
       "      <th id=\"T_a0114_level0_col0\" class=\"col_heading level0 col0\" colspan=\"3\">accuracy</th>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th class=\"blank\" >&nbsp;</th>\n",
       "      <th class=\"index_name level1\" >mode</th>\n",
       "      <th id=\"T_a0114_level1_col0\" class=\"col_heading level1 col0\" >code_as_action</th>\n",
       "      <th id=\"T_a0114_level1_col1\" class=\"col_heading level1 col1\" >json_as_action</th>\n",
       "      <th id=\"T_a0114_level1_col2\" class=\"col_heading level1 col2\" >text_as_action</th>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th class=\"index_name level0\" >dataset</th>\n",
       "      <th class=\"index_name level1\" >model</th>\n",
       "      <th class=\"blank col0\" >&nbsp;</th>\n",
       "      <th class=\"blank col1\" >&nbsp;</th>\n",
       "      <th class=\"blank col2\" >&nbsp;</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th id=\"T_a0114_level0_row0\" class=\"row_heading level0 row0\" rowspan=\"17\">level-1-given-desc</th>\n",
       "      <th id=\"T_a0114_level1_row0\" class=\"row_heading level1 row0\" >CodeLlama-13b-Instruct-hf</th>\n",
       "      <td id=\"T_a0114_row0_col0\" class=\"data row0 col0\" >11.78%</td>\n",
       "      <td id=\"T_a0114_row0_col1\" class=\"data row0 col1\" >7.77%</td>\n",
       "      <td id=\"T_a0114_row0_col2\" class=\"data row0 col2\" >14.04%</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th id=\"T_a0114_level1_row1\" class=\"row_heading level1 row1\" >CodeLlama-34b-Instruct-hf</th>\n",
       "      <td id=\"T_a0114_row1_col0\" class=\"data row1 col0\" >17.29%</td>\n",
       "      <td id=\"T_a0114_row1_col1\" class=\"data row1 col1\" >12.03%</td>\n",
       "      <td id=\"T_a0114_row1_col2\" class=\"data row1 col2\" >16.79%</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th id=\"T_a0114_level1_row2\" class=\"row_heading level1 row2\" >CodeLlama-7b-Instruct-hf</th>\n",
       "      <td id=\"T_a0114_row2_col0\" class=\"data row2 col0\" >12.53%</td>\n",
       "      <td id=\"T_a0114_row2_col1\" class=\"data row2 col1\" >12.03%</td>\n",
       "      <td id=\"T_a0114_row2_col2\" class=\"data row2 col2\" >17.04%</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th id=\"T_a0114_level1_row3\" class=\"row_heading level1 row3\" >Llama-2-13b-chat-hf</th>\n",
       "      <td id=\"T_a0114_row3_col0\" class=\"data row3 col0\" >38.10%</td>\n",
       "      <td id=\"T_a0114_row3_col1\" class=\"data row3 col1\" >8.52%</td>\n",
       "      <td id=\"T_a0114_row3_col2\" class=\"data row3 col2\" >37.34%</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th id=\"T_a0114_level1_row4\" class=\"row_heading level1 row4\" >Llama-2-70b-chat-hf</th>\n",
       "      <td id=\"T_a0114_row4_col0\" class=\"data row4 col0\" >35.59%</td>\n",
       "      <td id=\"T_a0114_row4_col1\" class=\"data row4 col1\" >14.29%</td>\n",
       "      <td id=\"T_a0114_row4_col2\" class=\"data row4 col2\" >37.59%</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th id=\"T_a0114_level1_row5\" class=\"row_heading level1 row5\" >Llama-2-7b-chat-hf</th>\n",
       "      <td id=\"T_a0114_row5_col0\" class=\"data row5 col0\" >28.82%</td>\n",
       "      <td id=\"T_a0114_row5_col1\" class=\"data row5 col1\" >11.28%</td>\n",
       "      <td id=\"T_a0114_row5_col2\" class=\"data row5 col2\" >25.81%</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th id=\"T_a0114_level1_row6\" class=\"row_heading level1 row6\" >Mistral-7B-Instruct-v0.1</th>\n",
       "      <td id=\"T_a0114_row6_col0\" class=\"data row6 col0\" >2.51%</td>\n",
       "      <td id=\"T_a0114_row6_col1\" class=\"data row6 col1\" >2.26%</td>\n",
       "      <td id=\"T_a0114_row6_col2\" class=\"data row6 col2\" >3.01%</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th id=\"T_a0114_level1_row7\" class=\"row_heading level1 row7\" >claude-2</th>\n",
       "      <td id=\"T_a0114_row7_col0\" class=\"data row7 col0\" >76.69%</td>\n",
       "      <td id=\"T_a0114_row7_col1\" class=\"data row7 col1\" >59.40%</td>\n",
       "      <td id=\"T_a0114_row7_col2\" class=\"data row7 col2\" >73.68%</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th id=\"T_a0114_level1_row8\" class=\"row_heading level1 row8\" >claude-instant-1</th>\n",
       "      <td id=\"T_a0114_row8_col0\" class=\"data row8 col0\" >75.19%</td>\n",
       "      <td id=\"T_a0114_row8_col1\" class=\"data row8 col1\" >64.91%</td>\n",
       "      <td id=\"T_a0114_row8_col2\" class=\"data row8 col2\" >73.18%</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th id=\"T_a0114_level1_row9\" class=\"row_heading level1 row9\" >gemini-pro</th>\n",
       "      <td id=\"T_a0114_row9_col0\" class=\"data row9 col0\" >70.43%</td>\n",
       "      <td id=\"T_a0114_row9_col1\" class=\"data row9 col1\" >73.18%</td>\n",
       "      <td id=\"T_a0114_row9_col2\" class=\"data row9 col2\" >71.18%</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th id=\"T_a0114_level1_row10\" class=\"row_heading level1 row10\" >gpt-3.5-turbo-0613</th>\n",
       "      <td id=\"T_a0114_row10_col0\" class=\"data row10 col0\" >74.44%</td>\n",
       "      <td id=\"T_a0114_row10_col1\" class=\"data row10 col1\" >73.93%</td>\n",
       "      <td id=\"T_a0114_row10_col2\" class=\"data row10 col2\" >73.43%</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th id=\"T_a0114_level1_row11\" class=\"row_heading level1 row11\" >gpt-3.5-turbo-1106</th>\n",
       "      <td id=\"T_a0114_row11_col0\" class=\"data row11 col0\" >75.44%</td>\n",
       "      <td id=\"T_a0114_row11_col1\" class=\"data row11 col1\" >78.45%</td>\n",
       "      <td id=\"T_a0114_row11_col2\" class=\"data row11 col2\" >73.43%</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th id=\"T_a0114_level1_row12\" class=\"row_heading level1 row12\" >gpt-4-0613</th>\n",
       "      <td id=\"T_a0114_row12_col0\" class=\"data row12 col0\" >75.44%</td>\n",
       "      <td id=\"T_a0114_row12_col1\" class=\"data row12 col1\" >81.95%</td>\n",
       "      <td id=\"T_a0114_row12_col2\" class=\"data row12 col2\" >74.44%</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th id=\"T_a0114_level1_row13\" class=\"row_heading level1 row13\" >gpt-4-1106-preview</th>\n",
       "      <td id=\"T_a0114_row13_col0\" class=\"data row13 col0\" >76.69%</td>\n",
       "      <td id=\"T_a0114_row13_col1\" class=\"data row13 col1\" >82.71%</td>\n",
       "      <td id=\"T_a0114_row13_col2\" class=\"data row13 col2\" >73.43%</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th id=\"T_a0114_level1_row14\" class=\"row_heading level1 row14\" >lemur-70b-chat-v1</th>\n",
       "      <td id=\"T_a0114_row14_col0\" class=\"data row14 col0\" >58.65%</td>\n",
       "      <td id=\"T_a0114_row14_col1\" class=\"data row14 col1\" >46.62%</td>\n",
       "      <td id=\"T_a0114_row14_col2\" class=\"data row14 col2\" >56.14%</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th id=\"T_a0114_level1_row15\" class=\"row_heading level1 row15\" >text-davinci-002</th>\n",
       "      <td id=\"T_a0114_row15_col0\" class=\"data row15 col0\" >69.17%</td>\n",
       "      <td id=\"T_a0114_row15_col1\" class=\"data row15 col1\" >59.65%</td>\n",
       "      <td id=\"T_a0114_row15_col2\" class=\"data row15 col2\" >57.39%</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th id=\"T_a0114_level1_row16\" class=\"row_heading level1 row16\" >text-davinci-003</th>\n",
       "      <td id=\"T_a0114_row16_col0\" class=\"data row16 col0\" >75.44%</td>\n",
       "      <td id=\"T_a0114_row16_col1\" class=\"data row16 col1\" >76.94%</td>\n",
       "      <td id=\"T_a0114_row16_col2\" class=\"data row16 col2\" >69.67%</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n"
      ],
      "text/plain": [
       "<pandas.io.formats.style.Styler at 0x7f022ef569a0>"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Pivot accuracy into a (dataset, model) x mode table.\n",
    "_viz_df = (\n",
    "    df.set_index(['dataset', 'model', 'mode'])[['accuracy']]\n",
    "    .unstack()\n",
    ")\n",
    "\n",
    "# Percent-format and highlight the best mode in each row.\n",
    "(\n",
    "    _viz_df.style.format('{:.2%}')\n",
    "    .highlight_max(color='lightgreen', axis=1)\n",
    ")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/tmp/ipykernel_929580/2217442111.py:1: PerformanceWarning: dropping on a non-lexsorted multi-index without a level parameter may impact performance.\n",
      "  _viz_latex_df = _viz_df.sort_index().reset_index().drop(columns=['dataset'])\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "<style type=\"text/css\">\n",
       "#T_f245c_row0_col0 {\n",
       "  background-color: lightgreen;\n",
       "}\n",
       "</style>\n",
       "<table id=\"T_f245c\">\n",
       "  <thead>\n",
       "    <tr>\n",
       "      <th class=\"blank level0\" >&nbsp;</th>\n",
       "      <th id=\"T_f245c_level0_col0\" class=\"col_heading level0 col0\" >code_as_action</th>\n",
       "      <th id=\"T_f245c_level0_col1\" class=\"col_heading level0 col1\" >json_as_action</th>\n",
       "      <th id=\"T_f245c_level0_col2\" class=\"col_heading level0 col2\" >text_as_action</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th id=\"T_f245c_level0_row0\" class=\"row_heading level0 row0\" >0</th>\n",
       "      <td id=\"T_f245c_row0_col0\" class=\"data row0 col0\" >8</td>\n",
       "      <td id=\"T_f245c_row0_col1\" class=\"data row0 col1\" >5</td>\n",
       "      <td id=\"T_f245c_row0_col2\" class=\"data row0 col2\" >4</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n"
      ],
      "text/plain": [
       "<pandas.io.formats.style.Styler at 0x7f022d7c5b50>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\\begin{tabular}{lrrr}\n",
      "\\toprule\n",
      "{} &  code_as_action &  json_as_action &  text_as_action \\\\\n",
      "\\midrule\n",
      "0 &               8 &               5 &               4 \\\\\n",
      "\\bottomrule\n",
      "\\end{tabular}\n",
      "\n",
      "\\begin{tabular}{lrrr}\n",
      "\\toprule\n",
      "{} & \\multicolumn{3}{l}{accuracy} \\\\\n",
      "mode &       code_as_action &       json_as_action &       text_as_action \\\\\n",
      "model                              &                      &                      &                      \\\\\n",
      "\\midrule\n",
      "\\texttt{CodeLlama-13b-Instruct-hf} &  \\underline{$11.78$} &               $7.77$ &     $\\mathbf{14.04}$ \\\\\n",
      "\\texttt{CodeLlama-34b-Instruct-hf} &     $\\mathbf{17.29}$ &              $12.03$ &  \\underline{$16.79$} \\\\\n",
      "\\texttt{CodeLlama-7b-Instruct-hf}  &  \\underline{$12.53$} &              $12.03$ &     $\\mathbf{17.04}$ \\\\\n",
      "\\texttt{Llama-2-13b-chat-hf}       &     $\\mathbf{38.10}$ &               $8.52$ &  \\underline{$37.34$} \\\\\n",
      "\\texttt{Llama-2-70b-chat-hf}       &  \\underline{$35.59$} &              $14.29$ &     $\\mathbf{37.59}$ \\\\\n",
      "\\texttt{Llama-2-7b-chat-hf}        &     $\\mathbf{28.82}$ &              $11.28$ &  \\underline{$25.81$} \\\\\n",
      "\\texttt{Mistral-7B-Instruct-v0.1}  &   \\underline{$2.51$} &               $2.26$ &      $\\mathbf{3.01}$ \\\\\n",
      "\\texttt{claude-2}                  &     $\\mathbf{76.69}$ &              $59.40$ &  \\underline{$73.68$} \\\\\n",
      "\\texttt{claude-instant-1}          &     $\\mathbf{75.19}$ &              $64.91$ &  \\underline{$73.18$} \\\\\n",
      "\\texttt{gemini-pro}                &              $70.43$ &     $\\mathbf{73.18}$ &  \\underline{$71.18$} \\\\\n",
      "\\texttt{gpt-3.5-turbo-0613}        &     $\\mathbf{74.44}$ &  \\underline{$73.93$} &              $73.43$ \\\\\n",
      "\\texttt{gpt-3.5-turbo-1106}        &  \\underline{$75.44$} &     $\\mathbf{78.45}$ &              $73.43$ \\\\\n",
      "\\texttt{gpt-4-0613}                &  \\underline{$75.44$} &     $\\mathbf{81.95}$ &              $74.44$ \\\\\n",
      "\\texttt{gpt-4-1106-preview}        &  \\underline{$76.69$} &     $\\mathbf{82.71}$ &              $73.43$ \\\\\n",
      "\\texttt{lemur-70b-chat-v1}         &     $\\mathbf{58.65}$ &              $46.62$ &  \\underline{$56.14$} \\\\\n",
      "\\texttt{text-davinci-002}          &     $\\mathbf{69.17}$ &  \\underline{$59.65$} &              $57.39$ \\\\\n",
      "\\texttt{text-davinci-003}          &  \\underline{$75.44$} &     $\\mathbf{76.94}$ &              $69.67$ \\\\\n",
      "\\bottomrule\n",
      "\\end{tabular}\n",
      "\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/tmp/ipykernel_929580/2217442111.py:28: FutureWarning: In future versions `DataFrame.to_latex` is expected to utilise the base implementation of `Styler.to_latex` for formatting and rendering. The arguments signature may therefore change. It is recommended instead to use `DataFrame.style.to_latex` which also contains additional functionality.\n",
      "  print(_last_row.to_frame().T.to_latex(escape=False))\n",
      "/tmp/ipykernel_929580/2217442111.py:34: FutureWarning: In future versions `DataFrame.to_latex` is expected to utilise the base implementation of `Styler.to_latex` for formatting and rendering. The arguments signature may therefore change. It is recommended instead to use `DataFrame.style.to_latex` which also contains additional functionality.\n",
      "  _viz_latex_df.set_index(['model']).to_latex(escape=False, index=True, column_format='lrrr')\n"
     ]
    }
   ],
   "source": [
    "_viz_latex_df = _viz_df.sort_index().reset_index().drop(columns=['dataset'])\n",
    "\n",
    "\n",
    "def _process_row(row):\n",
    "    \"\"\"Format a row of accuracies as LaTeX percentages.\n",
    "\n",
    "    The row maximum is wrapped in \\\\mathbf, the runner-up is\n",
    "    underlined, and everything else is plain math mode.\n",
    "    \"\"\"\n",
    "    sorted_values = sorted(row, reverse=True)\n",
    "    max_value = sorted_values[0]\n",
    "    second_max_value = sorted_values[1]\n",
    "\n",
    "    def _format_value(value):\n",
    "        # Fractions -> percent with two decimals, e.g. 0.7669 -> '76.69'.\n",
    "        formatted_value = '{:.2f}'.format(value * 100)\n",
    "        if value == max_value:\n",
    "            return '$\\\\mathbf{' + formatted_value + '}$'\n",
    "        elif value == second_max_value:\n",
    "            return '\\\\underline{$' + formatted_value + '$}'\n",
    "        return '$' + formatted_value + '$'\n",
    "\n",
    "    return row.apply(_format_value)\n",
    "\n",
    "\n",
    "# How often each action mode achieves the best accuracy across models.\n",
    "_last_row = _viz_latex_df['accuracy'].idxmax(axis=1).value_counts()\n",
    "display(_last_row.to_frame().T.style.highlight_max(color='lightgreen', axis=1))\n",
    "print(_last_row.to_frame().T.to_latex(escape=False))\n",
    "\n",
    "# escape=False keeps the LaTeX markup produced above intact.\n",
    "_viz_latex_df['accuracy'] = _viz_latex_df['accuracy'].apply(_process_row, axis=1)\n",
    "_viz_latex_df['model'] = _viz_latex_df['model'].apply(lambda x: f'\\\\texttt{{{x}}}')\n",
    "\n",
    "print(\n",
    "    _viz_latex_df.set_index(['model']).to_latex(escape=False, index=True, column_format='lrrr')\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead tr th {\n",
       "        text-align: left;\n",
       "    }\n",
       "\n",
       "    .dataframe thead tr:last-of-type th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th colspan=\"3\" halign=\"left\">accuracy</th>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th></th>\n",
       "      <th>mode</th>\n",
       "      <th>code_as_action</th>\n",
       "      <th>json_as_action</th>\n",
       "      <th>text_as_action</th>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>dataset</th>\n",
       "      <th>model</th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th rowspan=\"17\" valign=\"top\">level-1-given-desc</th>\n",
       "      <th>CodeLlama-13b-Instruct-hf</th>\n",
       "      <td>11.78</td>\n",
       "      <td>7.77</td>\n",
       "      <td>14.04</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>CodeLlama-34b-Instruct-hf</th>\n",
       "      <td>17.29</td>\n",
       "      <td>12.03</td>\n",
       "      <td>16.79</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>CodeLlama-7b-Instruct-hf</th>\n",
       "      <td>12.53</td>\n",
       "      <td>12.03</td>\n",
       "      <td>17.04</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Llama-2-13b-chat-hf</th>\n",
       "      <td>38.10</td>\n",
       "      <td>8.52</td>\n",
       "      <td>37.34</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Llama-2-70b-chat-hf</th>\n",
       "      <td>35.59</td>\n",
       "      <td>14.29</td>\n",
       "      <td>37.59</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Llama-2-7b-chat-hf</th>\n",
       "      <td>28.82</td>\n",
       "      <td>11.28</td>\n",
       "      <td>25.81</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Mistral-7B-Instruct-v0.1</th>\n",
       "      <td>2.51</td>\n",
       "      <td>2.26</td>\n",
       "      <td>3.01</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>claude-2</th>\n",
       "      <td>76.69</td>\n",
       "      <td>59.40</td>\n",
       "      <td>73.68</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>claude-instant-1</th>\n",
       "      <td>75.19</td>\n",
       "      <td>64.91</td>\n",
       "      <td>73.18</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>gemini-pro</th>\n",
       "      <td>70.43</td>\n",
       "      <td>73.18</td>\n",
       "      <td>71.18</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>gpt-3.5-turbo-0613</th>\n",
       "      <td>74.44</td>\n",
       "      <td>73.93</td>\n",
       "      <td>73.43</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>gpt-3.5-turbo-1106</th>\n",
       "      <td>75.44</td>\n",
       "      <td>78.45</td>\n",
       "      <td>73.43</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>gpt-4-0613</th>\n",
       "      <td>75.44</td>\n",
       "      <td>81.95</td>\n",
       "      <td>74.44</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>gpt-4-1106-preview</th>\n",
       "      <td>76.69</td>\n",
       "      <td>82.71</td>\n",
       "      <td>73.43</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>lemur-70b-chat-v1</th>\n",
       "      <td>58.65</td>\n",
       "      <td>46.62</td>\n",
       "      <td>56.14</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>text-davinci-002</th>\n",
       "      <td>69.17</td>\n",
       "      <td>59.65</td>\n",
       "      <td>57.39</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>text-davinci-003</th>\n",
       "      <td>75.44</td>\n",
       "      <td>76.94</td>\n",
       "      <td>69.67</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                                                   accuracy                 \\\n",
       "mode                                         code_as_action json_as_action   \n",
       "dataset            model                                                     \n",
       "level-1-given-desc CodeLlama-13b-Instruct-hf          11.78           7.77   \n",
       "                   CodeLlama-34b-Instruct-hf          17.29          12.03   \n",
       "                   CodeLlama-7b-Instruct-hf           12.53          12.03   \n",
       "                   Llama-2-13b-chat-hf                38.10           8.52   \n",
       "                   Llama-2-70b-chat-hf                35.59          14.29   \n",
       "                   Llama-2-7b-chat-hf                 28.82          11.28   \n",
       "                   Mistral-7B-Instruct-v0.1            2.51           2.26   \n",
       "                   claude-2                           76.69          59.40   \n",
       "                   claude-instant-1                   75.19          64.91   \n",
       "                   gemini-pro                         70.43          73.18   \n",
       "                   gpt-3.5-turbo-0613                 74.44          73.93   \n",
       "                   gpt-3.5-turbo-1106                 75.44          78.45   \n",
       "                   gpt-4-0613                         75.44          81.95   \n",
       "                   gpt-4-1106-preview                 76.69          82.71   \n",
       "                   lemur-70b-chat-v1                  58.65          46.62   \n",
       "                   text-davinci-002                   69.17          59.65   \n",
       "                   text-davinci-003                   75.44          76.94   \n",
       "\n",
       "                                                             \n",
       "mode                                         text_as_action  \n",
       "dataset            model                                     \n",
       "level-1-given-desc CodeLlama-13b-Instruct-hf          14.04  \n",
       "                   CodeLlama-34b-Instruct-hf          16.79  \n",
       "                   CodeLlama-7b-Instruct-hf           17.04  \n",
       "                   Llama-2-13b-chat-hf                37.34  \n",
       "                   Llama-2-70b-chat-hf                37.59  \n",
       "                   Llama-2-7b-chat-hf                 25.81  \n",
       "                   Mistral-7B-Instruct-v0.1            3.01  \n",
       "                   claude-2                           73.68  \n",
       "                   claude-instant-1                   73.18  \n",
       "                   gemini-pro                         71.18  \n",
       "                   gpt-3.5-turbo-0613                 73.43  \n",
       "                   gpt-3.5-turbo-1106                 73.43  \n",
       "                   gpt-4-0613                         74.44  \n",
       "                   gpt-4-1106-preview                 73.43  \n",
       "                   lemur-70b-chat-v1                  56.14  \n",
       "                   text-davinci-002                   57.39  \n",
       "                   text-davinci-003                   69.67  "
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Raw accuracy table as percentages, rounded to two decimals.\n",
    "_acc_table = df.set_index(['dataset', 'model', 'mode'])[['accuracy']].unstack()\n",
    "(_acc_table * 100).round(2)\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "llm-agent",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
