{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Users/nategruver/opt/anaconda3/lib/python3.8/site-packages/pandas/core/computation/expressions.py:20: UserWarning: Pandas requires version '2.7.3' or newer of 'numexpr' (version '2.7.1' currently installed).\n",
      "  from pandas.core.computation.check import NUMEXPR_INSTALLED\n"
     ]
    },
    {
     "ename": "FileNotFoundError",
     "evalue": "[Errno 2] No such file or directory: 'eval/small_context_tuned/AirPassengersDataset.pkl'",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mFileNotFoundError\u001b[0m                         Traceback (most recent call last)",
      "\u001b[1;32m/Users/nategruver/Desktop/shell_dir/time-series-lm/figures/figure_1.ipynb Cell 1\u001b[0m line \u001b[0;36m5\n\u001b[1;32m     <a href='vscode-notebook-cell:/Users/nategruver/Desktop/shell_dir/time-series-lm/figures/figure_1.ipynb#W1sZmlsZQ%3D%3D?line=46'>47</a>\u001b[0m datasets \u001b[39m=\u001b[39m get_datasets()\n\u001b[1;32m     <a href='vscode-notebook-cell:/Users/nategruver/Desktop/shell_dir/time-series-lm/figures/figure_1.ipynb#W1sZmlsZQ%3D%3D?line=47'>48</a>\u001b[0m \u001b[39mfor\u001b[39;00m dsname,(train,test) \u001b[39min\u001b[39;00m datasets\u001b[39m.\u001b[39mitems():\n\u001b[1;32m     <a href='vscode-notebook-cell:/Users/nategruver/Desktop/shell_dir/time-series-lm/figures/figure_1.ipynb#W1sZmlsZQ%3D%3D?line=48'>49</a>\u001b[0m     \u001b[39m# if dsname == \"SunspotsDataset\":\u001b[39;00m\n\u001b[1;32m     <a href='vscode-notebook-cell:/Users/nategruver/Desktop/shell_dir/time-series-lm/figures/figure_1.ipynb#W1sZmlsZQ%3D%3D?line=49'>50</a>\u001b[0m     \u001b[39m#     continue\u001b[39;00m\n\u001b[1;32m     <a href='vscode-notebook-cell:/Users/nategruver/Desktop/shell_dir/time-series-lm/figures/figure_1.ipynb#W1sZmlsZQ%3D%3D?line=50'>51</a>\u001b[0m \n\u001b[1;32m     <a href='vscode-notebook-cell:/Users/nategruver/Desktop/shell_dir/time-series-lm/figures/figure_1.ipynb#W1sZmlsZQ%3D%3D?line=51'>52</a>\u001b[0m     \u001b[39m# print(dsname)\u001b[39;00m\n\u001b[0;32m---> <a href='vscode-notebook-cell:/Users/nategruver/Desktop/shell_dir/time-series-lm/figures/figure_1.ipynb#W1sZmlsZQ%3D%3D?line=52'>53</a>\u001b[0m     \u001b[39mwith\u001b[39;00m \u001b[39mopen\u001b[39;49m(\u001b[39mf\u001b[39;49m\u001b[39m'\u001b[39;49m\u001b[39meval/small_context_tuned/\u001b[39;49m\u001b[39m{\u001b[39;49;00mdsname\u001b[39m}\u001b[39;49;00m\u001b[39m.pkl\u001b[39;49m\u001b[39m'\u001b[39;49m,\u001b[39m'\u001b[39;49m\u001b[39mrb\u001b[39;49m\u001b[39m'\u001b[39;49m) \u001b[39mas\u001b[39;00m f:\n\u001b[1;32m     <a 
href='vscode-notebook-cell:/Users/nategruver/Desktop/shell_dir/time-series-lm/figures/figure_1.ipynb#W1sZmlsZQ%3D%3D?line=53'>54</a>\u001b[0m         data_dict \u001b[39m=\u001b[39m pickle\u001b[39m.\u001b[39mload(f)\n\u001b[1;32m     <a href='vscode-notebook-cell:/Users/nategruver/Desktop/shell_dir/time-series-lm/figures/figure_1.ipynb#W1sZmlsZQ%3D%3D?line=54'>55</a>\u001b[0m     \u001b[39mfor\u001b[39;00m model_name,preds \u001b[39min\u001b[39;00m data_dict\u001b[39m.\u001b[39mitems():\n\u001b[1;32m     <a href='vscode-notebook-cell:/Users/nategruver/Desktop/shell_dir/time-series-lm/figures/figure_1.ipynb#W1sZmlsZQ%3D%3D?line=55'>56</a>\u001b[0m         \u001b[39m# print(f\"\\t{model_name}\")\u001b[39;00m\n",
      "File \u001b[0;32m~/opt/anaconda3/lib/python3.8/site-packages/IPython/core/interactiveshell.py:284\u001b[0m, in \u001b[0;36m_modified_open\u001b[0;34m(file, *args, **kwargs)\u001b[0m\n\u001b[1;32m    277\u001b[0m \u001b[39mif\u001b[39;00m file \u001b[39min\u001b[39;00m {\u001b[39m0\u001b[39m, \u001b[39m1\u001b[39m, \u001b[39m2\u001b[39m}:\n\u001b[1;32m    278\u001b[0m     \u001b[39mraise\u001b[39;00m \u001b[39mValueError\u001b[39;00m(\n\u001b[1;32m    279\u001b[0m         \u001b[39mf\u001b[39m\u001b[39m\"\u001b[39m\u001b[39mIPython won\u001b[39m\u001b[39m'\u001b[39m\u001b[39mt let you open fd=\u001b[39m\u001b[39m{\u001b[39;00mfile\u001b[39m}\u001b[39;00m\u001b[39m by default \u001b[39m\u001b[39m\"\u001b[39m\n\u001b[1;32m    280\u001b[0m         \u001b[39m\"\u001b[39m\u001b[39mas it is likely to crash IPython. If you know what you are doing, \u001b[39m\u001b[39m\"\u001b[39m\n\u001b[1;32m    281\u001b[0m         \u001b[39m\"\u001b[39m\u001b[39myou can use builtins\u001b[39m\u001b[39m'\u001b[39m\u001b[39m open.\u001b[39m\u001b[39m\"\u001b[39m\n\u001b[1;32m    282\u001b[0m     )\n\u001b[0;32m--> 284\u001b[0m \u001b[39mreturn\u001b[39;00m io_open(file, \u001b[39m*\u001b[39;49margs, \u001b[39m*\u001b[39;49m\u001b[39m*\u001b[39;49mkwargs)\n",
      "\u001b[0;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: 'eval/small_context_tuned/AirPassengersDataset.pkl'"
     ]
    }
   ],
   "source": [
    "import pickle\n",
    "import sys\n",
    "from collections import defaultdict\n",
    "\n",
    "import numpy as np\n",
    "import pandas as pd  # was missing: pd.DataFrame / pd.concat are used below\n",
    "import matplotlib.pyplot as plt\n",
    "import seaborn as sns\n",
    "\n",
    "sys.path.append(\"..\")\n",
    "from data.small_context import get_datasets\n",
    "from data.metrics import calculate_crps\n",
    "\n",
    "sns.set(style=\"whitegrid\", font_scale=1)\n",
    "\n",
    "name_map = {\n",
    "    \"gp\": \"SM-GP\",\n",
    "    \"arima\": \"ARIMA\",\n",
    "    \"TCN\": \"TCN\",\n",
    "    \"N-BEATS\": \"N-BEATS\",\n",
    "    \"N-HiTS\": \"N-HiTS\",\n",
    "    'text-davinci-003':'GPT-3',\n",
    "    'LLaMA7B': 'LLaMA7B',\n",
    "    'LLaMA13B': 'LLaMA13B',\n",
    "    'LLaMA30B': 'LLaMA30B', \n",
    "    'LLaMA70B': 'LLaMA70B',\n",
    "    \"llama1_7B\": \"LLaMA 7B\",\n",
    "    \"llama1_13B\": \"LLaMA 13B\",\n",
    "    \"llama1_30B\": \"LLaMA 30B\",\n",
    "    \"llama1_70B\": \"LLaMA 70B\",\n",
    "    \"llama2_7B\": \"LLaMA-2 7B\",\n",
    "    \"llama2_13B\": \"LLaMA-2 13B\",\n",
    "    \"llama2_70B\": \"LLaMA-2 70B\",\n",
    "    \"llama2_7B_chat\": \"LLaMA-2 7B (chat)\",\n",
    "    \"llama2_13B_chat\": \"LLaMA-2 13B (chat)\",\n",
    "    \"llama2_70B_chat\": \"LLaMA-2 70B (chat)\",\n",
    "}\n",
    "\n",
    "hue_order = ['N-BEATS','SM-GP','TCN','N-HiTS','ARIMA']#, 'LLaMA70B']\n",
    "hue_order += [\"LLaMA-2 70B\", 'GPT-3']\n",
    "# hue_order += [\n",
    "#     \"LLaMA 7B\", \"LLaMA-2 7B\", \"LLaMA-2 7B (chat)\",\n",
    "#     \"LLaMA 13B\", \"LLaMA-2 13B\", \"LLaMA-2 13B (chat)\",\n",
    "#     \"LLaMA 30B\", \"LLaMA 70B\", \"LLaMA-2 70B\", \"LLaMA-2 70B (chat)\"\n",
    "# ]\n",
    "# Aggregate per-model metrics across the Darts datasets:\n",
    "#   nlls: NLL per dimension, crps: CRPS over 10 samples, mae: scale-normalized MAE.\n",
    "nlls = defaultdict(list)\n",
    "crps = defaultdict(list)\n",
    "mae = defaultdict(list)\n",
    "datasets = get_datasets()\n",
    "for dsname, (train, test) in datasets.items():\n",
    "    with open(f'eval/small_context_tuned/{dsname}.pkl', 'rb') as f:\n",
    "        data_dict = pickle.load(f)\n",
    "    for model_name, preds in data_dict.items():\n",
    "        if model_name in ['ada', 'babbage', 'curie']:\n",
    "            continue\n",
    "        if 'NLL/D' not in preds:\n",
    "            continue\n",
    "        nll = preds['NLL/D']\n",
    "        if model_name == 'text-davinci-003-tuned':\n",
    "            # NOTE(review): 'GPT3' is not a key of name_map (the map yields\n",
    "            # 'GPT-3'); confirm this rename if the tuned model is present.\n",
    "            model_name = 'GPT3'\n",
    "\n",
    "        # preds['samples'] may be an ndarray or a pandas object; use a plain array.\n",
    "        samples = preds['samples']\n",
    "        if not isinstance(samples, np.ndarray):\n",
    "            samples = samples.values\n",
    "        nlls[model_name].append(nll)\n",
    "        crps[model_name].append(calculate_crps(test.values, samples[:10], 10))\n",
    "        tmae = np.abs(test.values - preds['median']).mean() / np.abs(test.values).mean()\n",
    "        mae[model_name].append(tmae)\n",
    "\n",
    "llama_models = [\n",
    "    \"llama1_7B\",\n",
    "    \"llama2_7B\",\n",
    "    \"llama2_7B_chat\",\n",
    "    \"llama1_13B\",\n",
    "    \"llama2_13B\",\n",
    "    \"llama2_13B_chat\",\n",
    "    \"llama1_30B\",\n",
    "    \"llama1_70B\",\n",
    "    \"llama2_70B\",\n",
    "    \"llama2_70B_chat\",\n",
    "]\n",
    "for dsname, (train, test) in datasets.items():\n",
    "    for model_name in llama_models:\n",
    "        # The 70B runs come from a separate sweep directory (note the different\n",
    "        # leading hyperparameter in the filename: 1.0 vs 0.4).\n",
    "        if model_name in ['llama2_70B', 'llama2_70B_chat']:\n",
    "            fn = f'eval/llama_70B_sweep_sample/{model_name}/darts-{dsname}/1.0_0.9_0.99_0.3_3_,_.pkl'\n",
    "        else:\n",
    "            fn = f'eval/llama_2_results/{model_name}/darts-{dsname}/0.4_0.9_0.99_0.3_3_,_.pkl'\n",
    "        with open(fn, 'rb') as f:\n",
    "            preds = pickle.load(f)\n",
    "\n",
    "        if 'NLL/D' not in preds:\n",
    "            continue\n",
    "        nll = preds['NLL/D']\n",
    "\n",
    "        # preds['samples'] may be an ndarray or a pandas object; use a plain array.\n",
    "        samples = preds['samples']\n",
    "        if not isinstance(samples, np.ndarray):\n",
    "            samples = samples.values\n",
    "        nlls[model_name].append(nll)\n",
    "        crps[model_name].append(calculate_crps(test.values, samples[:10], 10))\n",
    "        tmae = np.abs(test.values - preds['median']).mean() / np.abs(test.values).mean()\n",
    "        mae[model_name].append(tmae)\n",
    "\n",
    "nlls = {k: np.array(v) for k, v in nlls.items()}\n",
    "crps = {k: np.array(v) for k, v in crps.items()}\n",
    "mae = {k: np.array(v) for k, v in mae.items()}\n",
    "\n",
    "print({k: len(v) for k, v in nlls.items()})\n",
    "\n",
    "# Strip the 'Dataset' suffix for cleaner labels.\n",
    "dataset_keys = [key.replace('Dataset', '') for key in datasets.keys()]\n",
    "\n",
    "fig, ax = plt.subplots(1, 1, figsize=(3.5, 2.2))\n",
    "dfs = [pd.DataFrame({'Dataset': dataset_keys, 'CRPS': v, 'Type': k}) for k, v in crps.items()]\n",
    "df = pd.concat(dfs)\n",
    "\n",
    "df['Type'] = df['Type'].apply(lambda x: name_map[x])\n",
    "\n",
    "palette = sns.color_palette('Dark2', len(hue_order))\n",
    "palette = palette[:2] + palette[3:-1] + ['#a60355'] + palette[2:3]\n",
    "\n",
    "sns.barplot(\n",
    "    order=hue_order,\n",
    "    y='Type',\n",
    "    x='CRPS',\n",
    "    data=df,\n",
    "    ax=ax,\n",
    "    palette=palette,\n",
    "    errwidth=1,\n",
    "    errorbar='se',\n",
    ")\n",
    "\n",
    "# Grey out the first five bars (the classical baselines).\n",
    "for i in range(5):\n",
    "    ax.patches[i].set_facecolor('#D3D3D3')\n",
    "    ax.patches[i].set_edgecolor('grey')\n",
    "    ax.patches[i].set_linewidth(0.5)\n",
    "\n",
    "ax.set_ylabel('')\n",
    "ax.legend(loc='upper right', frameon=True, framealpha=0.7)\n",
    "# Remove the space between the x tick labels and the x axis.\n",
    "ax.tick_params(axis='x', which='major', pad=0)\n",
    "ax.set_xticklabels(ax.get_xticklabels(), fontsize=9)\n",
    "\n",
    "ax.set_xlabel('CRPS', fontsize=14)\n",
    "ax.set_xlim((0, 0.2))\n",
    "\n",
    "# Rebuild the legend outside the axes.\n",
    "ax.get_legend().remove()\n",
    "handles, labels = ax.get_legend_handles_labels()\n",
    "ax.legend(\n",
    "    handles=handles,\n",
    "    labels=labels,\n",
    "    markerscale=1.5,\n",
    "    bbox_to_anchor=(1.05, 1),\n",
    "    loc='upper left',\n",
    "    borderaxespad=0.3,\n",
    ")\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.savefig('small_DARTS_CRPS.png', dpi=300, bbox_inches='tight')\n",
    "plt.savefig('small_DARTS_CRPS.pdf', bbox_inches='tight')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "df.to_csv(\"crps_top_fig.csv\", index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "sns.set(style=\"whitegrid\", font_scale=1)\n",
    "\n",
    "old_names = [\n",
    "    'ada',\n",
    "    'babbage',\n",
    "    'curie',\n",
    "    'text-davinci-003'\n",
    "]\n",
    "name_map = {\n",
    "    # \"arima\": \"ARIMA\",\n",
    "    'ada': \"Ada\",\n",
    "    'babbage': \"Babbage\",\n",
    "    'curie': \"Curie\",\n",
    "    'text-davinci-003':'Davinci',\n",
    "    # 'LLaMA7B':'LLaMA7B',\n",
    "    # 'LLaMA13B':'LLaMA13B',\n",
    "    # 'LLaMA30B':'LLaMA30B',\n",
    "    # 'LLaMA70B':'LLaMA70B',\n",
    "    \"llama1_7B\": \"LLaMA 7B\",\n",
    "    \"llama1_13B\": \"LLaMA 13B\",\n",
    "    \"llama1_30B\": \"LLaMA 30B\",\n",
    "    \"llama1_70B\": \"LLaMA 70B\",\n",
    "    \"llama2_7B\": \"LLaMA-2 7B\",\n",
    "    \"llama2_13B\": \"LLaMA-2 13B\",\n",
    "    \"llama2_70B\": \"LLaMA-2 70B\",\n",
    "    \"llama2_7B_chat\": \"LLaMA-2 7B (chat)\",\n",
    "    \"llama2_13B_chat\": \"LLaMA-2 13B (chat)\",\n",
    "    \"llama2_70B_chat\": \"LLaMA-2 70B (chat)\",\n",
    "}\n",
    "hue_order = ['SM-GP','N-BEATS','TCN','N-HiTS','ARIMA','GPT-3']#, 'LLaMA7B', 'LLaMA13B', 'LLaMA30B', 'LLaMA70B']\n",
    "# Aggregate metrics for the legacy OpenAI models across the Darts datasets.\n",
    "nlls = defaultdict(list)\n",
    "crps = defaultdict(list)\n",
    "mae = defaultdict(list)\n",
    "datasets = get_datasets()\n",
    "for dsname, (train, test) in datasets.items():\n",
    "    with open(f'eval/small_context_tuned/{dsname}.pkl', 'rb') as f:\n",
    "        data_dict = pickle.load(f)\n",
    "\n",
    "    for model_name in old_names:\n",
    "        preds = data_dict[model_name]\n",
    "\n",
    "        if 'NLL/D' not in preds:\n",
    "            continue\n",
    "        nll = preds['NLL/D']\n",
    "\n",
    "        # preds['samples'] may be an ndarray or a pandas object; use a plain array.\n",
    "        samples = preds['samples']\n",
    "        if not isinstance(samples, np.ndarray):\n",
    "            samples = samples.values\n",
    "        nlls[model_name].append(nll)\n",
    "        crps[model_name].append(calculate_crps(test.values, samples[:10], 10))\n",
    "        tmae = np.abs(test.values - preds['median']).mean() / np.abs(test.values).mean()\n",
    "        mae[model_name].append(tmae)\n",
    "\n",
    "llama_models = [\n",
    "    \"llama1_7B\",\n",
    "    \"llama2_7B\",\n",
    "    \"llama2_7B_chat\",\n",
    "    \"llama1_13B\",\n",
    "    \"llama2_13B\",\n",
    "    \"llama2_13B_chat\",\n",
    "    \"llama1_30B\",\n",
    "    \"llama1_70B\",\n",
    "    \"llama2_70B\",\n",
    "    \"llama2_70B_chat\",\n",
    "]\n",
    "for dsname, (train, test) in datasets.items():\n",
    "    for model_name in llama_models:\n",
    "        # The 70B runs come from a separate sweep directory (note the different\n",
    "        # leading hyperparameter in the filename: 1.0 vs 0.4).\n",
    "        if model_name in ['llama2_70B', 'llama2_70B_chat']:\n",
    "            fn = f'eval/llama_70B_sweep_sample/{model_name}/darts-{dsname}/1.0_0.9_0.99_0.3_3_,_.pkl'\n",
    "        else:\n",
    "            fn = f'eval/llama_2_results/{model_name}/darts-{dsname}/0.4_0.9_0.99_0.3_3_,_.pkl'\n",
    "        with open(fn, 'rb') as f:\n",
    "            preds = pickle.load(f)\n",
    "\n",
    "        if 'NLL/D' not in preds:\n",
    "            continue\n",
    "        nll = preds['NLL/D']\n",
    "\n",
    "        # preds['samples'] may be an ndarray or a pandas object; use a plain array.\n",
    "        samples = preds['samples']\n",
    "        if not isinstance(samples, np.ndarray):\n",
    "            samples = samples.values\n",
    "        nlls[model_name].append(nll)\n",
    "        crps[model_name].append(calculate_crps(test.values, samples[:10], 10))\n",
    "        tmae = np.abs(test.values - preds['median']).mean() / np.abs(test.values).mean()\n",
    "        mae[model_name].append(tmae)\n",
    "\n",
    "nlls = {k: np.array(v) for k, v in nlls.items()}\n",
    "crps = {k: np.array(v) for k, v in crps.items()}\n",
    "mae = {k: np.array(v) for k, v in mae.items()}\n",
    "\n",
    "# Update dataset keys by removing 'Dataset' substring\n",
    "dataset_keys = [key.replace('Dataset', '') for key in datasets.keys()]\n",
    "\n",
    "# fig, ax = plt.subplots(1, 1, figsize=(3, 3))\n",
    "# dfs = [pd.DataFrame({'Dataset':dataset_keys,'NLL/D':v,'Type':k}) for k,v in nlls.items()]\n",
    "# df = pd.concat(dfs)\n",
    "\n",
    "# df['Type'] = df['Type'].apply(lambda x: name_map[x])\n",
    "\n",
    "palette = sns.color_palette('Dark2', len(hue_order))\n",
    "palette = palette[:2] + palette[3:] + palette[2:3]\n",
    "\n",
    "mmlu_numbers = {\n",
    "    'ada': 0.238,\n",
    "    'babbage': 0.235,\n",
    "    'curie': 0.237,\n",
    "    'text-davinci-003': 0.569, \n",
    "    'cohere-medium': 0.279,\n",
    "    'cohere-base-light': 0.264, \n",
    "    'cohere-base': 0.324,\n",
    "    'cohere-command-nightly': 0.452,\n",
    "    'forefront-gpt-j-6b-vanilla': 0.249,\n",
    "    \"alephalpha-luminous-extended\": 0.321, \n",
    "    \"alephalpha-luminous-supreme\": 0.452,   \n",
    "    # 'ARIMA': 0.,\n",
    "    'cohere-command-light': 0.264,\n",
    "    'cohere-command-light-nightly': 0.264,\n",
    "    'forefront-gpt-neox-20b-vanilla': 0.24,\n",
    "    # 'LLaMA7B': 0.351, \n",
    "    # 'LLaMA13B': 0.469, \n",
    "    # 'LLaMA30B': 0.578, \n",
    "    # 'LLaMA70B': 0.634,\n",
    "    'llama1_7B': 0.351,\n",
    "    'llama2_7B': 0.453,\n",
    "    # 'llama2_7B_chat': 0.469,\n",
    "    'llama1_13B': 0.469,\n",
    "    'llama2_13B': 0.548,\n",
    "    # 'llama2_13B_chat': 0.469,\n",
    "    'llama1_30B': 0.578,\n",
    "    'llama1_70B': 0.634,\n",
    "    'llama2_70B': 0.689,\n",
    "    # 'llama2_70B_chat': 0.578,\n",
    "}\n",
    "\n",
    "# One summary row per non-chat model: mean forecasting metrics vs. MMLU accuracy.\n",
    "records = []\n",
    "for k, nll in nlls.items():\n",
    "    if \"chat\" in k:\n",
    "        continue\n",
    "    records.append({\n",
    "        'MAE': np.mean(mae[k]),\n",
    "        'CRPS': np.mean(crps[k]),\n",
    "        'NLL/D': np.mean(nll),\n",
    "        'Type': k,\n",
    "        'MMLU Accuracy': mmlu_numbers[k],\n",
    "    })\n",
    "df = pd.DataFrame(records)\n",
    "\n",
    "\n",
    "host_name_map = {\n",
    "    'openai': 'OpenAI',\n",
    "    'cohere': 'Cohere',\n",
    "    'forefront': 'Forefront',\n",
    "    'alephalpha': 'Aleph Alpha',\n",
    "    'LLaMA': 'LLaMA'\n",
    "}\n",
    "\n",
    "def hostify(x):\n",
    "    \"\"\"Prefix OpenAI model names with their host; hyphenate LLaMA names.\"\"\"\n",
    "    if x in ['Ada', 'Babbage', 'Curie', 'Davinci']:\n",
    "        return 'openai-' + x\n",
    "    if 'LLaMA' in x:\n",
    "        # NOTE(review): 'LLaMA-2 70B' becomes 'LLaMA--2 70B' here; the split on\n",
    "        # '-' below still yields host 'LLaMA', but confirm this is intended.\n",
    "        return x.replace('LLaMA', 'LLaMA-')\n",
    "    return x\n",
    "\n",
    "df['Type'] = df['Type'].apply(lambda x: name_map[x])\n",
    "df['Type'] = df['Type'].apply(hostify)\n",
    "df['host'] = df['Type'].apply(lambda x: host_name_map[x.split(\"-\")[0]])\n",
    "\n",
    "fig, ax = plt.subplots(1, 1, figsize=(3.5, 2), gridspec_kw={'wspace': 0.6})\n",
    "\n",
    "# Linear fit of CRPS vs. MMLU; regplot's own points are drawn white so only\n",
    "# the black scatter below is visible.\n",
    "sns.regplot(\n",
    "    data=df,\n",
    "    x='MMLU Accuracy',\n",
    "    y='CRPS',\n",
    "    order=1,\n",
    "    ax=ax,\n",
    "    scatter_kws={'color': 'white'},\n",
    ")\n",
    "\n",
    "sns.scatterplot(\n",
    "    data=df,\n",
    "    x='MMLU Accuracy',\n",
    "    y='CRPS',\n",
    "    color='black',\n",
    "    ax=ax,\n",
    ")\n",
    "\n",
    "# Remove padding between the tick labels and the axes.\n",
    "ax.tick_params(axis='y', which='major', pad=0)\n",
    "ax.tick_params(axis='x', which='major', pad=0)\n",
    "\n",
    "ax.set_xlabel(ax.get_xlabel(), fontsize=16)\n",
    "ax.set_ylabel(ax.get_ylabel(), fontsize=16)\n",
    "\n",
    "# Apply the layout before saving so the exported files match the display\n",
    "# (previously tight_layout ran after savefig and had no effect on the files).\n",
    "plt.tight_layout()\n",
    "plt.savefig('small_crps_vs_mmlu.png', bbox_inches='tight', dpi=300)\n",
    "plt.savefig('small_crps_vs_mmlu.svg', bbox_inches='tight')\n",
    "plt.show()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "df.to_csv(\"mmlu_top_fig.csv\", index=False)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "base",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.3"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
