{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e6bc27b1-7903-4d36-9198-190923b85d71",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Within this notebook we adopt the convention that for metrics lower is better.\n",
    "# For metrics where higher is better like AUC we flip the sign to negative.\n",
    "\n",
    "is_azure = False  # if this is set to True, login with 'az login' before\n",
    "n_replicates = 25  # 25 is almost costless since with 30 we get to saturation of 650 runners in 2 hours\n",
    "n_instances = 810\n",
    "\n",
    "force_recreate = False\n",
    "exist_ok = True\n",
    "TIMEOUT_SEC = 60 * 60 * 24 * 180  # 180 days\n",
    "wheel_filepaths = ['interpret_core-0.7.1-py3-none-any.whl', 'powerlift-0.1.12-py3-none-any.whl']\n",
    "\n",
    "import datetime\n",
    "experiment_name = datetime.datetime.now().strftime('%Y_%m_%d_%H%M__') + 'myexperiment'\n",
    "# experiment_name = 'yyyy_mm_dd_hhmm__myexperiment'\n",
    "\n",
    "print('Experiment name: ' + experiment_name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "da595392-3475-42f9-80c7-859c8165d613",
   "metadata": {},
   "outputs": [],
   "source": [
    "# use exact versions for reproducibility of the RANK ordering\n",
    "requirements = \"numpy==1.26.4 pandas==2.2.2 scikit-learn==1.5.1 optuna==4.0.0 optuna-integration==4.0.0 xgboost==2.1.0 lightgbm==4.5.0 catboost==1.2.5 aplr==10.6.1 tabpfn==2.0.1\"\n",
    "!pip install -U --quiet {requirements}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8489becf-d522-42a0-af6e-ec99c12d6a87",
   "metadata": {},
   "outputs": [],
   "source": [
    "# install interpret if not already installed\n",
    "try:\n",
    "    import interpret\n",
    "except ModuleNotFoundError:\n",
    "    !pip install -U --quiet interpret-core"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a5674068-d971-49df-b8a1-60bf91441def",
   "metadata": {},
   "outputs": [],
   "source": [
    "# install powerlift if not already installed\n",
    "try:\n",
    "    import powerlift\n",
    "except ModuleNotFoundError:\n",
    "    !pip install -U --quiet powerlift[datasets,postgres]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9e7ee509-945c-4376-aab9-72387298bffc",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "if is_azure:\n",
    "    import requests\n",
    "    import json\n",
    "    import subprocess\n",
    "    from azure.identity import AzureCliCredential\n",
    "    credential = AzureCliCredential()\n",
    "    access_token = credential.get_token(\"https://graph.microsoft.com/.default\").token\n",
    "    headers = {'Authorization': f'Bearer {access_token}', 'Content-Type': 'application/json'}\n",
    "    azure_client_id = requests.get('https://graph.microsoft.com/v1.0/me', headers=headers).json().get('id')\n",
    "    azure_tenant_id = requests.get('https://graph.microsoft.com/v1.0/organization', headers=headers).json()['value'][0].get('id')\n",
    "    subscription_id = json.loads(subprocess.run(\"az account show\", capture_output=True, text=True, shell=True).stdout).get(\"id\")\n",
    "\n",
    "    from dotenv import load_dotenv\n",
    "    load_dotenv()\n",
    "    conn_str = os.getenv(\"DOCKER_DB_URL\")\n",
    "    resource_group = os.getenv(\"AZURE_RESOURCE_GROUP\")\n",
    "else:\n",
    "    conn_str = f\"sqlite:///{os.getcwd()}/powerlift.db\"\n",
    "\n",
    "from powerlift.bench import Store, Benchmark\n",
    "store = Store(conn_str, force_recreate=force_recreate)\n",
    "benchmark = Benchmark(store, name=experiment_name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0e7cfe45-d0f1-4951-953e-cafefee1ae2d",
   "metadata": {},
   "outputs": [],
   "source": [
    "def trial_filter(task):\n",
    "    min_samples = 1\n",
    "    max_samples = 1000000000000\n",
    "    min_features = 1\n",
    "    max_features = 1000000000000\n",
    "    min_total_categories = 0\n",
    "    max_total_categories = 1000000000000\n",
    "    max_classes = 1000000000000\n",
    "    \n",
    "    #max_samples = 10000  # For TabPFN\n",
    "    #max_features = 500   # For TabPFN\n",
    "    # ? max_total_categories = 0   # For TabPFN\n",
    "    #max_classes = 10   # For TabPFN\n",
    "    \n",
    "    if task.n_samples < min_samples:\n",
    "        return []\n",
    "    if max_samples < task.n_samples:\n",
    "        return []\n",
    "    if task.n_features < min_features:\n",
    "        return []\n",
    "    if max_features < task.n_features:\n",
    "        return []\n",
    "    if task.total_categories < min_total_categories:\n",
    "        return []\n",
    "    if max_total_categories < task.total_categories:\n",
    "        return []\n",
    "    if max_classes < task.n_classes:\n",
    "        return []\n",
    "\n",
    "\n",
    "    if task.origin == \"openml_automl_regression\":\n",
    "        pass  # include in benchmark\n",
    "    elif task.origin == \"openml_cc18\":\n",
    "        pass  # include in benchmark\n",
    "    elif task.origin == \"openml_automl_classification\":\n",
    "        return []\n",
    "    elif task.origin == \"pmlb\":\n",
    "        if task.problem == \"binary\":\n",
    "            return []\n",
    "        elif task.problem == \"multiclass\":\n",
    "            return []\n",
    "        elif task.problem == \"regression\":\n",
    "            return []\n",
    "        else:\n",
    "            raise Exception(f\"Unrecognized problem {task.problem}\")\n",
    "    else:\n",
    "        raise Exception(f\"Unrecognized origin {task.origin}\")\n",
    "\n",
    "    \n",
    "    exclude_set = set()\n",
    "\n",
    "    exclude_set = set(['Devnagari-Script', 'mnist_784', 'isolet', 'Fashion-MNIST', 'har'])  # TODO: reintroduce\n",
    "\n",
    "#    exclude_set = set([\n",
    "#        'Fashion-MNIST', 'mfeat-pixel', 'Bioresponse',\n",
    "#        'mfeat-factors', 'isolet', 'cnae-9', \"Internet-Advertisements\",\n",
    "#        'har', 'Devnagari-Script', 'mnist_784', 'CIFAR_10',\n",
    "#        'Airlines_DepDelay_10M',\n",
    "#    ])\n",
    "    if task.name in exclude_set:\n",
    "        return []\n",
    "\n",
    "\n",
    "    # exclude duplicates of a dataset if they appear twice\n",
    "    global global_duplicates\n",
    "    try:\n",
    "        duplicates = global_duplicates\n",
    "    except NameError:\n",
    "        duplicates = set()\n",
    "        global_duplicates = duplicates\n",
    "    key = (task.name, task.n_samples, task.n_features)\n",
    "    if key in duplicates:\n",
    "        print(f\"Excluding duplicate: {key}\")\n",
    "        return []\n",
    "    else:\n",
    "        duplicates.add(key)\n",
    "\n",
    "\n",
    "    return [\n",
    "        (\"ebm\", {'interactions': 0.5}),\n",
    "        (\"ebm\", {'interactions': 0.0}),\n",
    "        #\"ebm_opt\",\n",
    "        \"xgb\",\n",
    "        #\"xgb_opt\",\n",
    "        \"lgbm\",\n",
    "        #\"lgbm_opt\",\n",
    "        \"catb\",\n",
    "        #\"catb_opt\",\n",
    "        \"rf_xgb\",\n",
    "        \"rf_sk\",\n",
    "        \"ert\",\n",
    "        \"tree\",\n",
    "        \"elastic\",\n",
    "        \"sgd\",\n",
    "        \"lm\",\n",
    "        \"lsvm\",\n",
    "        \"svm\",\n",
    "        \"nn\",\n",
    "        \"knn\",\n",
    "        \"aplr\",\n",
    "        #\"tabpfn\",\n",
    "    ]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8b43ca15-4a20-4622-a877-0a5af322b2cb",
   "metadata": {},
   "outputs": [],
   "source": [
    "def trial_runner(trial):\n",
    "    seed = 42\n",
    "    seed += trial.replicate_num\n",
    "    max_samples = None\n",
    "    n_calibration_folds = 4  # 4 uses all cores on the containers\n",
    "\n",
    "    from interpret.glassbox import ExplainableBoostingClassifier, ExplainableBoostingRegressor\n",
    "    from interpret.develop import set_option\n",
    "    from interpret.utils._native import Native\n",
    "    from xgboost import XGBClassifier, XGBRegressor, XGBRFClassifier, XGBRFRegressor\n",
    "    from lightgbm import LGBMClassifier, LGBMRegressor\n",
    "    from catboost import CatBoostClassifier, CatBoostRegressor\n",
    "    from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor, ExtraTreesClassifier, ExtraTreesRegressor\n",
    "    from sklearn.linear_model import LogisticRegression, LinearRegression, ElasticNet, SGDClassifier, SGDRegressor\n",
    "    from sklearn.svm import LinearSVC, LinearSVR, SVC, SVR\n",
    "    from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor\n",
    "    from sklearn.neural_network import MLPClassifier, MLPRegressor\n",
    "    from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor\n",
    "    from aplr import APLRClassifier, APLRRegressor\n",
    "    from tabpfn import TabPFNClassifier, TabPFNRegressor\n",
    "    from sklearn.model_selection import train_test_split\n",
    "    from sklearn.preprocessing import OneHotEncoder, StandardScaler\n",
    "    from sklearn.impute import SimpleImputer\n",
    "    from sklearn.compose import ColumnTransformer\n",
    "    from sklearn.pipeline import Pipeline\n",
    "    from sklearn.calibration import CalibratedClassifierCV\n",
    "    import optuna\n",
    "    from optuna_integration.sklearn import OptunaSearchCV\n",
    "    from sklearn.metrics import log_loss, roc_auc_score, brier_score_loss, precision_score, recall_score, accuracy_score, balanced_accuracy_score\n",
    "    from sklearn.metrics import root_mean_squared_error, r2_score, mean_absolute_error, mean_absolute_percentage_error, median_absolute_error\n",
    "    import numpy as np\n",
    "    from time import time\n",
    "    import warnings\n",
    "    import gc\n",
    "    import re\n",
    "    import random\n",
    "\n",
    "    # Turn off AVX512F because in Azure some machines have it and others do not leading to non-repeatable results.\n",
    "    set_option(\"acceleration\", ~Native.AccelerationFlags_AVX512F)\n",
    "    \n",
    "    random.seed(seed)\n",
    "    np.random.seed(seed)\n",
    "\n",
    "    X, y = trial.task.data()\n",
    "\n",
    "    if trial.task.problem == \"regression\":\n",
    "        q75, q25 = np.percentile(y, [75, 25])\n",
    "        interquartile_range = q75 - q25\n",
    "\n",
    "    for col in X.columns:\n",
    "        # catboost doesn't like missing categoricals, so make them a category\n",
    "        col_data = X[col]\n",
    "        if str(col_data.dtype) == \"category\" and col_data.isnull().any():\n",
    "            X[col] = col_data.cat.add_categories('nan').fillna('nan')\n",
    "    \n",
    "    stratification = None\n",
    "    if trial.task.problem in [\"binary\", \"multiclass\"]:\n",
    "        # stratification = y\n",
    "        pass  # Re-enable stratification if dataset fails from absent class in train/test sets (PMLB)\n",
    "\n",
    "    # Airlines_DepDelay_10M crashes on 16GB machines using 20% test set when we require dense one hot encoded data (APLR).\n",
    "    test_size = 0.2 if trial.task.name not in {\"Airlines_DepDelay_10M\"} else 0.1\n",
    "    \n",
    "    fit_params = {}\n",
    "    fit_params[\"X\"], X_test, fit_params[\"y\"], y_test = train_test_split(X, y, test_size=test_size, stratify=stratification, random_state=seed)\n",
    "    del y\n",
    "    del X\n",
    "\n",
    "    cat_bools = trial.task.meta[\"categorical_mask\"]\n",
    "    cat_cols = [i for i, val in enumerate(cat_bools) if val]\n",
    "    num_cols = [i for i, val in enumerate(cat_bools) if not val]\n",
    "\n",
    "    # Build optional preprocessor for use by methods below\n",
    "    # missing categoricals already handled above by making new \"nan\" category\n",
    "    cat_encoder = OneHotEncoder(handle_unknown=\"ignore\", sparse_output=True, dtype=np.int16)\n",
    "    num_imputer = Pipeline(steps=[('imputer', SimpleImputer(strategy='mean')), ('scaler', StandardScaler())])\n",
    "    transformers = [(\"cat\", cat_encoder, cat_cols), (\"num\", num_imputer, num_cols)]\n",
    "    p = ColumnTransformer(transformers=transformers)  #, sparse_threshold=1.0)  # densify or sparsify\n",
    "\n",
    "    def callback_generator(seconds):\n",
    "        class Callback:\n",
    "            def __init__(self, seconds):\n",
    "                self.seconds = seconds\n",
    "            def __call__(self, bag_index, step_index, progress, metric):\n",
    "                import time\n",
    "                if not hasattr(self, 'end_time'):\n",
    "                    self.end_time = time.monotonic() + self.seconds\n",
    "                    return False\n",
    "                else:\n",
    "                    return time.monotonic() > self.end_time\n",
    "        return Callback(seconds)\n",
    "\n",
    "    ebm_params = trial.meta\n",
    "    xgb_params = trial.meta\n",
    "    lgbm_params = trial.meta\n",
    "    catboost_params = trial.meta\n",
    "    rf_xgb_params = trial.meta\n",
    "    rf_sk_params = trial.meta\n",
    "    ert_params = trial.meta\n",
    "    tree_params = trial.meta\n",
    "    elastic_params = trial.meta\n",
    "    sgd_params = trial.meta\n",
    "    lm_params = trial.meta\n",
    "    lsvm_params = trial.meta\n",
    "    svm_params = trial.meta\n",
    "    nn_params = trial.meta\n",
    "    knn_params = trial.meta\n",
    "    aplr_params = trial.meta\n",
    "    tabpfn_params = trial.meta\n",
    "\n",
    "    # ebm_params[\"callback\"] = callback_generator(60.0 * 60.0)  # 1 hour\n",
    "    ebm_params[\"feature_types\"] = [\"nominal\" if cat else \"continuous\" for cat in cat_bools]\n",
    "    ebm_params[\"n_jobs\"] = -1\n",
    "    ebm_params[\"random_state\"] = seed\n",
    "    xgb_params[\"enable_categorical\"] = True\n",
    "    xgb_params[\"feature_types\"] = [\"c\" if cat else \"q\" for cat in cat_bools]\n",
    "    lgbm_params[\"verbosity\"] = -1\n",
    "    catboost_params[\"verbose\"] = False\n",
    "    rf_xgb_params[\"enable_categorical\"] = True\n",
    "    rf_xgb_params[\"feature_types\"] = [\"c\" if cat else \"q\" for cat in cat_bools]\n",
    "    rf_sk_params[\"random_state\"] = seed\n",
    "    rf_sk_params[\"n_jobs\"] = -1\n",
    "    ert_params[\"n_jobs\"] = -1\n",
    "    ert_params[\"random_state\"] = seed\n",
    "    tree_params[\"random_state\"] = seed\n",
    "    elastic_params[\"random_state\"] = seed\n",
    "    sgd_params[\"random_state\"] = seed\n",
    "    lm_params[\"n_jobs\"] = -1\n",
    "    lsvm_params[\"random_state\"] = seed\n",
    "    nn_params[\"random_state\"] = seed\n",
    "    knn_params[\"n_jobs\"] = -1\n",
    "    aplr_params[\"m\"] = 3000\n",
    "    tabpfn_params[\"random_state\"] = seed\n",
    "    #tabpfn_params[\"categorical_features_indices\"] = ...\n",
    "\n",
    "    if 1700 < trial.task.n_features:\n",
    "        # TODO: EBMs can crash for now with too many interactions, so limit it until we have better fix\n",
    "        # Bioresponse with 1776 features works most of the time, but occasionally fails.\n",
    "        # Santander_transaction_value with 4991 features does not work.\n",
    "        ebm_params[\"interactions\"] = 0\n",
    "\n",
    "    # DEBUG params to make the algorithms super fast\n",
    "    #if 10000 < len(fit_params[\"y\"]):\n",
    "    #    debug_stratify = fit_params[\"y\"] if trial.task.problem in [\"binary\", \"multiclass\"] else None\n",
    "    #    _, fit_params[\"X\"], _, fit_params[\"y\"] = train_test_split(fit_params[\"X\"], fit_params[\"y\"], test_size=5000, stratify=debug_stratify, random_state=seed)\n",
    "    #ebm_params[\"max_rounds\"] = 1\n",
    "    #ebm_params[\"interactions\"] = 0\n",
    "    #xgb_params[\"n_estimators\"] = 1\n",
    "    #lgbm_params[\"n_estimators\"] = 1\n",
    "    #catboost_params[\"n_estimators\"] = 1\n",
    "    #rf_xgb_params[\"n_estimators\"] = 1\n",
    "    #rf_sk_params[\"n_estimators\"] = 1\n",
    "    #ert_params[\"n_estimators\"] = 1\n",
    "    #tree_params[\"max_depth\"] = 1\n",
    "    #elastic_params[\"max_iter\"] = 1\n",
    "    #sgd_params[\"max_iter\"] = 1\n",
    "    #lsvm_params[\"max_iter\"] = 1\n",
    "    #nn_params[\"max_iter\"] = 1\n",
    "    #knn_params[\"n_neighbors\"] = 1\n",
    "    #knn_params[\"leaf_size\"] = 1\n",
    "    #aplr_params[\"m\"] = 1\n",
    "\n",
    "    # for these datasets, we have to subsample so much it is probably better to just use non-optimized\n",
    "    ebm_classification_non_opt = {\"Devnagari-Script\", \"CIFAR_10\", \"Fashion-MNIST\", \"mnist_784\", \"isolet\", \"MiceProtein\", \"cnae-9\", \"Bioresponse\", \"Internet-Advertisements\", \"madelon\", \"har\", \"texture\"}\n",
    "    xgb_classification_non_opt = {\"Devnagari-Script\", \"CIFAR_10\", \"mnist_784\", \"Fashion-MNIST\", \"isolet\"}\n",
    "    lgbm_classification_non_opt = {\"Devnagari-Script\", \"CIFAR_10\", \"mnist_784\", \"Fashion-MNIST\", \"isolet\"}\n",
    "    catb_classification_non_opt = {\"Devnagari-Script\", \"CIFAR_10\", \"Fashion-MNIST\", \"mnist_784\", \"isolet\", \"MiceProtein\", \"madelon\"}\n",
    "\n",
    "    # Specify method\n",
    "    if trial.task.problem in [\"binary\", \"multiclass\"]:\n",
    "        if trial.method == \"ebm\" or trial.method == \"ebm_opt\" and trial.task.name in ebm_classification_non_opt:\n",
    "            for param, val in ebm_params.copy().items():\n",
    "                try:\n",
    "                    set_option(param, val)\n",
    "                    del ebm_params[param]\n",
    "                except:\n",
    "                    pass\n",
    "            est = ExplainableBoostingClassifier(**ebm_params)\n",
    "        elif trial.method == \"ebm_opt\":\n",
    "            for param, val in ebm_params.copy().items():\n",
    "                try:\n",
    "                    set_option(param, val)\n",
    "                    del ebm_params[param]\n",
    "                except:\n",
    "                    pass\n",
    "            # TODO: change these optimization parameters\n",
    "            param_grid = {\n",
    "                'smoothing_rounds': optuna.distributions.IntDistribution(1, 4000, log=True),\n",
    "                'interactions': optuna.distributions.FloatDistribution(0.0, 0.999),\n",
    "                'inner_bags': optuna.distributions.IntDistribution(0, 0, step=50),  # would prefer 50\n",
    "                'max_bins': optuna.distributions.IntDistribution(256, 65536, log=True),\n",
    "                'max_interaction_bins': optuna.distributions.IntDistribution(8, 128, log=True),\n",
    "                'greedy_ratio': optuna.distributions.FloatDistribution(0.0001, 4.0),\n",
    "                'cyclic_progress': optuna.distributions.FloatDistribution(0.0, 1.0),\n",
    "                'outer_bags': optuna.distributions.IntDistribution(14, 14),  # would prefer more\n",
    "                'interaction_smoothing_rounds': optuna.distributions.IntDistribution(1, 500, log=True),\n",
    "                'learning_rate': optuna.distributions.FloatDistribution(0.0025, 0.5, log=True),\n",
    "                'max_leaves': optuna.distributions.IntDistribution(2, 5),\n",
    "                'min_samples_leaf': optuna.distributions.IntDistribution(2, 100, log=True),\n",
    "                'min_hessian': optuna.distributions.FloatDistribution(0.000001, 10.0, log=True),\n",
    "                'max_rounds': optuna.distributions.IntDistribution(25000, 25000),\n",
    "                'early_stopping_rounds': optuna.distributions.IntDistribution(50, 50),\n",
    "                'early_stopping_tolerance': optuna.distributions.FloatDistribution(1e-10, 1e-5, log=True),\n",
    "                'validation_size': optuna.distributions.FloatDistribution(0.1, 0.5),\n",
    "            }\n",
    "            est = OptunaSearchCV(\n",
    "                estimator=ExplainableBoostingClassifier(**ebm_params),\n",
    "                param_distributions=param_grid,\n",
    "                cv=n_calibration_folds,\n",
    "                n_trials=50,\n",
    "                scoring='neg_log_loss',\n",
    "                verbose=0,\n",
    "                random_state=seed,\n",
    "                n_jobs=1  # EBM uses the cores efficiently\n",
    "            )\n",
    "        elif trial.method == \"xgb\" or trial.method == \"xgb_opt\" and trial.task.name in xgb_classification_non_opt:\n",
    "            est = XGBClassifier(**xgb_params)\n",
    "            fit_params[\"verbose\"] = False\n",
    "        elif trial.method == \"xgb_opt\":\n",
    "            # from https://github.com/optuna/optuna-examples/blob/main/xgboost/xgboost_cv.py\n",
    "            # TODO: change and harmonize these optimization parameters\n",
    "            param_grid = {\n",
    "                'n_estimators': optuna.distributions.IntDistribution(50, 2000, log=True),\n",
    "                'max_depth': optuna.distributions.IntDistribution(1, 9),\n",
    "                'learning_rate': optuna.distributions.FloatDistribution(0.005, 0.5, log=True),\n",
    "                'gamma': optuna.distributions.FloatDistribution(1e-8, 1.0, log=True),\n",
    "                'min_child_weight': optuna.distributions.FloatDistribution(2, 10),\n",
    "                'subsample': optuna.distributions.FloatDistribution(0.2, 1.0),\n",
    "                'colsample_bytree': optuna.distributions.FloatDistribution(0.2, 1.0),\n",
    "                'reg_alpha': optuna.distributions.FloatDistribution(1e-8, 1.0, log=True),\n",
    "                'reg_lambda': optuna.distributions.FloatDistribution(1e-8, 1.0, log=True),\n",
    "                'grow_policy': optuna.distributions.CategoricalDistribution([\"depthwise\", \"lossguide\"]),\n",
    "            }\n",
    "            est = OptunaSearchCV(\n",
    "                estimator=XGBClassifier(**xgb_params),\n",
    "                param_distributions=param_grid,\n",
    "                cv=n_calibration_folds,\n",
    "                n_trials=50,\n",
    "                scoring='neg_log_loss',\n",
    "                verbose=0,\n",
    "                random_state=seed,\n",
    "                n_jobs=1  # catboost uses the cores efficiently\n",
    "            )\n",
    "            fit_params[\"verbose\"] = False\n",
    "        elif trial.method == \"lgbm\" or trial.method == \"lgbm_opt\" and trial.task.name in lgbm_classification_non_opt:\n",
    "            est = LGBMClassifier(**lgbm_params)\n",
    "            fit_params[\"categorical_feature\"] = cat_cols\n",
    "        elif trial.method == \"lgbm_opt\":\n",
    "            # TODO: change and harmonize these optimization parameters\n",
    "            param_grid = {\n",
    "                'num_leaves': optuna.distributions.IntDistribution(2, 256, log=True),\n",
    "                'max_depth': optuna.distributions.IntDistribution(-1, 30),\n",
    "                'learning_rate': optuna.distributions.FloatDistribution(0.005, 0.5, log=True),\n",
    "                'n_estimators': optuna.distributions.IntDistribution(50, 2000, log=True),\n",
    "                'min_child_samples': optuna.distributions.IntDistribution(2, 100),\n",
    "                'subsample_freq': optuna.distributions.IntDistribution(1, 1),\n",
    "                'subsample': optuna.distributions.FloatDistribution(0.4, 1.0),\n",
    "                'colsample_bytree': optuna.distributions.FloatDistribution(0.4, 1.0),\n",
    "                'reg_alpha': optuna.distributions.FloatDistribution(1e-8, 10.0, log=True),\n",
    "                'reg_lambda': optuna.distributions.FloatDistribution(1e-8, 10.0, log=True)\n",
    "            }\n",
    "            est = OptunaSearchCV(\n",
    "                estimator=LGBMClassifier(**lgbm_params),\n",
    "                param_distributions=param_grid,\n",
    "                cv=n_calibration_folds,\n",
    "                n_trials=50,\n",
    "                scoring='neg_log_loss',\n",
    "                verbose=0,\n",
    "                random_state=seed,\n",
    "                n_jobs=1  # lGBM uses the cores efficiently\n",
    "            )\n",
    "            fit_params[\"categorical_feature\"] = cat_cols\n",
    "        elif trial.method == \"catb\" or trial.method == \"catb_opt\" and trial.task.name in catb_classification_non_opt:\n",
    "            est = CatBoostClassifier(**catboost_params)\n",
    "            fit_params[\"cat_features\"] = cat_cols\n",
    "        elif trial.method == \"catb_opt\":\n",
    "            # from https://forecastegy.com/posts/catboost-hyperparameter-tuning-guide-with-optuna/\n",
    "            # TODO: change and harmonize these optimization parameters\n",
    "            param_grid = {\n",
    "                'learning_rate': optuna.distributions.FloatDistribution(1e-3, 0.1, log=True),\n",
    "                'depth': optuna.distributions.IntDistribution(1, 10),\n",
    "                'colsample_bylevel': optuna.distributions.FloatDistribution(0.05, 1.0),\n",
    "                'min_data_in_leaf': optuna.distributions.IntDistribution(1, 100),\n",
    "            }\n",
    "            est = OptunaSearchCV(\n",
    "                estimator=CatBoostClassifier(**catboost_params),\n",
    "                param_distributions=param_grid,\n",
    "                cv=n_calibration_folds,\n",
    "                n_trials=50,\n",
    "                scoring='neg_log_loss',\n",
    "                verbose=0,\n",
    "                random_state=seed,\n",
    "                n_jobs=1  # catboost uses the cores efficiently\n",
    "            )\n",
    "            fit_params[\"cat_features\"] = cat_cols\n",
    "        elif trial.method == \"rf_xgb\":\n",
    "            est = XGBRFClassifier(**rf_xgb_params)\n",
    "            fit_params[\"verbose\"] = False\n",
    "        elif trial.method == \"rf_sk\":\n",
    "            est = Pipeline([(\"p\", p), (\"est\", RandomForestClassifier(**rf_sk_params))])\n",
    "        elif trial.method == \"ert\":\n",
    "            est = Pipeline([(\"p\", p), (\"est\", ExtraTreesClassifier(**ert_params))])\n",
    "        elif trial.method == \"tree\":\n",
    "            est = Pipeline([(\"p\", p), (\"est\", DecisionTreeClassifier(**tree_params))])\n",
    "        elif trial.method == \"elastic\":\n",
    "            elastic_params[\"n_jobs\"] = -1\n",
    "            est = Pipeline([(\"p\", p), (\"est\", LogisticRegression(penalty='elasticnet', solver='saga', l1_ratio=0.5, **elastic_params))])\n",
    "        elif trial.method == \"sgd\":\n",
    "            est = Pipeline([(\"p\", p), (\"est\", CalibratedClassifierCV(SGDClassifier(**sgd_params), n_jobs=-1, cv=n_calibration_folds))])\n",
    "        elif trial.method == \"lm\":\n",
    "            lm_params[\"random_state\"] = seed\n",
    "            est = Pipeline([(\"p\", p), (\"est\", LogisticRegression(**lm_params))])\n",
    "        elif trial.method == \"lsvm\":\n",
    "            if trial.task.name in {\"CIFAR_10\"}:\n",
    "                max_samples = 30000  # crashes or fit time too long without subsampling\n",
    "            if trial.task.name in {\"Fashion-MNIST\"}:\n",
    "                max_samples = 20000  # OMM crashes without subsampling\n",
    "            if trial.task.name in {\"mnist_784\"}:\n",
    "                max_samples = 40000  # OMM crashes without subsampling\n",
    "            est = Pipeline([(\"p\", p), (\"est\", CalibratedClassifierCV(LinearSVC(**lsvm_params), n_jobs=-1, cv=n_calibration_folds))])\n",
    "        elif trial.method == \"svm\":\n",
    "            if trial.task.name in {\"Fashion-MNIST\"}:\n",
    "                max_samples = 40000  # OMM crashes without subsampling\n",
    "            if trial.task.name in {\"CIFAR_10\"}:\n",
    "                max_samples = 50000  # crashes or fit time too long without subsampling\n",
    "            svm_params[\"random_state\"] = seed\n",
    "            est = Pipeline([(\"p\", p), (\"est\", CalibratedClassifierCV(SVC(**svm_params), n_jobs=-1, cv=n_calibration_folds))])\n",
    "        elif trial.method == \"nn\":\n",
    "            est = Pipeline([(\"p\", p), (\"est\", MLPClassifier(**nn_params))])\n",
    "        elif trial.method == \"knn\":\n",
    "            est = Pipeline([(\"p\", p), (\"est\", KNeighborsClassifier(**knn_params))])\n",
    "        elif trial.method == \"aplr\":\n",
    "            fit_params[\"y\"] = fit_params[\"y\"].astype(str)\n",
    "            p.sparse_threshold = 0  # APLR only handles dense\n",
    "            if trial.task.name in {\"CIFAR_10\"}:\n",
    "                max_samples = 10000  # crashes or fit time too long without subsampling\n",
    "            if trial.task.name in {\"Fashion-MNIST\"}:\n",
    "                max_samples = 20000  # crashes or fit time too long without subsampling\n",
    "            if trial.task.name in {\"mnist_784\"}:\n",
    "                max_samples = 15000  # crashes or fit time too long without subsampling\n",
    "            if trial.task.name in {\"Devnagari-Script\"}:\n",
    "                # Devnagari-Script with 5000 samples takes 20,000 seconds\n",
    "                max_samples = 5000  # crashes or fit time too long without subsampling\n",
    "            est = Pipeline([(\"p\", p), (\"est\", APLRClassifier(**aplr_params))])\n",
    "        elif trial.method == \"tabpfn\":\n",
    "            est = TabPFNClassifier(**tabpfn_params)\n",
    "        else:\n",
    "            raise Exception(f\"Unrecognized classification method name {trial.method}\")\n",
    "    elif trial.task.problem == \"regression\":\n",
    "        if trial.method == \"ebm\":\n",
    "            for param, val in ebm_params.copy().items():\n",
    "                try:\n",
    "                    set_option(param, val)\n",
    "                    del ebm_params[param]\n",
    "                except:\n",
    "                    pass\n",
    "            est = ExplainableBoostingRegressor(**ebm_params)\n",
    "        elif trial.method == \"ebm_opt\":\n",
    "            for param, val in ebm_params.copy().items():\n",
    "                try:\n",
    "                    set_option(param, val)\n",
    "                    del ebm_params[param]\n",
    "                except:\n",
    "                    pass\n",
    "            if trial.task.name in {\"Allstate_Claims_Severity\"}:\n",
    "                # TODO: tweak\n",
    "                max_samples = 5000  # crashes or fit time too long without subsampling\n",
    "            if trial.task.name in {\"Airlines_DepDelay_10M\"}:\n",
    "                # TODO: tweak\n",
    "                max_samples = 50000  # crashes or fit time too long without subsampling\n",
    "            if trial.task.name in {\"nyc-taxi-green-dec-2016\"}:\n",
    "                # TODO: tweak\n",
    "                max_samples = 20000  # crashes or fit time too long without subsampling\n",
    "            if trial.task.name in {\"Buzzinsocialmedia_Twitter\"}:\n",
    "                max_samples = 2000  # crashes or fit time too long without subsampling\n",
    "            if trial.task.name in {\"Yolanda\"}:\n",
    "                max_samples = 2000  # crashes or fit time too long without subsampling\n",
    "\n",
    "            # TODO: these two new ones need to be ranged\n",
    "            if trial.task.name in {\"Santander_transaction_value\"}:\n",
    "                # TODO: determine\n",
    "                max_samples = 1000  # crashes or fit time too long without subsampling\n",
    "            if trial.task.name in {\"pol\"}:\n",
    "                # TODO: determine\n",
    "                max_samples = 1000  # crashes or fit time too long without subsampling\n",
    "            \n",
    "            # TODO: change these optimization parameters\n",
    "            param_grid = {\n",
    "                'smoothing_rounds': optuna.distributions.IntDistribution(1, 4000, log=True),\n",
    "                'interactions': optuna.distributions.FloatDistribution(0.0, 0.999),\n",
    "                'inner_bags': optuna.distributions.IntDistribution(0, 0, step=50),  # would prefer 50\n",
    "                'max_bins': optuna.distributions.IntDistribution(256, 65536, log=True),\n",
    "                'max_interaction_bins': optuna.distributions.IntDistribution(8, 128, log=True),\n",
    "                'greedy_ratio': optuna.distributions.FloatDistribution(0.0001, 4.0),\n",
    "                'cyclic_progress': optuna.distributions.FloatDistribution(0.0, 1.0),\n",
    "                'outer_bags': optuna.distributions.IntDistribution(14, 14),  # would prefer more\n",
    "                'interaction_smoothing_rounds': optuna.distributions.IntDistribution(1, 500, log=True),\n",
    "                'learning_rate': optuna.distributions.FloatDistribution(0.0025, 0.5, log=True),\n",
    "                'max_leaves': optuna.distributions.IntDistribution(2, 5),\n",
    "                'min_samples_leaf': optuna.distributions.IntDistribution(2, 100, log=True),\n",
    "                'min_hessian': optuna.distributions.FloatDistribution(0.000001, 10.0, log=True),\n",
    "                'max_rounds': optuna.distributions.IntDistribution(25000, 25000),\n",
    "                'early_stopping_rounds': optuna.distributions.IntDistribution(50, 50),\n",
    "                'early_stopping_tolerance': optuna.distributions.FloatDistribution(1e-10, 1e-5, log=True),\n",
    "                'validation_size': optuna.distributions.FloatDistribution(0.1, 0.5),\n",
    "            }\n",
    "            est = OptunaSearchCV(\n",
    "                estimator=ExplainableBoostingRegressor(**ebm_params),\n",
    "                param_distributions=param_grid,\n",
    "                cv=n_calibration_folds,\n",
    "                n_trials=50,\n",
    "                scoring='neg_mean_squared_error',\n",
    "                verbose=0,\n",
    "                random_state=seed,\n",
    "                n_jobs=1  # EBM uses the cores efficiently\n",
    "            )\n",
    "        elif trial.method == \"xgb\":\n",
    "            est = XGBRegressor(**xgb_params)\n",
    "            fit_params[\"verbose\"] = False\n",
    "        elif trial.method == \"xgb_opt\":\n",
    "            if trial.task.name in {\"Allstate_Claims_Severity\"}:\n",
    "                # TODO: tweak\n",
    "                max_samples = 100000  # crashes or fit time too long without subsampling\n",
    "            if trial.task.name in {\"Airlines_DepDelay_10M\"}:\n",
    "                # TODO: tweak\n",
    "                max_samples = 100000  # crashes or fit time too long without subsampling\n",
    "            if trial.task.name in {\"nyc-taxi-green-dec-2016\"}:\n",
    "                # TODO: tweak\n",
    "                max_samples = 100000  # crashes or fit time too long without subsampling\n",
    "            if trial.task.name in {\"Buzzinsocialmedia_Twitter\"}:\n",
    "                # TODO: tweak\n",
    "                max_samples = 100000  # crashes or fit time too long without subsampling\n",
    "            if trial.task.name in {\"Yolanda\"}:\n",
    "                # TODO: tweak\n",
    "                max_samples = 50000  # crashes or fit time too long without subsampling\n",
    "\n",
    "            # from https://github.com/optuna/optuna-examples/blob/main/xgboost/xgboost_cv.py\n",
    "            # TODO: change and harmonize these optimization parameters\n",
    "            param_grid = {\n",
    "                'n_estimators': optuna.distributions.IntDistribution(50, 2000, log=True),\n",
    "                'max_depth': optuna.distributions.IntDistribution(1, 9),\n",
    "                'learning_rate': optuna.distributions.FloatDistribution(0.005, 0.5, log=True),\n",
    "                'gamma': optuna.distributions.FloatDistribution(1e-8, 1.0, log=True),\n",
    "                'min_child_weight': optuna.distributions.FloatDistribution(2, 10),\n",
    "                'subsample': optuna.distributions.FloatDistribution(0.2, 1.0),\n",
    "                'colsample_bytree': optuna.distributions.FloatDistribution(0.2, 1.0),\n",
    "                'reg_alpha': optuna.distributions.FloatDistribution(1e-8, 1.0, log=True),\n",
    "                'reg_lambda': optuna.distributions.FloatDistribution(1e-8, 1.0, log=True),\n",
    "                'grow_policy': optuna.distributions.CategoricalDistribution([\"depthwise\", \"lossguide\"]),\n",
    "            }\n",
    "            est = OptunaSearchCV(\n",
    "                estimator=XGBRegressor(**xgb_params),\n",
    "                param_distributions=param_grid,\n",
    "                cv=n_calibration_folds,\n",
    "                n_trials=50,\n",
    "                scoring='neg_mean_squared_error',\n",
    "                verbose=0,\n",
    "                random_state=seed,\n",
    "                n_jobs=1  # XGBoost uses the cores efficiently\n",
    "            )\n",
    "            fit_params[\"verbose\"] = False\n",
    "        elif trial.method == \"lgbm\":\n",
    "            est = LGBMRegressor(**lgbm_params)\n",
    "            fit_params[\"categorical_feature\"] = cat_cols\n",
    "        elif trial.method == \"lgbm_opt\":\n",
    "            if trial.task.name in {\"Airlines_DepDelay_10M\"}:\n",
    "                max_samples = 2000000  # crashes or fit time too long without subsampling\n",
    "\n",
    "            # TODO: change and harmonize these optimization parameters\n",
    "            param_grid = {\n",
    "                'num_leaves': optuna.distributions.IntDistribution(2, 256, log=True),\n",
    "                'max_depth': optuna.distributions.IntDistribution(-1, 30),\n",
    "                'learning_rate': optuna.distributions.FloatDistribution(0.005, 0.5, log=True),\n",
    "                'n_estimators': optuna.distributions.IntDistribution(50, 2000, log=True),\n",
    "                'min_child_samples': optuna.distributions.IntDistribution(2, 100),\n",
    "                'subsample_freq': optuna.distributions.IntDistribution(1, 1),\n",
    "                'subsample': optuna.distributions.FloatDistribution(0.4, 1.0),\n",
    "                'colsample_bytree': optuna.distributions.FloatDistribution(0.4, 1.0),\n",
    "                'reg_alpha': optuna.distributions.FloatDistribution(1e-8, 10.0, log=True),\n",
    "                'reg_lambda': optuna.distributions.FloatDistribution(1e-8, 10.0, log=True)\n",
    "            }\n",
    "            est = OptunaSearchCV(\n",
    "                estimator=LGBMRegressor(**lgbm_params),\n",
    "                param_distributions=param_grid,\n",
    "                cv=n_calibration_folds,\n",
    "                n_trials=50,\n",
    "                scoring='neg_mean_squared_error',\n",
    "                verbose=0,\n",
    "                random_state=seed,\n",
    "                n_jobs=1  # LightGBM uses the cores efficiently\n",
    "            )\n",
    "            fit_params[\"categorical_feature\"] = cat_cols\n",
    "        elif trial.method == \"catb\":\n",
    "            if trial.task.name in {\"Airlines_DepDelay_10M\"}:\n",
    "                max_samples = 5000000  # OOM crashes without subsampling\n",
    "            est = CatBoostRegressor(**catboost_params)\n",
    "            fit_params[\"cat_features\"] = cat_cols\n",
    "        elif trial.method == \"catb_opt\":\n",
    "            if trial.task.name in {\"Allstate_Claims_Severity\"}:\n",
    "                # TODO: tweak\n",
    "                max_samples = 8000  # crashes or fit time too long without subsampling\n",
    "            if trial.task.name in {\"Airlines_DepDelay_10M\"}:\n",
    "                # TODO: tweak\n",
    "                max_samples = 100000  # crashes or fit time too long without subsampling\n",
    "            if trial.task.name in {\"nyc-taxi-green-dec-2016\"}:\n",
    "                # TODO: tweak\n",
    "                max_samples = 50000  # crashes or fit time too long without subsampling\n",
    "            if trial.task.name in {\"Buzzinsocialmedia_Twitter\"}:\n",
    "                # TODO: tweak\n",
    "                max_samples = 5000  # crashes or fit time too long without subsampling\n",
    "            if trial.task.name in {\"Yolanda\"}:\n",
    "                # TODO: tweak\n",
    "                max_samples = 5000  # crashes or fit time too long without subsampling\n",
    "\n",
    "            # from https://forecastegy.com/posts/catboost-hyperparameter-tuning-guide-with-optuna/\n",
    "            # TODO: change and harmonize these optimization parameters\n",
    "            param_grid = {\n",
    "                'learning_rate': optuna.distributions.FloatDistribution(1e-3, 0.1, log=True),\n",
    "                'depth': optuna.distributions.IntDistribution(1, 10),\n",
    "                'colsample_bylevel': optuna.distributions.FloatDistribution(0.05, 1.0),\n",
    "                'min_data_in_leaf': optuna.distributions.IntDistribution(1, 100),\n",
    "            }\n",
    "            est = OptunaSearchCV(\n",
    "                estimator=CatBoostRegressor(**catboost_params),\n",
    "                param_distributions=param_grid,\n",
    "                cv=n_calibration_folds,\n",
    "                n_trials=50,\n",
    "                scoring='neg_mean_squared_error',\n",
    "                verbose=0,\n",
    "                random_state=seed,\n",
    "                n_jobs=1  # catboost uses the cores efficiently\n",
    "            )\n",
    "            fit_params[\"cat_features\"] = cat_cols\n",
    "        elif trial.method == \"rf_xgb\":\n",
    "            est = XGBRFRegressor(**rf_xgb_params)\n",
    "            fit_params[\"verbose\"] = False\n",
    "        elif trial.method == \"rf_sk\":\n",
    "            if trial.task.name in {\"Allstate_Claims_Severity\"}:\n",
    "                max_samples = 200000  # crashes or fit time too long without subsampling\n",
    "            if trial.task.name in {\"Airlines_DepDelay_10M\"}:\n",
    "                max_samples = 500000  # crashes or fit time too long without subsampling\n",
    "            if trial.task.name in {\"Buzzinsocialmedia_Twitter\"}:\n",
    "                max_samples = 200000  # OOM crashes without subsampling (583,250 samples originally)\n",
    "            est = Pipeline([(\"p\", p), (\"est\", RandomForestRegressor(**rf_sk_params))])\n",
    "        elif trial.method == \"ert\":\n",
    "            if trial.task.name in {\"Airlines_DepDelay_10M\"}:\n",
    "                max_samples = 300000  # crashes or fit time too long without subsampling\n",
    "            if trial.task.name in {\"Allstate_Claims_Severity\"}:\n",
    "                max_samples = 100000  # crashes or fit time too long without subsampling\n",
    "            if trial.task.name in {\"Buzzinsocialmedia_Twitter\"}:\n",
    "                max_samples = 200000  # OOM crashes without subsampling (583,250 samples originally)\n",
    "            if trial.task.name in {\"Yolanda\"}:\n",
    "                max_samples = 200000  # OOM crashes without subsampling (400,000 samples originally)\n",
    "            est = Pipeline([(\"p\", p), (\"est\", ExtraTreesRegressor(**ert_params))])\n",
    "        elif trial.method == \"tree\":\n",
    "            if trial.task.name in {\"Airlines_DepDelay_10M\"}:\n",
    "                max_samples = 2000000  # fit time too long without subsampling\n",
    "            est = Pipeline([(\"p\", p), (\"est\", DecisionTreeRegressor(**tree_params))])\n",
    "        elif trial.method == \"elastic\":\n",
    "            est = Pipeline([(\"p\", p), (\"est\", ElasticNet(**elastic_params))])\n",
    "        elif trial.method == \"sgd\":\n",
    "            est = Pipeline([(\"p\", p), (\"est\", SGDRegressor(**sgd_params))])\n",
    "        elif trial.method == \"lm\":\n",
    "            est = Pipeline([(\"p\", p), (\"est\", LinearRegression(**lm_params))])\n",
    "        elif trial.method == \"lsvm\":\n",
    "            est = Pipeline([(\"p\", p), (\"est\", LinearSVR(**lsvm_params))])\n",
    "        elif trial.method == \"svm\":\n",
    "            if trial.task.name in {\"Allstate_Claims_Severity\"}:\n",
    "                max_samples = 100000  # crashes or fit time too long without subsampling\n",
    "            if trial.task.name in {\"Airlines_DepDelay_10M\"}:\n",
    "                max_samples = 100000  # crashes or fit time too long without subsampling\n",
    "            if trial.task.name in {\"nyc-taxi-green-dec-2016\"}:\n",
    "                max_samples = 150000  # crashes or fit time too long without subsampling\n",
    "            if trial.task.name in {\"Buzzinsocialmedia_Twitter\"}:\n",
    "                max_samples = 300000  # crashes or fit time too long without subsampling\n",
    "            if trial.task.name in {\"Yolanda\"}:\n",
    "                # TODO: tweak. Some exit quicker, but others take LOOOONG.\n",
    "                max_samples = 200000  # crashes or fit time too long without subsampling\n",
    "            est = Pipeline([(\"p\", p), (\"est\", SVR(**svm_params))])\n",
    "        elif trial.method == \"nn\":\n",
    "            est = Pipeline([(\"p\", p), (\"est\", MLPRegressor(**nn_params))])\n",
    "        elif trial.method == \"knn\":\n",
    "            if trial.task.name in {\"Airlines_DepDelay_10M\"}:\n",
    "                max_samples = 100000  # crashes or fit time too long without subsampling\n",
    "            est = Pipeline([(\"p\", p), (\"est\", KNeighborsRegressor(**knn_params))])\n",
    "        elif trial.method == \"aplr\":\n",
    "            p.sparse_threshold = 0  # APLR only accepts dense data\n",
    "            if trial.task.name in {\"Airlines_DepDelay_10M\"}:\n",
    "                max_samples = 100000  # OOM crashes without subsampling\n",
    "            if trial.task.name in {\"nyc-taxi-green-dec-2016\"}:\n",
    "                max_samples = 300000  # OOM crashes without subsampling\n",
    "            est = Pipeline([(\"p\", p), (\"est\", APLRRegressor(**aplr_params))])\n",
    "        elif trial.method == \"tabpfn\":\n",
    "            est = TabPFNRegressor(**tabpfn_params)\n",
    "        else:\n",
    "            raise Exception(f\"Unrecognized regression method name {trial.method}\")\n",
    "    else:\n",
    "        raise Exception(f\"Unrecognized problem {trial.task.problem}\")\n",
    "\n",
    "    if max_samples is None:\n",
    "        pass\n",
    "    elif max_samples < len(fit_params[\"y\"]):\n",
    "        # subsample because the ML method crashes or takes too long (more than 15,000 seconds)\n",
    "        _, fit_params[\"X\"], _, fit_params[\"y\"] = train_test_split(fit_params[\"X\"], fit_params[\"y\"], test_size=max_samples, random_state=seed)\n",
    "        _ = None  # free the _ variable to make more room\n",
    "    else:\n",
    "        print(f\"Ignoring max_sample of {max_samples} since there are {len(fit_params['y'])} training samples.\")\n",
    "\n",
    "    global global_counter\n",
    "    try:\n",
    "        global_counter += 1\n",
    "    except NameError:\n",
    "        global_counter = 0\n",
    "    \n",
    "    # Train\n",
    "    print(f\"FIT: {global_counter}, {trial.task.origin}, {trial.task.name}, {trial.method}, {trial.meta}, classes:{trial.task.n_classes}, features:{fit_params['X'].shape[1]}, train_samples:{fit_params['X'].shape[0]}, orig_samples:{trial.task.n_samples}\")\n",
    "\n",
    "    if isinstance(est, (ExplainableBoostingClassifier, ExplainableBoostingRegressor)):\n",
    "        n_bytes = est.estimate_mem(fit_params[\"X\"], fit_params[\"y\"], data_multiplier=1.0 / (1.0 - test_size))\n",
    "        print(f\"EBM Memory Required: {n_bytes}\")\n",
    "        trial.log(\"mem\", n_bytes)\n",
    "    \n",
    "    with warnings.catch_warnings():\n",
    "        warnings.filterwarnings(\"ignore\")\n",
    "        gc.collect()  # clean out garbage to have as much memory available as possible\n",
    "        start_time = time()\n",
    "        est.fit(**fit_params)\n",
    "        end_time = time()\n",
    "    trial.log(\"fit_time\", end_time - start_time)\n",
    "\n",
    "    if isinstance(est, OptunaSearchCV):\n",
    "        trial.log(\"opt\", est.best_params_)\n",
    "\n",
    "    if isinstance(est, (ExplainableBoostingClassifier, ExplainableBoostingRegressor)):\n",
    "        trial.log(\"iterations\", re.sub(r'\\s+', ' ', np.array_str(est.best_iteration_)).replace('[ ', '[').replace('] [',']['))\n",
    "    \n",
    "    # clean out garbage to have as much memory available as possible\n",
    "    del fit_params\n",
    "    gc.collect()\n",
    "    \n",
    "    if trial.task.problem == \"regression\":\n",
    "        start_time = time()\n",
    "        predictions = est.predict(X_test)\n",
    "        end_time = time()\n",
    "        trial.log(\"pred_time\", end_time - start_time)\n",
    "\n",
    "        # Use NRMSE-IQR (normalized root mean square error by the interquartile range)\n",
    "        # so that datasets with large predicted values do not dominate the benchmark\n",
    "        # and the range is not sensitive to outliers. The rank is identical to RMSE.\n",
    "        # https://en.wikipedia.org/wiki/Root_mean_square_deviation\n",
    "\n",
    "        rmse = root_mean_squared_error(y_test, predictions)\n",
    "        trial.log(\"rmse\", rmse)\n",
    "        trial.log(\"nrmse\", rmse / interquartile_range)\n",
    "        trial.log(\"r2\", r2_score(y_test, predictions))\n",
    "        trial.log(\"mae\", mean_absolute_error(y_test, predictions))\n",
    "        trial.log(\"mape\", mean_absolute_percentage_error(y_test, predictions))\n",
    "        trial.log(\"medae\", median_absolute_error(y_test, predictions))\n",
    "    else:\n",
    "        start_time = time()\n",
    "        predictions = est.predict_proba(X_test)\n",
    "        end_time = time()\n",
    "        trial.log(\"pred_time\", end_time - start_time)\n",
    "\n",
    "        if trial.task.problem == \"binary\":\n",
    "            predictions = predictions[:,1]\n",
    "            trial.log(\"logloss\", log_loss(y_test, predictions))\n",
    "            trial.log(\"auc\", roc_auc_score(y_test, predictions))\n",
    "            trial.log(\"brier\", brier_score_loss(y_test, predictions))\n",
    "            predictions = (0.5 < predictions).astype(np.int16)\n",
    "            trial.log(\"precision\", precision_score(y_test, predictions, zero_division=0.0))\n",
    "            trial.log(\"recall\", recall_score(y_test, predictions, zero_division=0.0))\n",
    "            trial.log(\"accuracy\", accuracy_score(y_test, predictions))\n",
    "            trial.log(\"bal_acc\", balanced_accuracy_score(y_test, predictions))\n",
    "        else:\n",
    "            trial.log(\"xent\", log_loss(y_test, predictions))\n",
    "            trial.log('ovo_auc', roc_auc_score(y_test, predictions, multi_class='ovo', average='weighted'))\n",
    "            trial.log('ovr_auc', roc_auc_score(y_test, predictions, multi_class='ovr'))\n",
    "            # TODO: add multiclass brier_score once scikit-learn supports it (open PR now)\n",
    "            predictions = np.argmax(predictions, axis=1)\n",
    "            trial.log(\"mprecision\", precision_score(y_test, predictions, average='weighted', zero_division=0.0))\n",
    "            trial.log(\"mrecall\", recall_score(y_test, predictions, average='weighted', zero_division=0.0))\n",
    "            trial.log(\"maccuracy\", accuracy_score(y_test, predictions))\n",
    "            trial.log(\"mbal_acc\", balanced_accuracy_score(y_test, predictions))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f3e7ed60-fbd0-46b1-8bd9-b140ec31a9e2",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Populate the trial store with benchmark datasets and launch the runs,\n",
    "# either on Azure Container Instances or on the local machine.\n",
    "from powerlift.bench import populate_with_datasets, retrieve_openml_cc18, retrieve_openml_automl_regression\n",
    "from powerlift.bench import retrieve_openml_automl_classification, retrieve_catboost_50k, retrieve_pmlb\n",
    "from powerlift.executors import LocalMachine, AzureContainerInstance\n",
    "from itertools import chain\n",
    "\n",
    "cache_dir=\"~/.powerlift\"\n",
    "# lazily chained dataset iterators; commented entries are optional extra suites\n",
    "data_retrieval = chain(\n",
    "    retrieve_openml_cc18(cache_dir=cache_dir),\n",
    "    retrieve_openml_automl_regression(cache_dir=cache_dir),\n",
    "    # retrieve_openml_automl_classification(cache_dir=cache_dir),\n",
    "    # retrieve_catboost_50k(cache_dir=cache_dir),\n",
    "    # retrieve_pmlb(cache_dir=cache_dir),\n",
    ")\n",
    "\n",
    "# This downloads datasets once and feeds into the database.\n",
    "populate_with_datasets(store, data_retrieval, exist_ok=exist_ok)\n",
    "\n",
    "if is_azure:\n",
    "    # NOTE(review): relies on azure_* credentials and `store`/`benchmark`\n",
    "    # being defined in earlier cells — confirm those cells ran first\n",
    "    executor = AzureContainerInstance(\n",
    "        store, azure_tenant_id, subscription_id, azure_client_id, credential,\n",
    "        resource_group=resource_group,\n",
    "        pip_install=requirements + \" interpret-core\",\n",
    "        wheel_filepaths=wheel_filepaths,\n",
    "        n_instances=n_instances,\n",
    "        image=\"mcr.microsoft.com/devcontainers/python:3.12\",\n",
    "    )\n",
    "    benchmark.run(trial_runner, trial_filter, timeout=TIMEOUT_SEC, n_replicates=n_replicates, executor=executor)\n",
    "else:\n",
    "    # local debug run; unlike the Azure branch, no explicit timeout is passed\n",
    "    benchmark.run(trial_runner, trial_filter, n_replicates=n_replicates, executor=LocalMachine(store, debug_mode=True))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e5745059-f747-4c02-8720-0b2d49aa4261",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Uncomment to block this notebook until every queued trial has finished:\n",
    "# benchmark.wait_until_complete()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5861d2b9",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist the results to CSV, then print a status summary: counts per state,\n",
    "# any error messages, and currently-running trials sorted by elapsed minutes.\n",
    "from datetime import datetime, timezone\n",
    "results_df = benchmark.results()\n",
    "results_df.to_csv(f\"{experiment_name}.csv\", index=False)\n",
    "\n",
    "status_df = benchmark.status()\n",
    "# Elapsed minutes since each trial started. datetime.utcnow() is deprecated\n",
    "# since Python 3.12, so take an aware UTC timestamp and strip tzinfo to keep\n",
    "# the subtraction compatible with the store's naive start_time values.\n",
    "utc_now = datetime.now(timezone.utc).replace(tzinfo=None)\n",
    "status_df[\"start_time\"] = (utc_now - status_df[\"start_time\"]).dt.total_seconds() / 60.0\n",
    "print(status_df['status'].value_counts().to_string(index=True, header=False))\n",
    "print()\n",
    "if status_df[\"errmsg\"].notna().any():\n",
    "    cols=[\"task\", \"method\", \"meta\", \"errmsg\", \"n_samples\", \"n_features\", \"n_classes\", \"total_categories\"]\n",
    "    print(status_df[status_df[\"errmsg\"].notna()].reindex(columns=cols).to_string(index=False))\n",
    "    print()\n",
    "    for errmsg in status_df[\"errmsg\"]:\n",
    "        if errmsg is not None:\n",
    "            print(\"ERROR: \" + str(errmsg))\n",
    "    print()\n",
    "if (0 <= status_df[\"runner_id\"]).any():\n",
    "    cols=[\"runner_id\", \"task\", \"method\", \"meta\", \"replicate_num\", \"start_time\", \"n_samples\", \"n_features\", \"n_classes\", \"total_categories\"]\n",
    "    print(status_df[0 <= status_df[\"runner_id\"]].sort_values(by='start_time', ascending=False).reindex(columns=cols).to_string(index=False))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2716b066-7d8b-4163-8d5d-a1b7f0274dbb",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Reload the results CSV, optionally merge in rows from a previous run's\n",
    "# base.csv, tag each task with its problem type, and apply the sign flip so\n",
    "# that every metric follows the notebook's lower-is-better convention.\n",
    "import pandas as pd\n",
    "import os\n",
    "\n",
    "pd.set_option('display.float_format', '{:.6f}'.format) \n",
    "\n",
    "# reload if analyzing later\n",
    "results_df = pd.read_csv(f'{experiment_name}.csv')\n",
    "print(f'Results (pre-filtered) count: {results_df.shape[0]}')\n",
    "\n",
    "# Optionally filter out results we want to replace\n",
    "#results_df = results_df[results_df['method'] != 'ebm']\n",
    "#results_df = results_df[(results_df['method'] != 'ebm') | (results_df['meta'] != '{}')]\n",
    "#results_df = results_df[(results_df['method'] != 'ebm') | (results_df['meta'] != '{\"interactions\": 0}')]\n",
    "print(f'Results (post-filtered) count: {results_df.shape[0]}')\n",
    "\n",
    "# Fill in results from previous runs if desired.\n",
    "basefile = 'base.csv'\n",
    "if os.path.exists(basefile):\n",
    "    filler_df = pd.read_csv(basefile)\n",
    "    \n",
    "    # Optionally filter out results from the filler\n",
    "    filler_df = filler_df[filler_df['method'] != 'ebm']\n",
    "    #filler_df = filler_df[(filler_df['method'] != 'ebm') | (filler_df['meta'] != '{}')]\n",
    "    #filler_df = filler_df[(filler_df['method'] != 'ebm') | (filler_df['meta'] != '{\"interactions\": 0}')]\n",
    "    \n",
    "    # keep only filler rows whose key is not already in the fresh results\n",
    "    key_columns = ['task', 'method', 'meta', 'replicate_num', 'name', 'seq_num']\n",
    "    filler_df = filler_df[~filler_df.set_index(key_columns).index.isin(results_df.set_index(key_columns).index)]\n",
    "    if 0 < filler_df.shape[0]:\n",
    "        results_df = pd.concat([results_df, filler_df], ignore_index=True)\n",
    "        results_df = results_df.sort_values(by=[\"task\", \"method\", \"meta\", \"replicate_num\", \"name\", \"seq_num\"])\n",
    "        results_df.to_csv(\"merged.csv\", index=False)\n",
    "    print(f'Filler count: {filler_df.shape[0]}')\n",
    "    print(f'Results count: {results_df.shape[0]}')\n",
    "    #print(filler_df.to_string())\n",
    "\n",
    "# infer each task's problem type from which headline metric it logged\n",
    "types_df = results_df[results_df['name'].isin(['auc', 'ovo_auc', 'nrmse'])]\n",
    "task_to_type = types_df.groupby('task')['name'].first().map({'auc': 'binary', 'ovo_auc': 'multiclass', 'nrmse': 'regression'})\n",
    "results_df['type'] = results_df['task'].map(task_to_type).fillna('')\n",
    "\n",
    "# negate higher-is-better metrics so that lower is better for every metric\n",
    "flip = ['r2', 'auc', 'precision', 'recall', 'accuracy', 'bal_acc', 'ovo_auc', 'ovr_auc', 'mprecision', 'mrecall', 'maccuracy', 'mbal_acc']\n",
    "condition = results_df['name'].isin(flip)\n",
    "results_df.loc[condition, 'num_val'] = -results_df.loc[condition, 'num_val']\n",
    "\n",
    "# Optionally filter out any incomplete datasets\n",
    "results_df = results_df[results_df['task'] != 'Devnagari-Script']\n",
    "results_df = results_df[results_df['task'] != 'mnist_784']\n",
    "results_df = results_df[results_df['task'] != 'isolet']\n",
    "results_df = results_df[results_df['task'] != 'Fashion-MNIST']\n",
    "results_df = results_df[results_df['task'] != 'har']\n",
    "#results_df = results_df[results_df['task'] != 'CIFAR_10']\n",
    "#results_df = results_df[results_df['task'] != 'Airlines_DepDelay_10M']\n",
    "#results_df = results_df[results_df['task'] != 'cnae-9']\n",
    "#results_df = results_df[results_df['task'] != 'MiceProtein']\n",
    "#\n",
    "#results_df = results_df[results_df['type'] != 'binary']\n",
    "#results_df = results_df[results_df['type'] != 'multiclass']\n",
    "#results_df = results_df[results_df['type'] != 'regression']\n",
    "#\n",
    "#results_df = results_df[(results_df['method'] != 'ebm') | (results_df['meta'] == '{\"interactions\": 0}') | (results_df['meta'] == '{}')]\n",
    "#results_df = results_df[((results_df['method'] == 'ebm') & (results_df['meta'] == '{}')) | (results_df['method'] == 'xgb')]\n",
    "#\n",
    "#results_df = results_df[\n",
    "#    (results_df['task'] == 'CIFAR_10') | \n",
    "#    (results_df['task'] == 'Fashion-MNIST') | \n",
    "#    (results_df['task'] == 'har') | \n",
    "#    (results_df['task'] == 'mnist_784') | \n",
    "#    (results_df['task'] == 'isolet') | \n",
    "#    (results_df['task'] == 'Allstate_Claims_Severity') | \n",
    "#    (results_df['task'] == 'Airlines_DepDelay_10M') | \n",
    "#    (results_df['task'] == 'Buzzinsocialmedia_Twitter') | \n",
    "#    (results_df['task'] == 'nyc-taxi-green-dec-2016') | \n",
    "#    (results_df['task'] == 'cnae-9') | \n",
    "#    (results_df['task'] == 'Santander_transaction_value') | \n",
    "#    (results_df['task'] == 'Yolanda') |\n",
    "#    (results_df['task'] == 'Bioresponse')\n",
    "#]\n",
    "\n",
    "# TabPFN (no categoricals)\n",
    "# results_df = results_df[(results_df['task'] == 'banknote-authentication') | (results_df['task'] == 'wdbc') | (results_df['task'] == 'breast-w') | (results_df['task'] == 'spambase') | (results_df['task'] == 'ozone-level-8hr') | (results_df['task'] == 'wilt') | (results_df['task'] == 'diabetes') | (results_df['task'] == 'qsar-biodeg') | (results_df['task'] == 'kc2') | (results_df['task'] == 'pc4') | (results_df['task'] == 'climate-model-simulation-crashes') | (results_df['task'] == 'kc1') | (results_df['task'] == 'blood-transfusion-service-center') | (results_df['task'] == 'pc3') | (results_df['task'] == 'phoneme') | (results_df['task'] == 'pc1') | (results_df['task'] == 'balance-scale') | (results_df['task'] == 'analcatdata_authorship') | (results_df['task'] == 'MiceProtein') | (results_df['task'] == 'wall-robot-navigation') | (results_df['task'] == 'optdigits') | (results_df['task'] == 'mfeat-karhunen') | (results_df['task'] == 'semeion') | (results_df['task'] == 'mfeat-factors') | (results_df['task'] == 'mfeat-pixel') | (results_df['task'] == 'segment') | (results_df['task'] == 'satimage') | (results_df['task'] == 'mfeat-morphological') | (results_df['task'] == 'mfeat-fourier') | (results_df['task'] == 'mfeat-zernike') | (results_df['task'] == 'steel-plates-fault') | (results_df['task'] == 'vehicle') | (results_df['task'] == 'quake') | (results_df['task'] == 'us_crime') | (results_df['task'] == 'space_ga') | (results_df['task'] == 'wine_quality') | (results_df['task'] == 'tecator')]\n",
    "\n",
    "print(f'Final count: {results_df.shape[0]}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7398abbf-3a1f-4eaa-8dda-dfe3521077a4",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Aggregate per-method metrics and compute rank statistics.\n",
    "# Mean of every metric per (method, meta); unstack turns metric names into columns.\n",
    "averages = results_df.groupby(['method', 'meta', 'name'])['num_val'].mean().unstack().reset_index()\n",
    "\n",
    "# Per-metric ranks: rank methods within each (task, metric) row (lower num_val\n",
    "# = better = rank 1, per the notebook's convention), then average the ranks\n",
    "# over tasks and replicates.\n",
    "metric_ranks = results_df.pivot_table('num_val', ['task', 'name'], ['method', 'meta', 'replicate_num'])\n",
    "metric_ranks = metric_ranks.rank(axis=1, ascending=True, method='min')\n",
    "metric_ranks = metric_ranks.stack(level='replicate_num', future_stack=True)\n",
    "metric_ranks = metric_ranks.groupby('name').mean().transpose()\n",
    "metric_ranks.columns = [f\"{col}_RANK\" for col in metric_ranks.columns]\n",
    "metric_ranks = metric_ranks.reset_index()\n",
    "\n",
    "# Overall RANK: same ranking but restricted to each task's headline metric\n",
    "# (auc for binary, ovo_auc for multiclass, nrmse for regression).\n",
    "overall_rank = results_df[results_df['name'].isin(['auc', 'ovo_auc', 'nrmse'])]\n",
    "overall_rank = overall_rank.pivot_table('num_val', 'task', ['method', 'meta', 'replicate_num'])\n",
    "overall_rank = overall_rank.rank(axis=1, ascending=True, method='min')\n",
    "overall_rank = overall_rank.stack(level='replicate_num', future_stack=True)\n",
    "overall_rank = overall_rank.mean()\n",
    "overall_rank = overall_rank.to_frame(name='RANK').reset_index()\n",
    "\n",
    "# join averages, per-metric ranks, and overall rank; sort best-first\n",
    "desired_columns = ['method', 'meta', 'RANK', 'auc', 'ovo_auc', 'nrmse', 'auc_RANK', 'ovo_auc_RANK', 'nrmse_RANK', 'fit_time', 'pred_time']\n",
    "combined_df = averages.merge(metric_ranks, on=['method', 'meta']).merge(overall_rank, on=['method', 'meta'])\n",
    "combined_df = combined_df.sort_values(by='RANK')\n",
    "combined_df = combined_df.reindex(columns=desired_columns)\n",
    "\n",
    "print(\"METHOD METRICS:\\n\")\n",
    "print(combined_df.to_string(index=False))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7c2df600-d3e0-430e-8dda-836c09825987",
   "metadata": {},
   "outputs": [],
   "source": [
    "desired_columns = ['method', 'meta', 'RANK', 'auc', 'ovo_auc', 'nrmse', 'fit_time', 'pred_time']\n",
    "row_order = combined_df[['method', 'meta']]\n",
    "\n",
    "# Number of completed runs per (method, meta) for each metric name.\n",
    "counts = results_df.groupby(['method', 'meta', 'name']).size().unstack()\n",
    "counts = counts.reindex(row_order, axis=0).reset_index()\n",
    "\n",
    "# 'RANK' totals the runs behind the overall RANK metric above.\n",
    "counts['RANK'] = 0\n",
    "for rank_metric in ('auc', 'ovo_auc', 'nrmse'):\n",
    "    if rank_metric in counts.columns:\n",
    "        counts['RANK'] += counts[rank_metric]\n",
    "\n",
    "counts = counts.reindex(columns=desired_columns).fillna(0)\n",
    "count_columns = counts.columns.drop(['method', 'meta'])\n",
    "counts[count_columns] = counts[count_columns].astype(int)\n",
    "print(\"METHOD COUNTS:\\n\")\n",
    "print(counts.to_string(index=False))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9f4a87a2-3dd7-4cef-905f-ed00b3a1065a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Per-task mean of the primary quality metrics, one column per (method, meta).\n",
    "filtered_df = results_df[results_df['name'].isin(['auc', 'ovo_auc', 'rmse'])]\n",
    "grouped = filtered_df.groupby(['task', 'method', 'meta', 'type'])['num_val'].agg(['mean']).reset_index()\n",
    "pivot_table = grouped.pivot_table(index=['task', 'type'], columns=['method', 'meta'], values=['mean'])\n",
    "pivot_table.columns = pivot_table.columns.droplevel(0)\n",
    "pivot_table = pivot_table.reindex(row_order, axis=1)\n",
    "\n",
    "# If a plain EBM run is present, order tasks by how the EBM score compares to\n",
    "# the best non-EBM score; otherwise fall back to ordering by task name.\n",
    "ratio_column = ('ebm', '{}')\n",
    "if ratio_column in pivot_table.columns:\n",
    "    non_ebm_columns = [col for col in pivot_table.columns if col[0] != ratio_column[0]]\n",
    "    ratio_col = (pivot_table[ratio_column] / pivot_table[non_ebm_columns].min(axis=1)).values\n",
    "    # Negative metric values (sign-flipped higher-is-better metrics) invert\n",
    "    # the ratio's meaning, so flip the sign back for those rows.\n",
    "    ratio_col[pivot_table[ratio_column] < 0] *= -1\n",
    "    pivot_table = pivot_table.reset_index(-1)\n",
    "    pivot_table.insert(0, 'ratio', ratio_col)\n",
    "    pivot_table = pivot_table.sort_values(by=['type', 'ratio'])\n",
    "else:\n",
    "    pivot_table = pivot_table.reset_index(-1)\n",
    "    pivot_table = pivot_table.sort_values(by=['type', 'task'])\n",
    "task_order = pivot_table.index  # row ordering reused by the std/count/time cells\n",
    "pivot_table = pivot_table.reset_index()\n",
    "print(\"TASK MEAN (auc, ovo_auc, rmse):\\n\")\n",
    "print(pivot_table.to_string())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "077607b5-06d5-4673-bb93-c9cd527c73fd",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Per-task standard deviation of the primary quality metrics, aligned to the\n",
    "# same row and column ordering as the mean table above.\n",
    "filtered_df = results_df[results_df['name'].isin(['auc', 'ovo_auc', 'rmse'])]\n",
    "stds = filtered_df.groupby(['task', 'method', 'meta', 'type'])['num_val'].std().rename('std').reset_index()\n",
    "pivot_table = stds.pivot_table(index=['task', 'type'], columns=['method', 'meta'], values='std')\n",
    "pivot_table = pivot_table.reindex(row_order, axis=1)\n",
    "pivot_table = pivot_table.reset_index(-1).reindex(task_order, axis=0).reset_index()\n",
    "print(\"TASK STANDARD DEVIATION (auc, ovo_auc, rmse):\\n\")\n",
    "print(pivot_table.to_string())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7321648e-3fc6-486f-af3c-c07eca46ccb8",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Number of runs behind each (task, method) mean, aligned to the same\n",
    "# ordering as the tables above.\n",
    "filtered_df = results_df[results_df['name'].isin(['auc', 'ovo_auc', 'rmse'])]\n",
    "run_counts = filtered_df.groupby(['task', 'method', 'meta', 'type'])['num_val'].count().rename('count').reset_index()\n",
    "pivot_table = run_counts.pivot_table(index=['task', 'type'], columns=['method', 'meta'], values='count')\n",
    "pivot_table = pivot_table.fillna(0).astype(int)\n",
    "pivot_table = pivot_table.reindex(row_order, axis=1)\n",
    "pivot_table = pivot_table.reset_index(-1).reindex(task_order, axis=0).reset_index()\n",
    "print(\"TASK COUNT:\\n\")\n",
    "print(pivot_table.to_string())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b56d7454-e10c-4839-8fb2-0af94cf56837",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Total runtime per (task, method): mean fit_time plus mean pred_time.\n",
    "filtered_df = results_df[results_df['name'].isin(['fit_time', 'pred_time'])]\n",
    "grouped = filtered_df.groupby(['task', 'method', 'meta', 'type', 'name'])['num_val'].agg(['mean']).reset_index()\n",
    "pivot_table = grouped.pivot_table(index=['task', 'method', 'meta', 'type'], columns=['name'], values=['mean'])\n",
    "pivot_table.columns = pivot_table.columns.droplevel(0)  # drop the compound column term \"mean\"\n",
    "# Keep only rows where both timings exist so the sum below is well defined.\n",
    "pivot_table = pivot_table.dropna(subset=['fit_time', 'pred_time'])\n",
    "times = pivot_table['fit_time'] + pivot_table['pred_time']\n",
    "times = times.to_frame(name='time')\n",
    "# Re-pivot so each (method, meta) becomes a column, matching the other tables.\n",
    "pivot_table = times.pivot_table(index=['task', 'type'], columns=['method', 'meta'], values=['time'])\n",
    "pivot_table.columns = pivot_table.columns.droplevel(0)  # drop the compound column term \"time\"\n",
    "pivot_table = pivot_table.reindex(row_order, axis=1)\n",
    "pivot_table = pivot_table.reset_index(-1).reindex(task_order, axis=0).reset_index()\n",
    "print(\"TASK TIME (fit_time + pred_time):\\n\")\n",
    "print(pivot_table.to_string())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7cb0e10e-6825-4764-84f9-934d1ed4e71e",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "\n",
    "# Distribution of EBM fit time relative to XGBoost fit time across tasks.\n",
    "fit_times = results_df[results_df['name'] == 'fit_time']\n",
    "fit_times = fit_times.pivot_table('num_val', 'task', ['method', 'meta'])\n",
    "fit_times = fit_times.dropna()  # keep only tasks where every method has a timing\n",
    "fit_times[\"ratios\"] = fit_times[('ebm', '{}')] / fit_times[('xgb', '{}')]\n",
    "\n",
    "fit_times_deciles = np.percentile(fit_times[\"ratios\"], [90, 80, 70, 60, 50, 40, 30, 20, 10])\n",
    "fit_times_deciles = [f\"{decile:.2f}  \" for decile in fit_times_deciles]\n",
    "max_ratio = fit_times[\"ratios\"].max()\n",
    "min_ratio = fit_times[\"ratios\"].min()\n",
    "print(\"FIT TIME RATIO DECILES:\\n\")\n",
    "print(*fit_times_deciles)\n",
    "print(f\"max: {max_ratio:.2f}\")\n",
    "print(f\"min: {min_ratio:.2f}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cf52d725-bf78-400b-99c3-f2600bf2bc4f",
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import matplotlib.pyplot as plt\n",
    "import numpy as np\n",
    "import json\n",
    "import ast\n",
    "\n",
    "n_histogram_bins = 20\n",
    "# Hyperparameters whose histograms are binned and drawn on a log scale.\n",
    "log_scale_params = [\n",
    "    'smoothing_rounds', 'max_bins', 'max_interaction_bins',\n",
    "    'interaction_smoothing_rounds', 'learning_rate',\n",
    "    'min_samples_leaf', 'min_hessian', 'early_stopping_tolerance'\n",
    "]\n",
    "\n",
    "# Each 'opt' row's json_val holds a python dict literal of the tuned\n",
    "# hyperparameters; parse it safely with ast.literal_eval.\n",
    "ebm_opt_df = results_df[(results_df['method'] == 'ebm_opt') & (results_df['name'] == 'opt')]['json_val']\n",
    "json_dicts = ebm_opt_df.apply(ast.literal_eval)\n",
    "\n",
    "# Average each hyperparameter over the runs where it appears. Tracking\n",
    "# (sum, count) per key keeps the average correct even if some runs did not\n",
    "# tune every parameter.\n",
    "avg_dict = {}\n",
    "for d in json_dicts:\n",
    "    for key, value in d.items():\n",
    "        total, count = avg_dict.get(key, (0.0, 0))\n",
    "        avg_dict[key] = (total + value, count + 1)\n",
    "avg_dict = {key: total / count for key, (total, count) in avg_dict.items()}\n",
    "\n",
    "print(avg_dict)\n",
    "\n",
    "for key in avg_dict.keys():\n",
    "    # Skip dicts that lack this key instead of raising a KeyError.\n",
    "    values = [d[key] for d in json_dicts if key in d]\n",
    "    low = min(values)\n",
    "    high = max(values)\n",
    "\n",
    "    plt.figure(figsize=(10, 5))\n",
    "\n",
    "    if key in log_scale_params:\n",
    "        bins = np.logspace(np.log10(low), np.log10(high), n_histogram_bins)\n",
    "        plt.hist(values, bins=bins, alpha=0.75)\n",
    "        plt.xscale('log')\n",
    "    else:\n",
    "        plt.hist(values, bins=n_histogram_bins, alpha=0.75)\n",
    "\n",
    "    plt.title(f'Histogram of {key}')\n",
    "    plt.xlabel(key)\n",
    "    plt.ylabel('Frequency')\n",
    "    plt.grid(True)\n",
    "\n",
    "    plt.axvline(x=low, color='r', linestyle='--', label=f'Low: {low}')\n",
    "    plt.axvline(x=high, color='g', linestyle='--', label=f'High: {high}')\n",
    "    plt.legend()\n",
    "\n",
    "    plt.show()\n",
    "    plt.close()  # release figure memory when looping over many parameters"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e2e25dfd-02e7-4e14-9187-fc5c7db3ca89",
   "metadata": {},
   "outputs": [],
   "source": [
    "import ast\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "\n",
    "# 'iterations' rows store a 2-D array as text with space separators and '][' \n",
    "# between rows; rewrite it into a valid python literal and parse it safely\n",
    "# (ast.literal_eval instead of eval, which would execute arbitrary code).\n",
    "iterations_df = results_df[results_df['name'] == 'iterations'].copy()\n",
    "iterations_df['iterations_array'] = iterations_df['str_val'].apply(lambda x: np.array(ast.literal_eval(x.replace('][', '],[').replace(' ', ','))))\n",
    "# Average the arrays across replicates of the same (task, method, meta) ...\n",
    "iterations_df = iterations_df.groupby(['task', 'method', 'meta'])['iterations_array'].apply(lambda x: np.mean(np.stack(x), axis=0)).reset_index()\n",
    "# ... then collapse each row's columns into a single mean per array entry.\n",
    "iterations_df['iterations_array'] = iterations_df['iterations_array'].apply(lambda x: x.mean(axis=1))\n",
    "\n",
    "# Overall per-method averages of the first two entries of each iterations\n",
    "# array; rows with shorter arrays are skipped via the len() guards.\n",
    "iterations_overall_df = iterations_df.groupby(['method', 'meta'])['iterations_array'].apply(\n",
    "    lambda x: pd.Series({\n",
    "        'avg_index_0': np.nanmean([arr[0] for arr in x if len(arr) > 0]) if any(len(arr) > 0 for arr in x) else np.nan,\n",
    "        'avg_index_1': np.nanmean([arr[1] for arr in x if len(arr) > 1]) if any(len(arr) > 1 for arr in x) else np.nan\n",
    "    })\n",
    ").reset_index()\n",
    "iterations_overall_df = iterations_overall_df.pivot_table(columns='level_2', index=['method', 'meta'], values='iterations_array').reset_index()\n",
    "print(\"EBM ITERATIONS:\\n\")\n",
    "print(iterations_overall_df.to_string(index=False))\n",
    "\n",
    "# The same summary broken out per dataset, plus the index-0 / index-1 ratio.\n",
    "iterations_df = iterations_df.groupby(['task', 'method', 'meta'])['iterations_array'].apply(\n",
    "    lambda x: pd.Series({\n",
    "        'avg_index_0': np.nanmean([arr[0] for arr in x if len(arr) > 0]) if any(len(arr) > 0 for arr in x) else np.nan,\n",
    "        'avg_index_1': np.nanmean([arr[1] for arr in x if len(arr) > 1]) if any(len(arr) > 1 for arr in x) else np.nan\n",
    "    })\n",
    ").reset_index()\n",
    "iterations_df = iterations_df.pivot_table(columns='level_3', index=['task', 'method', 'meta'], values='iterations_array').reset_index()\n",
    "iterations_df['ratio'] = iterations_df['avg_index_0'] / iterations_df['avg_index_1']\n",
    "print(\"\\nEBM ITERATIONS per dataset:\\n\")\n",
    "print(iterations_df.to_string(index=False))"
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
