{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e6bc27b1-7903-4d36-9198-190923b85d71",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Pin exact versions of these in order to preserve RANK ordering better.\n",
    "# Use %pip (not !pip) so the install targets the running kernel's environment.\n",
    "%pip install -U --quiet numpy==1.26.4 pandas==2.2.2 scikit-learn==1.5.1 xgboost==2.1.0"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8489becf-d522-42a0-af6e-ec99c12d6a87",
   "metadata": {},
   "outputs": [],
   "source": [
    "# install interpret if not already installed\n",
    "try:\n",
    "    import interpret\n",
    "except ModuleNotFoundError:\n",
    "    # %pip (rather than !pip) installs into the active kernel's environment\n",
    "    %pip install -U --quiet interpret-core"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a5674068-d971-49df-b8a1-60bf91441def",
   "metadata": {},
   "outputs": [],
   "source": [
    "# install powerlift if not already installed\n",
    "\n",
    "# !! IMPORTANT !! : until the next release, install locally with \"pip install -e .[datasets,postgres]\" from powerlift directory\n",
    "\n",
    "try:\n",
    "    import powerlift\n",
    "except ModuleNotFoundError:\n",
    "    # %pip (rather than !pip) installs into the active kernel's environment\n",
    "    %pip install -U --quiet powerlift[datasets,postgres]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0e7cfe45-d0f1-4951-953e-cafefee1ae2d",
   "metadata": {},
   "outputs": [],
   "source": [
    "def trial_filter(task):\n",
    "    min_samples = 1\n",
    "    max_samples = 1000000000000\n",
    "    min_features = 1\n",
    "    max_features = 1000000000000\n",
    "    if task.scalar_measure(\"n_rows\") < min_samples:\n",
    "        return []\n",
    "    if max_samples < task.scalar_measure(\"n_rows\"):\n",
    "        return []\n",
    "    if task.scalar_measure(\"n_cols\") < min_features:\n",
    "        return []\n",
    "    if max_features < task.scalar_measure(\"n_cols\"):\n",
    "        return []\n",
    "\n",
    "    \n",
    "    if task.origin == \"openml_automl_regression\":\n",
    "        pass  # include in benchmark\n",
    "    elif task.origin == \"openml_automl_classification\":\n",
    "        return []\n",
    "    elif task.origin == \"openml_cc18\":\n",
    "        pass  # include in benchmark\n",
    "    elif task.origin == \"pmlb\":\n",
    "        if task.problem == \"binary\":\n",
    "            return []\n",
    "        elif task.problem == \"multiclass\":\n",
    "            return []\n",
    "        elif task.problem == \"regression\":\n",
    "            return []\n",
    "        else:\n",
    "            raise Exception(f\"Unrecognized problem {task.problem}\")\n",
    "    else:\n",
    "        raise Exception(f\"Unrecognized origin {task.origin}\")\n",
    "\n",
    "    \n",
    "    exclude_set = set()\n",
    "#    exclude_set = set(['isolet', 'Devnagari-Script', 'CIFAR_10', 'Airlines_DepDelay_10M'])\n",
    "#    exclude_set = set([\n",
    "#        'Fashion-MNIST', 'mfeat-pixel', 'Bioresponse',\n",
    "#        'mfeat-factors', 'isolet', 'cnae-9', \"Internet-Advertisements\",\n",
    "#        'har', 'Devnagari-Script', 'mnist_784', 'CIFAR_10',\n",
    "#        'Airlines_DepDelay_10M',\n",
    "#    ])\n",
    "    if task.name in exclude_set:\n",
    "        return []\n",
    "\n",
    "\n",
    "    # exclude duplicates of a dataset if they appear twice\n",
    "    global global_duplicates\n",
    "    try:\n",
    "        duplicates = global_duplicates\n",
    "    except NameError:\n",
    "        duplicates = set()\n",
    "        global_duplicates = duplicates\n",
    "    key = (task.name, task.scalar_measure(\"n_rows\"), task.scalar_measure(\"n_cols\"))\n",
    "    if key in duplicates:\n",
    "        print(f\"Excluding duplicate: {key}\")\n",
    "        return []\n",
    "    else:\n",
    "        duplicates.add(key)\n",
    "\n",
    "\n",
    "    return [\n",
    "        \"xgboost-base\",\n",
    "        \"ebm-base\",\n",
    "    ]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8b43ca15-4a20-4622-a877-0a5af322b2cb",
   "metadata": {},
   "outputs": [],
   "source": [
    "def trial_runner(trial):\n",
    "    \"\"\"Run one benchmark trial: fit a model, predict, and log metrics.\n",
    "\n",
    "    Trains XGBoost or an EBM (selected by trial.method.name) on a 70/30\n",
    "    train/test split of the trial's dataset, logs fit_time and\n",
    "    predict_time, and logs an evaluation score: AUC + log loss for\n",
    "    binary, weighted OVO AUC + cross entropy for multiclass, and\n",
    "    NRMSE-IQR for regression.\n",
    "    \"\"\"\n",
    "    seed=42\n",
    "    max_interaction_features=1000\n",
    "    ebm_base_params = {}\n",
    "    xgb_base_params = {}\n",
    "    # ebm_base_params = {\"max_rounds\":2, \"interactions\":0}\n",
    "    # xgb_base_params = {\"n_estimators\":1}\n",
    "\n",
    "    if max_interaction_features < trial.task.scalar_measure(\"n_cols\"):\n",
    "        # TODO: EBMs can crash for now with too many interactions, so limit it until we have better fix\n",
    "        ebm_base_params[\"interactions\"] = 0\n",
    "\n",
    "    from xgboost import XGBClassifier, XGBRegressor\n",
    "    from interpret.glassbox import ExplainableBoostingClassifier, ExplainableBoostingRegressor\n",
    "    from sklearn.metrics import roc_auc_score, root_mean_squared_error, log_loss\n",
    "    from sklearn.model_selection import train_test_split\n",
    "    import numpy as np\n",
    "    from time import time\n",
    "    import warnings\n",
    "\n",
    "    X, y, meta = trial.task.data([\"X\", \"y\", \"meta\"])\n",
    "\n",
    "    categoricals = meta[\"categorical_mask\"]\n",
    "    # XGB and EBM already handle this via CategoricalDtype but make it clear\n",
    "    xgb_feature_types = [\"c\" if cat else \"q\" for cat in categoricals]\n",
    "    ebm_feature_types = [\"nominal\" if cat else \"continuous\" for cat in categoricals]\n",
    "\n",
    "    stratification = None\n",
    "    if trial.task.problem in [\"binary\", \"multiclass\"]:\n",
    "        # TODO: consider eliminating stratification, although keep it for now if it decreases variance between seeds\n",
    "        #\n",
    "        # Use stratified, otherwise eval can fail if one of the classes is not in the training set\n",
    "        # Also, stratified probably decreases the variance between benchmarks when the random seed changes\n",
    "        stratification = y\n",
    "    \n",
    "    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, stratify=stratification, random_state=seed)\n",
    "\n",
    "    # Specify method\n",
    "    if trial.task.problem in [\"binary\", \"multiclass\"]:\n",
    "        if trial.method.name == \"xgboost-base\":\n",
    "            est = XGBClassifier(enable_categorical=True, feature_types=xgb_feature_types, **xgb_base_params)\n",
    "        elif trial.method.name == \"ebm-base\":\n",
    "            est = ExplainableBoostingClassifier(feature_types=ebm_feature_types, **ebm_base_params)\n",
    "        else:\n",
    "            raise Exception(f\"Unrecognized method name {trial.method.name}\")\n",
    "\n",
    "        predict_fn = est.predict_proba\n",
    "    elif trial.task.problem == \"regression\":\n",
    "        if trial.method.name == \"xgboost-base\":\n",
    "            est = XGBRegressor(enable_categorical=True, feature_types=xgb_feature_types, **xgb_base_params)\n",
    "        elif trial.method.name == \"ebm-base\":\n",
    "            est = ExplainableBoostingRegressor(feature_types=ebm_feature_types, **ebm_base_params)\n",
    "        else:\n",
    "            raise Exception(f\"Unrecognized method name {trial.method.name}\")\n",
    "\n",
    "        predict_fn = est.predict\n",
    "    else:\n",
    "        raise Exception(f\"Unrecognized problem {trial.task.problem}\")\n",
    "\n",
    "    # process-wide counter used only for the FIT progress line below\n",
    "    global global_counter\n",
    "    try:\n",
    "        global_counter += 1\n",
    "    except NameError:\n",
    "        global_counter = 0\n",
    "    \n",
    "    # Train\n",
    "    print(f\"FIT: {global_counter}, {trial.task.origin}, {trial.task.name}, {trial.method.name}, \", end=\"\")\n",
    "    with warnings.catch_warnings():\n",
    "        warnings.filterwarnings(\"ignore\")\n",
    "        start_time = time()\n",
    "        est.fit(X_train, y_train)\n",
    "        elapsed_time = time() - start_time\n",
    "    trial.log(\"fit_time\", elapsed_time)\n",
    "    \n",
    "    # Predict\n",
    "    start_time = time()\n",
    "    predictions = predict_fn(X_test)\n",
    "    elapsed_time = time() - start_time\n",
    "    trial.log(\"predict_time\", elapsed_time)\n",
    "\n",
    "    if trial.task.problem == \"binary\":\n",
    "        # keep only the positive-class probability column\n",
    "        predictions = predictions[:,1]\n",
    "\n",
    "        eval_score = roc_auc_score(y_test, predictions)\n",
    "        trial.log(\"auc\", eval_score)\n",
    "\n",
    "        eval_score2 = log_loss(y_test, predictions)\n",
    "        trial.log(\"log_loss\", eval_score2)\n",
    "    elif trial.task.problem == \"multiclass\":\n",
    "        eval_score = roc_auc_score(y_test, predictions, average=\"weighted\", multi_class=\"ovo\")\n",
    "        trial.log(\"multi_auc\", eval_score)\n",
    "\n",
    "        eval_score2 = log_loss(y_test, predictions)\n",
    "        trial.log(\"cross_entropy\", eval_score2)\n",
    "    elif trial.task.problem == \"regression\":\n",
    "        # Use NRMSE-IQR (normalized root mean square error by the interquartile range)\n",
    "        # so that datasets with large predicted values do not dominate the benchmark\n",
    "        # and the range is not sensitive to outliers. The rank is identical to RMSE.\n",
    "        # https://en.wikipedia.org/wiki/Root_mean_square_deviation\n",
    "\n",
    "        # Get quartile_range from the full dataset for consistency across seeds.\n",
    "        q75, q25 = np.percentile(y, [75, 25])\n",
    "        interquartile_range = q75 - q25\n",
    "\n",
    "        # NOTE(review): a (near-)constant target would make the IQR 0 and divide\n",
    "        # by zero here — assumed not to occur in these datasets; confirm\n",
    "        eval_score = root_mean_squared_error(y_test, predictions) / interquartile_range\n",
    "        trial.log(\"nrmse\", eval_score)\n",
    "    else:\n",
    "        raise Exception(f\"Unrecognized problem {trial.task.problem}\")\n",
    "\n",
    "    print(eval_score)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6c226e77-753a-4e1c-bb6b-d80156845785",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Run the benchmark end to end: create the store, load datasets, run the\n",
    "# trials, and write per-trial results to a CSV named after the experiment id.\n",
    "\n",
    "# Passed to Store() below; name suggests it rebuilds powerlift.db when True.\n",
    "force_recreate=False\n",
    "# Passed to populate_with_datasets() — presumably skips datasets already present.\n",
    "exist_ok=True\n",
    "\n",
    "import uuid\n",
    "# unique suffix so repeated runs create distinct experiments in the store\n",
    "exp_id = str(uuid.uuid4())\n",
    "experiment_name = \"myexperiment\" + \"__\" + exp_id\n",
    "print(\"Experiment name: \" + experiment_name)\n",
    "\n",
    "from powerlift.bench import retrieve_openml_automl_regression, retrieve_openml_automl_classification, retrieve_openml_cc18, retrieve_catboost_50k, retrieve_pmlb\n",
    "from powerlift.bench import Benchmark, Store, populate_with_datasets\n",
    "from powerlift.executors import LocalMachine\n",
    "from itertools import chain\n",
    "import os\n",
    "\n",
    "# Initialize database (if needed).\n",
    "store = Store(f\"sqlite:///{os.getcwd()}/powerlift.db\", force_recreate=force_recreate)\n",
    "\n",
    "# local download cache used by the dataset retrievers below\n",
    "cache_dir=\"~/.powerlift\"\n",
    "data_retrieval = chain(\n",
    "    retrieve_openml_automl_regression(cache_dir=cache_dir),\n",
    "    # retrieve_openml_automl_classification(cache_dir=cache_dir),\n",
    "    retrieve_openml_cc18(cache_dir=cache_dir),\n",
    "    # retrieve_catboost_50k(cache_dir=cache_dir),\n",
    "    # retrieve_pmlb(cache_dir=cache_dir),\n",
    ")\n",
    "\n",
    "# This downloads datasets once and feeds into the database.\n",
    "populate_with_datasets(store, data_retrieval, exist_ok=exist_ok)\n",
    "\n",
    "# Run experiment\n",
    "benchmark = Benchmark(store, name=experiment_name)\n",
    "benchmark.run(trial_runner, trial_filter, executor=LocalMachine(store, debug_mode=True))\n",
    "\n",
    "benchmark.wait_until_complete()\n",
    "\n",
    "results_df = benchmark.results()\n",
    "results_df.to_csv(f\"results-{exp_id}.csv\", index=None)\n",
    "\n",
    "# Surface any per-trial error messages, then summarize trial status counts.\n",
    "status_df = benchmark.status()\n",
    "for errmsg in status_df[\"errmsg\"]:\n",
    "    if errmsg is not None:\n",
    "        print(\"ERROR: \" + str(errmsg))\n",
    "print(status_df['status'].value_counts().to_string(index=True, header=False))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e5745059-f747-4c02-8720-0b2d49aa4261",
   "metadata": {},
   "outputs": [],
   "source": [
    "# re-establish connection\n",
    "#benchmark = Benchmark(conn_str, name=experiment_name)\n",
    "\n",
    "# reload if analyzing later\n",
    "# NOTE(review): relies on exp_id from the run cell above; in a fresh kernel,\n",
    "# set exp_id manually to the id embedded in the results CSV filename\n",
    "import pandas as pd\n",
    "results_df = pd.read_csv(f\"results-{exp_id}.csv\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2716b066-7d8b-4163-8d5d-a1b7f0274dbb",
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "\n",
    "# Mean value of every logged metric, one row per method.\n",
    "metric_means = (\n",
    "    results_df\n",
    "    .groupby(['method', 'name'])['num_val']\n",
    "    .mean()\n",
    "    .unstack()\n",
    "    .reset_index()\n",
    ")\n",
    "\n",
    "# Per-metric rank of each method within every task, averaged across tasks.\n",
    "per_metric_ranks = (\n",
    "    results_df\n",
    "    .pivot_table('num_val', ['task', 'name'], 'method')\n",
    "    .rank(axis=1, ascending=True, method='min')\n",
    "    .groupby('name')\n",
    "    .mean()\n",
    "    .transpose()\n",
    ")\n",
    "per_metric_ranks.columns = [f\"{col}_RANK\" for col in per_metric_ranks.columns]\n",
    "per_metric_ranks = per_metric_ranks.reset_index()\n",
    "\n",
    "# Overall RANK is the mean per-task rank over the loss-style metrics only.\n",
    "loss_rows = results_df[results_df['name'].isin(['log_loss', 'cross_entropy', 'nrmse'])]\n",
    "overall = (\n",
    "    loss_rows\n",
    "    .pivot_table('num_val', 'task', 'method')\n",
    "    .rank(axis=1, ascending=True, method='min')\n",
    "    .mean()\n",
    "    .to_frame(name='RANK')\n",
    "    .reset_index()\n",
    ")\n",
    "\n",
    "report_columns = ['method', 'RANK', 'auc', 'multi_auc', 'nrmse', 'log_loss_RANK', 'cross_entropy_RANK', 'nrmse_RANK', 'fit_time', 'predict_time']\n",
    "combined_df = (\n",
    "    metric_means\n",
    "    .merge(per_metric_ranks, on='method')\n",
    "    .merge(overall, on='method')\n",
    "    .sort_values(by='RANK')\n",
    "    .reindex(columns=report_columns)\n",
    ")\n",
    "\n",
    "print(combined_df.to_string(index=False))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7c2df600-d3e0-430e-8dda-836c09825987",
   "metadata": {},
   "outputs": [],
   "source": [
    "# How many trials contributed to each metric, in the same row order as the\n",
    "# combined table above.\n",
    "count_columns = ['method', 'RANK', 'auc', 'multi_auc', 'nrmse', 'log_loss', 'cross_entropy', 'fit_time', 'predict_time']\n",
    "method_order = combined_df[\"method\"]\n",
    "\n",
    "trial_counts = (\n",
    "    results_df\n",
    "    .groupby(['method', 'name'])\n",
    "    .size()\n",
    "    .unstack()\n",
    "    .reindex(method_order, axis=0)\n",
    "    .reset_index()\n",
    ")\n",
    "# RANK counts every trial that fed into the overall RANK metric above.\n",
    "trial_counts['RANK'] = trial_counts['log_loss'] + trial_counts['cross_entropy'] + trial_counts['nrmse']\n",
    "trial_counts = trial_counts.reindex(columns=count_columns)\n",
    "print(trial_counts.to_string(index=False))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "51e78875-935d-4fcd-847b-307be385b3cc",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "\n",
    "# Distribution of EBM-vs-XGBoost fit-time ratios across tasks that have both.\n",
    "fit_times = (\n",
    "    results_df[results_df['name'] == 'fit_time']\n",
    "    .pivot_table('num_val', 'task', 'method')\n",
    "    .dropna()\n",
    ")\n",
    "fit_times[\"ratios\"] = fit_times['ebm-base'] / fit_times['xgboost-base']\n",
    "\n",
    "decile_values = np.percentile(fit_times[\"ratios\"], [90, 80, 70, 60, 50, 40, 30, 20, 10])\n",
    "formatted_deciles = [f\"{decile:.2f}  \" for decile in decile_values]\n",
    "\n",
    "print(\"fit time ratio deciles:\")\n",
    "print(*formatted_deciles)\n",
    "print(f\"max: {fit_times['ratios'].max():.2f}\")\n",
    "print(f\"min: {fit_times['ratios'].min():.2f}\")"
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
