{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "bae5ff9c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "numpy       : 1.21.2\n",
      "pandas      : 1.2.3\n",
      "scikit-learn: 1.0\n",
      "xgboost     : 1.3.3\n",
      "lightgbm    : 3.2.1\n",
      "catboost    : 0.26.1\n",
      "\n"
     ]
    }
   ],
   "source": [
    "%load_ext watermark\n",
    "%watermark -p numpy,pandas,scikit-learn,xgboost,lightgbm,catboost"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "6c93fa42",
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a6ccb913",
   "metadata": {},
   "source": [
    "# Demo Notebook Illustrating How To Use Common Gradient Boosting Implementations With Categorical Data"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0649740c",
   "metadata": {},
   "source": [
    "# Dataset Loading"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b2cbe111",
   "metadata": {},
   "source": [
    "- To keep things simple, we will be using the Titanic dataset. Consequently, please don't overinterpret the predictive performance values. This is more intended as a technical demo/reference for how to use categorical support, not how to achieve good predictive performance.\n",
    "- Titanic dataset reference: https://www.openml.org/d/40945"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "1c5ead9a",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>pclass</th>\n",
       "      <th>survived</th>\n",
       "      <th>name</th>\n",
       "      <th>sex</th>\n",
       "      <th>age</th>\n",
       "      <th>sibsp</th>\n",
       "      <th>parch</th>\n",
       "      <th>ticket</th>\n",
       "      <th>fare</th>\n",
       "      <th>cabin</th>\n",
       "      <th>embarked</th>\n",
       "      <th>boat</th>\n",
       "      <th>body</th>\n",
       "      <th>home.dest</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>Allen, Miss. Elisabeth Walton</td>\n",
       "      <td>female</td>\n",
       "      <td>29</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>24160</td>\n",
       "      <td>211.3375</td>\n",
       "      <td>B5</td>\n",
       "      <td>S</td>\n",
       "      <td>2</td>\n",
       "      <td>?</td>\n",
       "      <td>St Louis, MO</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>Allison, Master. Hudson Trevor</td>\n",
       "      <td>male</td>\n",
       "      <td>0.9167</td>\n",
       "      <td>1</td>\n",
       "      <td>2</td>\n",
       "      <td>113781</td>\n",
       "      <td>151.55</td>\n",
       "      <td>C22 C26</td>\n",
       "      <td>S</td>\n",
       "      <td>11</td>\n",
       "      <td>?</td>\n",
       "      <td>Montreal, PQ / Chesterville, ON</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>Allison, Miss. Helen Loraine</td>\n",
       "      <td>female</td>\n",
       "      <td>2</td>\n",
       "      <td>1</td>\n",
       "      <td>2</td>\n",
       "      <td>113781</td>\n",
       "      <td>151.55</td>\n",
       "      <td>C22 C26</td>\n",
       "      <td>S</td>\n",
       "      <td>?</td>\n",
       "      <td>?</td>\n",
       "      <td>Montreal, PQ / Chesterville, ON</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>Allison, Mr. Hudson Joshua Creighton</td>\n",
       "      <td>male</td>\n",
       "      <td>30</td>\n",
       "      <td>1</td>\n",
       "      <td>2</td>\n",
       "      <td>113781</td>\n",
       "      <td>151.55</td>\n",
       "      <td>C22 C26</td>\n",
       "      <td>S</td>\n",
       "      <td>?</td>\n",
       "      <td>135</td>\n",
       "      <td>Montreal, PQ / Chesterville, ON</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>Allison, Mrs. Hudson J C (Bessie Waldo Daniels)</td>\n",
       "      <td>female</td>\n",
       "      <td>25</td>\n",
       "      <td>1</td>\n",
       "      <td>2</td>\n",
       "      <td>113781</td>\n",
       "      <td>151.55</td>\n",
       "      <td>C22 C26</td>\n",
       "      <td>S</td>\n",
       "      <td>?</td>\n",
       "      <td>?</td>\n",
       "      <td>Montreal, PQ / Chesterville, ON</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   pclass  survived                                             name     sex  \\\n",
       "0       1         1                    Allen, Miss. Elisabeth Walton  female   \n",
       "1       1         1                   Allison, Master. Hudson Trevor    male   \n",
       "2       1         0                     Allison, Miss. Helen Loraine  female   \n",
       "3       1         0             Allison, Mr. Hudson Joshua Creighton    male   \n",
       "4       1         0  Allison, Mrs. Hudson J C (Bessie Waldo Daniels)  female   \n",
       "\n",
       "      age  sibsp  parch  ticket      fare    cabin embarked boat body  \\\n",
       "0      29      0      0   24160  211.3375       B5        S    2    ?   \n",
       "1  0.9167      1      2  113781    151.55  C22 C26        S   11    ?   \n",
       "2       2      1      2  113781    151.55  C22 C26        S    ?    ?   \n",
       "3      30      1      2  113781    151.55  C22 C26        S    ?  135   \n",
       "4      25      1      2  113781    151.55  C22 C26        S    ?    ?   \n",
       "\n",
       "                         home.dest  \n",
       "0                     St Louis, MO  \n",
       "1  Montreal, PQ / Chesterville, ON  \n",
       "2  Montreal, PQ / Chesterville, ON  \n",
       "3  Montreal, PQ / Chesterville, ON  \n",
       "4  Montreal, PQ / Chesterville, ON  "
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df = pd.read_csv('titanic.csv', sep=',')\n",
    "df.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "18c43fee",
   "metadata": {},
   "source": [
    "### Remove rows with missing values"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "f1710d2d",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "embarked    2\n",
       "dtype: int64"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "(df[['embarked']] == '?').sum()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "a244f663",
   "metadata": {},
   "outputs": [],
   "source": [
    "df = df[~(df['embarked'] == '?')]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "296aa28f",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "fare    1\n",
       "dtype: int64"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "(df[['fare']] == '?').sum()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "8832515c",
   "metadata": {},
   "outputs": [],
   "source": [
    "df = df[~(df['fare'] == '?')]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "74927bec",
   "metadata": {},
   "outputs": [],
   "source": [
    "df['fare'] = df['fare'].astype(float)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d47c5c9b",
   "metadata": {},
   "source": [
    "### Convert to array format"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6ec7f2e8",
   "metadata": {},
   "source": [
    "- To keep things simple, we will only use a few columns in this dataset:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "64c787c4",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[1, 'female', 211.3375, 'S'],\n",
       "       [1, 'male', 151.55, 'S'],\n",
       "       [1, 'female', 151.55, 'S'],\n",
       "       [1, 'male', 151.55, 'S'],\n",
       "       [1, 'female', 151.55, 'S'],\n",
       "       [1, 'male', 26.55, 'S'],\n",
       "       [1, 'female', 77.9583, 'S'],\n",
       "       [1, 'male', 0.0, 'S'],\n",
       "       [1, 'female', 51.4792, 'S'],\n",
       "       [1, 'male', 49.5042, 'C']], dtype=object)"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "y = df['survived'].values\n",
    "\n",
    "feature_names = ['pclass', 'sex', 'fare', 'embarked']\n",
    "\n",
    "X = df[feature_names].values\n",
    "\n",
    "X[:10]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "b8493cb1",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([808, 498])"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "np.bincount(y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "d21683fb",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "pclass: [1 2 3]\n",
      "sex: ['female' 'male']\n",
      "fare: [  0.       3.1708   4.0125   5.       6.2375   6.4375   6.45     6.4958\n",
      "   6.75     6.8583   6.95     6.975    7.       7.0458   7.05     7.0542\n",
      "   7.125    7.1417   7.225    7.2292   7.25     7.2833   7.3125   7.4958\n",
      "   7.5208   7.55     7.575    7.5792   7.6292   7.65     7.7208   7.725\n",
      "   7.7292   7.7333   7.7375   7.7417   7.75     7.775    7.7792   7.7875\n",
      "   7.7958   7.8      7.8208   7.8292   7.85     7.8542   7.875    7.8792\n",
      "   7.8875   7.8958   7.925    8.0292   8.05     8.1125   8.1375   8.1583\n",
      "   8.3      8.3625   8.4042   8.4333   8.4583   8.5167   8.6542   8.6625\n",
      "   8.6833   8.7125   8.85     8.9625   9.       9.2167   9.225    9.325\n",
      "   9.35     9.475    9.4833   9.5      9.5875   9.6875   9.825    9.8375\n",
      "   9.8417   9.8458  10.1708  10.4625  10.5     10.5167  10.7083  11.1333\n",
      "  11.2417  11.5     12.      12.1833  12.275   12.2875  12.35    12.475\n",
      "  12.525   12.65    12.7375  12.875   13.      13.4167  13.5     13.775\n",
      "  13.7917  13.8583  13.8625  13.9     14.      14.1083  14.4     14.4542\n",
      "  14.4583  14.5     15.      15.0333  15.0458  15.05    15.1     15.2458\n",
      "  15.5     15.55    15.5792  15.7417  15.75    15.85    15.9     16.\n",
      "  16.1     16.7     17.4     17.8     18.      18.75    18.7875  19.2583\n",
      "  19.5     19.9667  20.2125  20.25    20.525   20.575   21.      21.075\n",
      "  21.6792  22.025   22.3583  22.525   23.      23.25    23.45    24.\n",
      "  24.15    25.4667  25.5875  25.7     25.7417  25.925   25.9292  26.\n",
      "  26.25    26.2833  26.2875  26.3875  26.55    27.      27.4458  27.7208\n",
      "  27.75    27.9     28.5     28.5375  28.7125  29.      29.125   29.7\n",
      "  30.      30.0708  30.5     30.6958  31.      31.275   31.3875  31.5\n",
      "  31.6792  31.6833  32.3208  32.5     33.      33.5     34.0208  34.375\n",
      "  34.6542  35.      35.5     36.75    37.0042  38.5     39.      39.4\n",
      "  39.6     39.6875  40.125   41.5792  42.4     42.5     45.5     46.9\n",
      "  47.1     49.5     49.5042  50.      50.4958  51.4792  51.8625  52.\n",
      "  52.5542  53.1     55.      55.4417  55.9     56.4958  56.9292  57.\n",
      "  57.75    57.9792  59.4     60.      61.175   61.3792  61.9792  63.3583\n",
      "  65.      66.6     69.3     69.55    71.      71.2833  73.5     75.2417\n",
      "  75.25    76.2917  76.7292  77.2875  77.9583  78.2667  78.85    79.2\n",
      "  79.65    81.8583  82.1708  82.2667  83.1583  83.475   86.5     89.1042\n",
      "  90.      91.0792  93.5    106.425  108.9    110.8833 113.275  120.\n",
      " 133.65   134.5    135.6333 136.7792 146.5208 151.55   153.4625 164.8667\n",
      " 211.3375 211.5    221.7792 227.525  247.5208 262.375  263.     512.3292]\n",
      "embarked: ['C' 'Q' 'S']\n"
     ]
    }
   ],
   "source": [
    "for i in feature_names:\n",
    "    print(f'{i}: {np.unique(df[i].values)}')"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "07979956",
   "metadata": {},
   "source": [
     "- In this dataset, `'sex'` is a binary variable with only two values, so using categorical or onehot encoding is not necessary. However, we will do it anyway for demo purposes.\n",
    "- Here, `'embarked'` is a categorical variable with 3 possible values."
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5f1e7626",
   "metadata": {},
   "source": [
    "### Onehot encoder pipeline"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3589fea4",
   "metadata": {},
   "source": [
    "- The OneHot encoder pipeline encodes `'sex'` and `'embarked'` into a onehot-encoded form. The remaining features remain unchanged."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "a97b917f",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[  0.    ,   0.    ,   1.    ,   1.    , 211.3375],\n",
       "       [  1.    ,   0.    ,   1.    ,   1.    , 151.55  ],\n",
       "       [  0.    ,   0.    ,   1.    ,   1.    , 151.55  ],\n",
       "       ...,\n",
       "       [  1.    ,   0.    ,   0.    ,   3.    ,   7.225 ],\n",
       "       [  1.    ,   0.    ,   0.    ,   3.    ,   7.225 ],\n",
       "       [  1.    ,   0.    ,   1.    ,   3.    ,   7.875 ]])"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from sklearn.pipeline import make_pipeline\n",
    "from sklearn.preprocessing import OneHotEncoder\n",
    "from sklearn.compose import ColumnTransformer\n",
    "\n",
    "\n",
    "ohe_features = ['sex', 'embarked']\n",
    "ohe_transformer = make_pipeline(OneHotEncoder(drop='first'))\n",
    "\n",
    "ohe_preprocessor = ColumnTransformer(\n",
    "    transformers=[\n",
    "        ('ohe', ohe_transformer, ohe_features)],\n",
    "    remainder='passthrough')\n",
    "\n",
    "ohe_preprocessor.fit_transform(df[feature_names])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "91ab1c7f",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array(['ohe__sex_male', 'ohe__embarked_Q', 'ohe__embarked_S',\n",
       "       'remainder__pclass', 'remainder__fare'], dtype=object)"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "ohe_preprocessor.get_feature_names_out()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "7fe8a1e2",
   "metadata": {},
   "source": [
    "### Ordinal/Categorical encoder pipeline"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4b8a8b77",
   "metadata": {},
   "source": [
    "- This pipeline will convert the string encoding of `'sex'` and `'embarked'` into an integer format."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "11201035",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[  0.    ,   2.    ,   1.    , 211.3375],\n",
       "       [  1.    ,   2.    ,   1.    , 151.55  ],\n",
       "       [  0.    ,   2.    ,   1.    , 151.55  ],\n",
       "       ...,\n",
       "       [  1.    ,   0.    ,   3.    ,   7.225 ],\n",
       "       [  1.    ,   0.    ,   3.    ,   7.225 ],\n",
       "       [  1.    ,   2.    ,   3.    ,   7.875 ]])"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from sklearn.preprocessing import OrdinalEncoder\n",
    "\n",
    "\n",
    "cat_features = ['sex', 'embarked']\n",
    "cat_transformer = make_pipeline(OrdinalEncoder())\n",
    "\n",
    "cat_preprocessor = ColumnTransformer(\n",
    "    transformers=[\n",
    "        ('cat', cat_transformer, cat_features)],\n",
    "    remainder='passthrough')\n",
    "\n",
    "cat_preprocessor.fit_transform(df[feature_names])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a51145c5",
   "metadata": {},
   "source": [
    "- Note that feature index 0 corresponds to `'sex'`, and feature index 1 to `'embarked'`"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "7358f5a5",
   "metadata": {},
   "source": [
    "### Train/Valid/Test splits"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d5d8ba89",
   "metadata": {},
   "source": [
    "- Next, we are splitting the dataset into the usual subsets."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "cb3f4403",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train/Valid/Test sizes: 783 261 262\n"
     ]
    }
   ],
   "source": [
    "from sklearn.model_selection import train_test_split\n",
    "\n",
    "\n",
    "df_X_temp, df_X_test, df_y_temp, df_y_test = \\\n",
    "    train_test_split(df[feature_names], df['survived'], test_size=0.20, random_state=123, stratify=df['survived'])\n",
    "\n",
    "df_X_train, df_X_valid, df_y_train, df_y_valid = \\\n",
    "    train_test_split(df_X_temp, df_y_temp, test_size=0.25, random_state=123, stratify=df_y_temp)\n",
    "\n",
    "print('Train/Valid/Test sizes:', df_y_train.shape[0], df_y_valid.shape[0], df_y_test.shape[0])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5dbafa2f",
   "metadata": {},
   "source": [
    "## Performance Baselines"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "88bce41f",
   "metadata": {},
   "source": [
    "### Majority class prediction"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "9dea26cc",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Test accuracy: 61.83%\n"
     ]
    }
   ],
   "source": [
    "bins = np.bincount(df_y_test)\n",
    "print(f'Test accuracy: {100* np.max(bins) / np.sum(bins):.2f}%', )"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "283d5f4b",
   "metadata": {},
   "source": [
    "### Decision Tree (Onehot)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "33702a4b",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training Accuracy: 90.80%\n",
      "Validation Accuracy: 77.01%\n",
      "Test Accuracy: 77.86%\n"
     ]
    }
   ],
   "source": [
    "from sklearn.tree import DecisionTreeClassifier\n",
    "\n",
    "\n",
    "tree = DecisionTreeClassifier(random_state=123)\n",
    "clf_pipe = make_pipeline(ohe_preprocessor, tree)\n",
    "\n",
    "clf_pipe.fit(df_X_train, df_y_train)\n",
    "\n",
    "print(f\"Training Accuracy: {100*clf_pipe.score(df_X_train, df_y_train):0.2f}%\")\n",
    "print(f\"Validation Accuracy: {100*clf_pipe.score(df_X_valid, df_y_valid):0.2f}%\")\n",
    "print(f\"Test Accuracy: {100*clf_pipe.score(df_X_test, df_y_test):0.2f}%\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6ac09d90",
   "metadata": {},
   "source": [
    "### Decision Tree (Ordinal)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a4f09d01",
   "metadata": {},
   "source": [
    "- \"Ordinal\" means that the `'embarked'` variable is treated as an ordinal variable due to the integer encoding."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "ad069769",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training Accuracy: 90.80%\n",
      "Validation Accuracy: 77.01%\n",
      "Test Accuracy: 77.48%\n"
     ]
    }
   ],
   "source": [
    "from sklearn.tree import DecisionTreeClassifier\n",
    "\n",
    "\n",
    "tree = DecisionTreeClassifier(random_state=123)\n",
    "clf_pipe = make_pipeline(cat_preprocessor, tree)\n",
    "\n",
    "clf_pipe.fit(df_X_train, df_y_train)\n",
    "\n",
    "print(f\"Training Accuracy: {100*clf_pipe.score(df_X_train, df_y_train):0.2f}%\")\n",
    "print(f\"Validation Accuracy: {100*clf_pipe.score(df_X_valid, df_y_valid):0.2f}%\")\n",
    "print(f\"Test Accuracy: {100*clf_pipe.score(df_X_test, df_y_test):0.2f}%\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "99be0f35",
   "metadata": {},
   "source": [
    "## Original gradient boosting (Onehot)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "088c891f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training Accuracy: 89.66%\n",
      "Validation Accuracy: 78.16%\n",
      "Test Accuracy: 78.24%\n"
     ]
    }
   ],
   "source": [
    "from sklearn.ensemble import GradientBoostingClassifier\n",
    "\n",
    "\n",
    "boost = GradientBoostingClassifier(\n",
    "    learning_rate=0.1,\n",
    "    n_estimators=100,\n",
    "    max_depth=4,\n",
    "    random_state=1)\n",
    "\n",
    "clf_pipe = make_pipeline(ohe_preprocessor, boost)\n",
    "clf_pipe.fit(df_X_train, df_y_train)\n",
    "    \n",
    "print(f\"Training Accuracy: {100*clf_pipe.score(df_X_train, df_y_train):0.2f}%\")\n",
    "print(f\"Validation Accuracy: {100*clf_pipe.score(df_X_valid, df_y_valid):0.2f}%\")\n",
    "print(f\"Test Accuracy: {100*clf_pipe.score(df_X_test, df_y_test):0.2f}%\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8699ca28",
   "metadata": {},
   "source": [
    "## Original gradient boosting (Ordinal)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "c7deb7e4",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training Accuracy: 89.27%\n",
      "Validation Accuracy: 79.31%\n",
      "Test Accuracy: 76.34%\n"
     ]
    }
   ],
   "source": [
    "from sklearn.ensemble import GradientBoostingClassifier\n",
    "\n",
    "\n",
    "boost = GradientBoostingClassifier(\n",
    "    learning_rate=0.1,\n",
    "    n_estimators=100,\n",
    "    max_depth=4,\n",
    "    random_state=1)\n",
    "\n",
    "clf_pipe = make_pipeline(cat_preprocessor, boost)\n",
    "clf_pipe.fit(df_X_train, df_y_train)\n",
    "    \n",
    "print(f\"Training Accuracy: {100*clf_pipe.score(df_X_train, df_y_train):0.2f}%\")\n",
    "print(f\"Validation Accuracy: {100*clf_pipe.score(df_X_valid, df_y_valid):0.2f}%\")\n",
    "print(f\"Test Accuracy: {100*clf_pipe.score(df_X_test, df_y_test):0.2f}%\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "36cb3408",
   "metadata": {},
   "source": [
    "## HistGradientBoostingClassifier (Onehot)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "9506d181",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training Accuracy: 87.74%\n",
      "Validation Accuracy: 77.01%\n",
      "Test Accuracy: 76.72%\n"
     ]
    }
   ],
   "source": [
    "#from sklearn.experimental import enable_hist_gradient_boosting\n",
    "from sklearn.ensemble import HistGradientBoostingClassifier\n",
    "\n",
    "\n",
    "boost = HistGradientBoostingClassifier(\n",
    "    learning_rate=0.1,\n",
    "    random_state=1)\n",
    "\n",
    "clf_pipe = make_pipeline(ohe_preprocessor, boost)\n",
    "clf_pipe.fit(df_X_train, df_y_train)\n",
    "    \n",
    "print(f\"Training Accuracy: {100*clf_pipe.score(df_X_train, df_y_train):0.2f}%\")\n",
    "print(f\"Validation Accuracy: {100*clf_pipe.score(df_X_valid, df_y_valid):0.2f}%\")\n",
    "print(f\"Test Accuracy: {100*clf_pipe.score(df_X_test, df_y_test):0.2f}%\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5942ca0a",
   "metadata": {},
   "source": [
    "## HistGradientBoostingClassifier (Ordinal)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "d3087934",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training Accuracy: 88.25%\n",
      "Validation Accuracy: 77.78%\n",
      "Test Accuracy: 77.10%\n"
     ]
    }
   ],
   "source": [
    "boost = HistGradientBoostingClassifier(\n",
    "    learning_rate=0.1,\n",
    "    random_state=1)\n",
    "\n",
    "clf_pipe = make_pipeline(cat_preprocessor, boost)\n",
    "clf_pipe.fit(df_X_train, df_y_train)\n",
    "    \n",
    "print(f\"Training Accuracy: {100*clf_pipe.score(df_X_train, df_y_train):0.2f}%\")\n",
    "print(f\"Validation Accuracy: {100*clf_pipe.score(df_X_valid, df_y_valid):0.2f}%\")\n",
    "print(f\"Test Accuracy: {100*clf_pipe.score(df_X_test, df_y_test):0.2f}%\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "886e3125",
   "metadata": {},
   "source": [
    "## HistGradientBoostingClassifier (Categorical)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "9b766142",
   "metadata": {},
   "source": [
     "- In contrast to the \"ordinal\" control above, the \"categorical\" section shows how to use the implemented support for categorical variables."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "e6bae1f0",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training Accuracy: 87.87%\n",
      "Validation Accuracy: 77.39%\n",
      "Test Accuracy: 78.63%\n"
     ]
    }
   ],
   "source": [
    "boost = HistGradientBoostingClassifier(\n",
    "    learning_rate=0.1,\n",
    "    categorical_features=[0, 1], # -> ['sex', 'embarked'],\n",
    "    random_state=1)\n",
    "\n",
    "clf_pipe = make_pipeline(cat_preprocessor, boost)\n",
    "clf_pipe.fit(df_X_train, df_y_train)\n",
    "    \n",
    "print(f\"Training Accuracy: {100*clf_pipe.score(df_X_train, df_y_train):0.2f}%\")\n",
    "print(f\"Validation Accuracy: {100*clf_pipe.score(df_X_valid, df_y_valid):0.2f}%\")\n",
    "print(f\"Test Accuracy: {100*clf_pipe.score(df_X_test, df_y_test):0.2f}%\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "fe9a196c",
   "metadata": {},
   "source": [
    "## XGBoost (Onehot)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "26c3be3a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[20:44:46] WARNING: ../src/learner.cc:1061: Starting in XGBoost 1.3.0, the default evaluation metric used with the objective 'binary:logistic' was changed from 'error' to 'logloss'. Explicitly set eval_metric if you'd like to restore the old behavior.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/raschka/miniforge3/lib/python3.8/site-packages/xgboost/sklearn.py:888: UserWarning: The use of label encoder in XGBClassifier is deprecated and will be removed in a future release. To remove this warning, do the following: 1) Pass option use_label_encoder=False when constructing XGBClassifier object; and 2) Encode your labels (y) as integers starting with 0, i.e. 0, 1, 2, ..., [num_class - 1].\n",
      "  warnings.warn(label_encoder_deprecation_msg, UserWarning)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training Accuracy: 89.78%\n",
      "Validation Accuracy: 77.78%\n",
      "Test Accuracy: 78.63%\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "import xgboost as xgb\n",
    "\n",
    "\n",
    "boost = xgb.XGBClassifier()\n",
    "\n",
    "clf_pipe = make_pipeline(ohe_preprocessor, boost)\n",
    "clf_pipe.fit(df_X_train, df_y_train)\n",
    "    \n",
    "print(f\"Training Accuracy: {100*clf_pipe.score(df_X_train, df_y_train):0.2f}%\")\n",
    "print(f\"Validation Accuracy: {100*clf_pipe.score(df_X_valid, df_y_valid):0.2f}%\")\n",
    "print(f\"Test Accuracy: {100*clf_pipe.score(df_X_test, df_y_test):0.2f}%\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "86066b55",
   "metadata": {},
   "source": [
    "## XGBoost (Ordinal)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "b0fd15b1",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[20:44:49] WARNING: ../src/learner.cc:1061: Starting in XGBoost 1.3.0, the default evaluation metric used with the objective 'binary:logistic' was changed from 'error' to 'logloss'. Explicitly set eval_metric if you'd like to restore the old behavior.\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/raschka/miniforge3/lib/python3.8/site-packages/xgboost/sklearn.py:888: UserWarning: The use of label encoder in XGBClassifier is deprecated and will be removed in a future release. To remove this warning, do the following: 1) Pass option use_label_encoder=False when constructing XGBClassifier object; and 2) Encode your labels (y) as integers starting with 0, i.e. 0, 1, 2, ..., [num_class - 1].\n",
      "  warnings.warn(label_encoder_deprecation_msg, UserWarning)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training Accuracy: 89.66%\n",
      "Validation Accuracy: 77.01%\n",
      "Test Accuracy: 77.86%\n"
     ]
    }
   ],
   "source": [
    "boost = xgb.XGBClassifier()\n",
    "\n",
    "clf_pipe = make_pipeline(cat_preprocessor, boost)\n",
    "clf_pipe.fit(df_X_train, df_y_train)\n",
    "    \n",
    "print(f\"Training Accuracy: {100*clf_pipe.score(df_X_train, df_y_train):0.2f}%\")\n",
    "print(f\"Validation Accuracy: {100*clf_pipe.score(df_X_valid, df_y_valid):0.2f}%\")\n",
    "print(f\"Test Accuracy: {100*clf_pipe.score(df_X_test, df_y_test):0.2f}%\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "59b210c7",
   "metadata": {},
   "source": [
    "## XGBoost (Categorical) -- experimental"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "738e06b2",
   "metadata": {},
   "source": [
     "- In contrast to the \"ordinal\" control above, the \"categorical\" section shows how to use the implemented support for categorical variables.\n",
     "- As far as we know, XGBoost detects categorical features via their `DataFrame` column type:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "id": "41b45c07",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "pclass         int64\n",
       "sex         category\n",
       "fare         float64\n",
       "embarked    category\n",
       "dtype: object"
      ]
     },
     "execution_count": 26,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df_X_train_new = df_X_train.copy()\n",
    "df_X_valid_new = df_X_valid.copy()\n",
    "df_X_test_new = df_X_test.copy()\n",
    "\n",
    "\n",
    "for name in ['sex', 'embarked']:\n",
    "    df_X_train_new[name] = df_X_train_new[name].astype('category')\n",
    "    df_X_valid_new[name] = df_X_valid_new[name].astype('category')\n",
    "    df_X_test_new[name] = df_X_test_new[name].astype('category')\n",
    "\n",
    "df_X_train_new.dtypes"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1f0722e2",
   "metadata": {},
   "source": [
    "- string variabels are not supported yet:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "id": "a63edd94",
   "metadata": {},
   "outputs": [],
   "source": [
    "d = {'female': 0, 'male': 1}\n",
    "df_X_train_new['sex'] = df_X_train_new['sex'].map(d)\n",
    "df_X_valid_new['sex'] = df_X_valid_new['sex'].map(d)\n",
    "df_X_test_new['sex'] = df_X_test_new['sex'].map(d)\n",
    "\n",
    "d = {'C': 0, 'Q': 1, 'S': 2}\n",
    "df_X_train_new['embarked'] = df_X_train_new['embarked'].map(d)\n",
    "df_X_valid_new['embarked'] = df_X_valid_new['embarked'].map(d)\n",
    "df_X_test_new['embarked'] = df_X_test_new['embarked'].map(d)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "2151c2e0",
   "metadata": {},
   "source": [
    "- `boost = xgb.XGBClassifier(enable_categorical=True)` throws an error later, not recognizing categorical columns in `DataFrame`, hence use `DMatrix as a workaround:\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "id": "96586ca4",
   "metadata": {},
   "outputs": [],
   "source": [
    "dtrain = xgb.DMatrix(df_X_train_new, label=df_y_train, enable_categorical=True)\n",
    "dvalid = xgb.DMatrix(df_X_valid_new, label=df_y_valid, enable_categorical=True)\n",
    "dtest = xgb.DMatrix(df_X_test_new, label=df_y_test, enable_categorical=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "id": "a596e088",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training Accuracy: 87.87%\n",
      "Validation Accuracy: 77.01%\n",
      "Test Accuracy: 75.95%\n"
     ]
    }
   ],
   "source": [
    "from sklearn.metrics import accuracy_score\n",
    "\n",
    "#boost = xgb.XGBClassifier(enable_categorical=True)\n",
    "\n",
    "gbm_model = xgb.train(params={}, dtrain=dtrain)\n",
    "\n",
    "train_predict = (gbm_model.predict(dtrain) > 0.5).astype(int)\n",
    "valid_predict = (gbm_model.predict(dvalid) > 0.5).astype(int)\n",
    "test_predict = (gbm_model.predict(dtest) > 0.5).astype(int)\n",
    "\n",
    "print(f\"Training Accuracy: {100*accuracy_score(train_predict, df_y_train):0.2f}%\")\n",
    "print(f\"Validation Accuracy: {100*accuracy_score(valid_predict, df_y_valid):0.2f}%\")\n",
    "print(f\"Test Accuracy: {100*accuracy_score(test_predict, df_y_test):0.2f}%\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "dabb2294",
   "metadata": {},
   "source": [
    "## LightGBM (Onehot)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "id": "3a8fa014",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training Accuracy: 87.23%\n",
      "Validation Accuracy: 77.39%\n",
      "Test Accuracy: 77.48%\n"
     ]
    }
   ],
   "source": [
    "import lightgbm as lgb\n",
    "\n",
    "\n",
    "boost = lgb.LGBMClassifier()\n",
    "\n",
    "clf_pipe = make_pipeline(ohe_preprocessor, boost)\n",
    "clf_pipe.fit(df_X_train, df_y_train)\n",
    "    \n",
    "print(f\"Training Accuracy: {100*clf_pipe.score(df_X_train, df_y_train):0.2f}%\")\n",
    "print(f\"Validation Accuracy: {100*clf_pipe.score(df_X_valid, df_y_valid):0.2f}%\")\n",
    "print(f\"Test Accuracy: {100*clf_pipe.score(df_X_test, df_y_test):0.2f}%\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d6ea2840",
   "metadata": {},
   "source": [
    "## LightGBM (Ordinal)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "id": "7b440d80",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training Accuracy: 87.23%\n",
      "Validation Accuracy: 78.16%\n",
      "Test Accuracy: 77.10%\n"
     ]
    }
   ],
   "source": [
    "boost = lgb.LGBMClassifier()\n",
    "\n",
    "clf_pipe = make_pipeline(cat_preprocessor, boost)\n",
    "clf_pipe.fit(df_X_train, df_y_train)\n",
    "    \n",
    "print(f\"Training Accuracy: {100*clf_pipe.score(df_X_train, df_y_train):0.2f}%\")\n",
    "print(f\"Validation Accuracy: {100*clf_pipe.score(df_X_valid, df_y_valid):0.2f}%\")\n",
    "print(f\"Test Accuracy: {100*clf_pipe.score(df_X_test, df_y_test):0.2f}%\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "bff75bd4",
   "metadata": {},
   "source": [
    "## LightGBM (Categorical)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "917e04d8",
   "metadata": {},
   "source": [
    "- In contrast to the \"ordinal\" control above, the \"categorical\" sections shows how to use the implemented support for categorical variables"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "id": "484ab34e",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/raschka/miniforge3/lib/python3.8/site-packages/lightgbm/basic.py:1222: UserWarning: categorical_feature keyword has been found in `params` and will be ignored.\n",
      "Please use categorical_feature argument of the Dataset constructor to pass this parameter.\n",
      "  _log_warning('{0} keyword has been found in `params` and will be ignored.\\n'\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training Accuracy: 87.23%\n",
      "Validation Accuracy: 78.16%\n",
      "Test Accuracy: 77.10%\n"
     ]
    }
   ],
   "source": [
    "boost = lgb.LGBMClassifier(categorical_feature=\"0,1\")\n",
    "\n",
    "clf_pipe = make_pipeline(cat_preprocessor, boost)\n",
    "clf_pipe.fit(df_X_train, df_y_train)\n",
    "    \n",
    "print(f\"Training Accuracy: {100*clf_pipe.score(df_X_train, df_y_train):0.2f}%\")\n",
    "print(f\"Validation Accuracy: {100*clf_pipe.score(df_X_valid, df_y_valid):0.2f}%\")\n",
    "print(f\"Test Accuracy: {100*clf_pipe.score(df_X_test, df_y_test):0.2f}%\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "8f8ac7fd",
   "metadata": {},
   "source": [
    "## CatBoost (Onehot)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "id": "6b2b9df2",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training Accuracy: 85.31%\n",
      "Validation Accuracy: 80.84%\n",
      "Test Accuracy: 75.57%\n"
     ]
    }
   ],
   "source": [
    "from catboost import CatBoostClassifier\n",
    "\n",
    "\n",
    "boost = CatBoostClassifier(verbose=0)\n",
    "\n",
    "clf_pipe = make_pipeline(ohe_preprocessor, boost)\n",
    "clf_pipe.fit(df_X_train, df_y_train)\n",
    "    \n",
    "print(f\"Training Accuracy: {100*clf_pipe.score(df_X_train, df_y_train):0.2f}%\")\n",
    "print(f\"Validation Accuracy: {100*clf_pipe.score(df_X_valid, df_y_valid):0.2f}%\")\n",
    "print(f\"Test Accuracy: {100*clf_pipe.score(df_X_test, df_y_test):0.2f}%\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "539fc986",
   "metadata": {},
   "source": [
    "## CatBoost (Ordinal)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "id": "982188b1",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training Accuracy: 85.06%\n",
      "Validation Accuracy: 80.84%\n",
      "Test Accuracy: 76.34%\n"
     ]
    }
   ],
   "source": [
    "from catboost import CatBoostClassifier\n",
    "\n",
    "\n",
    "boost = CatBoostClassifier(verbose=0)\n",
    "\n",
    "clf_pipe = make_pipeline(cat_preprocessor, boost)\n",
    "clf_pipe.fit(df_X_train, df_y_train)\n",
    "    \n",
    "print(f\"Training Accuracy: {100*clf_pipe.score(df_X_train, df_y_train):0.2f}%\")\n",
    "print(f\"Validation Accuracy: {100*clf_pipe.score(df_X_valid, df_y_valid):0.2f}%\")\n",
    "print(f\"Test Accuracy: {100*clf_pipe.score(df_X_test, df_y_test):0.2f}%\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "197bb7cc",
   "metadata": {},
   "source": [
    "## CatBoost (Categorical)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3252bacc",
   "metadata": {},
   "source": [
    "- In contrast to the \"ordinal\" control above, the \"categorical\" sections shows how to use the implemented support for categorical variables"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "id": "68d32f4a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Training Accuracy: 83.78%\n",
      "Validation Accuracy: 81.99%\n",
      "Test Accuracy: 75.57%\n"
     ]
    }
   ],
   "source": [
    "boost = CatBoostClassifier(verbose=0, cat_features=['sex', 'embarked'])\n",
    "\n",
    "\n",
    "#clf_pipe = make_pipeline(cat_preprocessor, boost)\n",
    "boost.fit(df_X_train, df_y_train)\n",
    "\n",
    "print(f\"Training Accuracy: {100*boost.score(df_X_train, df_y_train):0.2f}%\")\n",
    "print(f\"Validation Accuracy: {100*boost.score(df_X_valid, df_y_valid):0.2f}%\")\n",
    "print(f\"Test Accuracy: {100*boost.score(df_X_test, df_y_test):0.2f}%\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
